blob: 45c53c18cafc04a8a59dd25a52469552c54f057e [file] [log] [blame]
Jason Evanse476f8a2010-01-16 09:53:50 -08001#define JEMALLOC_ARENA_C_
Jason Evans376b1522010-02-11 14:45:59 -08002#include "jemalloc/internal/jemalloc_internal.h"
Jason Evanse476f8a2010-01-16 09:53:50 -08003
4/******************************************************************************/
5/* Data. */
6
Jason Evans243f7a02016-02-19 20:09:31 -08007purge_mode_t opt_purge = PURGE_DEFAULT;
8const char *purge_mode_names[] = {
9 "ratio",
10 "decay",
11 "N/A"
12};
Jason Evanse476f8a2010-01-16 09:53:50 -080013ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
Jason Evans8d6a3e82015-03-18 18:55:33 -070014static ssize_t lg_dirty_mult_default;
Jason Evans243f7a02016-02-19 20:09:31 -080015ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
16static ssize_t decay_time_default;
17
Jason Evansb1726102012-02-28 16:50:47 -080018arena_bin_info_t arena_bin_info[NBINS];
Jason Evanse476f8a2010-01-16 09:53:50 -080019
Jason Evans155bfa72014-10-05 17:54:10 -070020size_t map_bias;
21size_t map_misc_offset;
22size_t arena_maxrun; /* Max run size for arenas. */
Jason Evans676df882015-09-11 20:50:20 -070023size_t large_maxclass; /* Max large size class. */
Jason Evans0da8ce12016-02-22 16:20:56 -080024size_t run_quantize_max; /* Max run_quantize_*() input. */
25static size_t small_maxrun; /* Max run size for small size classes. */
Jason Evans8a03cf02015-05-04 09:58:36 -070026static bool *small_run_tab; /* Valid small run page multiples. */
Jason Evans0da8ce12016-02-22 16:20:56 -080027static size_t *run_quantize_floor_tab; /* run_quantize_floor() memoization. */
28static size_t *run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
Jason Evans3c4d92e2014-10-12 22:53:59 -070029unsigned nlclasses; /* Number of large size classes. */
30unsigned nhclasses; /* Number of huge size classes. */
Dave Watson3417a302016-02-23 12:06:21 -080031static szind_t runs_avail_bias; /* Size index for first runs_avail tree. */
32static szind_t runs_avail_nclasses; /* Number of runs_avail trees. */
Jason Evanse476f8a2010-01-16 09:53:50 -080033
34/******************************************************************************/
Jason Evansaa5113b2014-01-14 16:23:03 -080035/*
36 * Function prototypes for static functions that are referenced prior to
37 * definition.
38 */
Jason Evanse476f8a2010-01-16 09:53:50 -080039
Jason Evansb2c0d632016-04-13 23:36:15 -070040static void arena_purge_to_limit(tsd_t *tsd, arena_t *arena,
41 size_t ndirty_limit);
42static void arena_run_dalloc(tsd_t *tsd, arena_t *arena, arena_run_t *run,
43 bool dirty, bool cleaned, bool decommitted);
44static void arena_dalloc_bin_run(tsd_t *tsd, arena_t *arena,
45 arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
Jason Evans940a2e02010-10-17 17:51:37 -070046static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
47 arena_run_t *run, arena_bin_t *bin);
Jason Evanse476f8a2010-01-16 09:53:50 -080048
49/******************************************************************************/
50
Jason Evans8fadb1a2015-08-04 10:49:46 -070051JEMALLOC_INLINE_C size_t
Joshua Kahn13b40152015-09-18 16:58:17 -040052arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
Jason Evans8fadb1a2015-08-04 10:49:46 -070053{
54 arena_chunk_t *chunk;
55 size_t pageind, mapbits;
56
Jason Evans8fadb1a2015-08-04 10:49:46 -070057 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
58 pageind = arena_miscelm_to_pageind(miscelm);
59 mapbits = arena_mapbits_get(chunk, pageind);
Jason Evans5ef33a92015-08-19 14:12:05 -070060 return (arena_mapbits_size_decode(mapbits));
Ben Maurerf9ff6032014-04-06 13:24:16 -070061}
62
Jason Evansc6a2c392016-03-26 17:30:37 -070063JEMALLOC_INLINE_C int
64arena_run_addr_comp(const arena_chunk_map_misc_t *a,
65 const arena_chunk_map_misc_t *b)
66{
67 uintptr_t a_miscelm = (uintptr_t)a;
68 uintptr_t b_miscelm = (uintptr_t)b;
69
70 assert(a != NULL);
71 assert(b != NULL);
72
73 return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
74}
75
76/* Generate pairing heap functions. */
77ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
78 ph_link, arena_run_addr_comp)
79
Jason Evans8a03cf02015-05-04 09:58:36 -070080static size_t
Jason Evans0da8ce12016-02-22 16:20:56 -080081run_quantize_floor_compute(size_t size)
Jason Evans8a03cf02015-05-04 09:58:36 -070082{
83 size_t qsize;
84
85 assert(size != 0);
86 assert(size == PAGE_CEILING(size));
87
88 /* Don't change sizes that are valid small run sizes. */
89 if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
90 return (size);
91
92 /*
93 * Round down to the nearest run size that can actually be requested
94 * during normal large allocation. Add large_pad so that cache index
95 * randomization can offset the allocation from the page boundary.
96 */
97 qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
98 if (qsize <= SMALL_MAXCLASS + large_pad)
Jason Evans0da8ce12016-02-22 16:20:56 -080099 return (run_quantize_floor_compute(size - large_pad));
Jason Evans8a03cf02015-05-04 09:58:36 -0700100 assert(qsize <= size);
101 return (qsize);
102}
103
104static size_t
Jason Evans0da8ce12016-02-22 16:20:56 -0800105run_quantize_ceil_compute_hard(size_t size)
Jason Evans8a03cf02015-05-04 09:58:36 -0700106{
107 size_t large_run_size_next;
108
109 assert(size != 0);
110 assert(size == PAGE_CEILING(size));
111
112 /*
113 * Return the next quantized size greater than the input size.
114 * Quantized sizes comprise the union of run sizes that back small
115 * region runs, and run sizes that back large regions with no explicit
116 * alignment constraints.
117 */
118
119 if (size > SMALL_MAXCLASS) {
120 large_run_size_next = PAGE_CEILING(index2size(size2index(size -
121 large_pad) + 1) + large_pad);
122 } else
123 large_run_size_next = SIZE_T_MAX;
124 if (size >= small_maxrun)
125 return (large_run_size_next);
126
127 while (true) {
128 size += PAGE;
129 assert(size <= small_maxrun);
130 if (small_run_tab[size >> LG_PAGE]) {
131 if (large_run_size_next < size)
132 return (large_run_size_next);
133 return (size);
134 }
135 }
136}
137
138static size_t
Jason Evans0da8ce12016-02-22 16:20:56 -0800139run_quantize_ceil_compute(size_t size)
Jason Evans8a03cf02015-05-04 09:58:36 -0700140{
Jason Evans0da8ce12016-02-22 16:20:56 -0800141 size_t qsize = run_quantize_floor_compute(size);
Jason Evans8a03cf02015-05-04 09:58:36 -0700142
143 if (qsize < size) {
144 /*
145 * Skip a quantization that may have an adequately large run,
146 * because under-sized runs may be mixed in. This only happens
147 * when an unusual size is requested, i.e. for aligned
148 * allocation, and is just one of several places where linear
149 * search would potentially find sufficiently aligned available
150 * memory somewhere lower.
151 */
Jason Evans0da8ce12016-02-22 16:20:56 -0800152 qsize = run_quantize_ceil_compute_hard(qsize);
Jason Evans8a03cf02015-05-04 09:58:36 -0700153 }
154 return (qsize);
155}
Jason Evans0da8ce12016-02-22 16:20:56 -0800156
157#ifdef JEMALLOC_JET
158#undef run_quantize_floor
Jason Evansab0cfe02016-04-18 15:11:20 -0700159#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
Jason Evans0da8ce12016-02-22 16:20:56 -0800160#endif
161static size_t
162run_quantize_floor(size_t size)
163{
164 size_t ret;
165
166 assert(size > 0);
167 assert(size <= run_quantize_max);
168 assert((size & PAGE_MASK) == 0);
169
170 ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
171 assert(ret == run_quantize_floor_compute(size));
172 return (ret);
173}
174#ifdef JEMALLOC_JET
175#undef run_quantize_floor
176#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
Jason Evansab0cfe02016-04-18 15:11:20 -0700177run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
Jason Evans0da8ce12016-02-22 16:20:56 -0800178#endif
179
180#ifdef JEMALLOC_JET
181#undef run_quantize_ceil
Jason Evansab0cfe02016-04-18 15:11:20 -0700182#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
Jason Evans0da8ce12016-02-22 16:20:56 -0800183#endif
184static size_t
185run_quantize_ceil(size_t size)
186{
187 size_t ret;
188
189 assert(size > 0);
190 assert(size <= run_quantize_max);
191 assert((size & PAGE_MASK) == 0);
192
193 ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
194 assert(ret == run_quantize_ceil_compute(size));
195 return (ret);
196}
Jason Evansa9a46842016-02-22 14:58:05 -0800197#ifdef JEMALLOC_JET
198#undef run_quantize_ceil
199#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
Jason Evansab0cfe02016-04-18 15:11:20 -0700200run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
Jason Evansa9a46842016-02-22 14:58:05 -0800201#endif
Jason Evans8a03cf02015-05-04 09:58:36 -0700202
Jason Evansc6a2c392016-03-26 17:30:37 -0700203static arena_run_heap_t *
Dave Watson3417a302016-02-23 12:06:21 -0800204arena_runs_avail_get(arena_t *arena, szind_t ind)
205{
206
207 assert(ind >= runs_avail_bias);
208 assert(ind - runs_avail_bias < runs_avail_nclasses);
209
210 return (&arena->runs_avail[ind - runs_avail_bias]);
211}
Jason Evanse476f8a2010-01-16 09:53:50 -0800212
Jason Evanse3d13062012-10-30 15:42:37 -0700213static void
214arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
Qinfan Wu90737fc2014-07-21 19:39:20 -0700215 size_t npages)
Jason Evanse3d13062012-10-30 15:42:37 -0700216{
Dave Watson3417a302016-02-23 12:06:21 -0800217 szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
Jason Evans61a6dfc2016-03-23 16:04:38 -0700218 arena_miscelm_get_const(chunk, pageind))));
Jason Evanse3d13062012-10-30 15:42:37 -0700219 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
220 LG_PAGE));
Jason Evansc6a2c392016-03-26 17:30:37 -0700221 arena_run_heap_insert(arena_runs_avail_get(arena, ind),
222 arena_miscelm_get_mutable(chunk, pageind));
Jason Evanse3d13062012-10-30 15:42:37 -0700223}
224
225static void
226arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
Qinfan Wu90737fc2014-07-21 19:39:20 -0700227 size_t npages)
Jason Evanse3d13062012-10-30 15:42:37 -0700228{
Dave Watson3417a302016-02-23 12:06:21 -0800229 szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
Jason Evans61a6dfc2016-03-23 16:04:38 -0700230 arena_miscelm_get_const(chunk, pageind))));
Jason Evanse3d13062012-10-30 15:42:37 -0700231 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
232 LG_PAGE));
Jason Evansc6a2c392016-03-26 17:30:37 -0700233 arena_run_heap_remove(arena_runs_avail_get(arena, ind),
234 arena_miscelm_get_mutable(chunk, pageind));
Jason Evanse3d13062012-10-30 15:42:37 -0700235}
236
Jason Evans070b3c32014-08-14 14:45:58 -0700237static void
Jason Evansee41ad42015-02-15 18:04:46 -0800238arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
Jason Evans070b3c32014-08-14 14:45:58 -0700239 size_t npages)
240{
Jason Evans61a6dfc2016-03-23 16:04:38 -0700241 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
242 pageind);
Jason Evansee41ad42015-02-15 18:04:46 -0800243
Jason Evans070b3c32014-08-14 14:45:58 -0700244 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
245 LG_PAGE));
246 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
247 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
248 CHUNK_MAP_DIRTY);
Jason Evansee41ad42015-02-15 18:04:46 -0800249
Jason Evans613cdc82016-03-08 01:04:48 -0800250 qr_new(&miscelm->rd, rd_link);
251 qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
Jason Evans070b3c32014-08-14 14:45:58 -0700252 arena->ndirty += npages;
253}
254
255static void
Jason Evansee41ad42015-02-15 18:04:46 -0800256arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
Jason Evans070b3c32014-08-14 14:45:58 -0700257 size_t npages)
258{
Jason Evans61a6dfc2016-03-23 16:04:38 -0700259 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
260 pageind);
Jason Evansee41ad42015-02-15 18:04:46 -0800261
Jason Evans070b3c32014-08-14 14:45:58 -0700262 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
263 LG_PAGE));
264 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
265 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
266 CHUNK_MAP_DIRTY);
Jason Evansee41ad42015-02-15 18:04:46 -0800267
Jason Evans613cdc82016-03-08 01:04:48 -0800268 qr_remove(&miscelm->rd, rd_link);
Jason Evansee41ad42015-02-15 18:04:46 -0800269 assert(arena->ndirty >= npages);
Jason Evans070b3c32014-08-14 14:45:58 -0700270 arena->ndirty -= npages;
271}
272
Jason Evansee41ad42015-02-15 18:04:46 -0800273static size_t
274arena_chunk_dirty_npages(const extent_node_t *node)
275{
276
277 return (extent_node_size_get(node) >> LG_PAGE);
278}
279
Jason Evansee41ad42015-02-15 18:04:46 -0800280void
Jason Evans738e0892015-02-18 01:15:50 -0800281arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
Jason Evansee41ad42015-02-15 18:04:46 -0800282{
283
Jason Evans738e0892015-02-18 01:15:50 -0800284 if (cache) {
Jason Evans47701b22015-02-17 22:23:10 -0800285 extent_node_dirty_linkage_init(node);
Jason Evans738e0892015-02-18 01:15:50 -0800286 extent_node_dirty_insert(node, &arena->runs_dirty,
287 &arena->chunks_cache);
Jason Evansee41ad42015-02-15 18:04:46 -0800288 arena->ndirty += arena_chunk_dirty_npages(node);
289 }
290}
291
292void
Jason Evans738e0892015-02-18 01:15:50 -0800293arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
Jason Evansee41ad42015-02-15 18:04:46 -0800294{
295
296 if (dirty) {
Jason Evans738e0892015-02-18 01:15:50 -0800297 extent_node_dirty_remove(node);
Jason Evansee41ad42015-02-15 18:04:46 -0800298 assert(arena->ndirty >= arena_chunk_dirty_npages(node));
299 arena->ndirty -= arena_chunk_dirty_npages(node);
300 }
301}
302
Jason Evansaf1f5922014-10-30 16:38:08 -0700303JEMALLOC_INLINE_C void *
Jason Evans49f7e8f2011-03-15 13:59:15 -0700304arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
Jason Evanse476f8a2010-01-16 09:53:50 -0800305{
306 void *ret;
Jason Evans42ce80e2016-02-25 20:51:00 -0800307 size_t regind;
Jason Evans0c5dd032014-09-29 01:31:39 -0700308 arena_chunk_map_misc_t *miscelm;
309 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -0800310
Jason Evans1e0a6362010-03-13 13:41:58 -0800311 assert(run->nfree > 0);
Jason Evans551ebc42014-10-03 10:16:09 -0700312 assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
Jason Evanse476f8a2010-01-16 09:53:50 -0800313
Jason Evans9e1810c2016-02-24 12:42:23 -0800314 regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
Jason Evans0c5dd032014-09-29 01:31:39 -0700315 miscelm = arena_run_to_miscelm(run);
316 rpages = arena_miscelm_to_rpages(miscelm);
317 ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
Jason Evans122449b2012-04-06 00:35:09 -0700318 (uintptr_t)(bin_info->reg_interval * regind));
Jason Evans1e0a6362010-03-13 13:41:58 -0800319 run->nfree--;
Jason Evans1e0a6362010-03-13 13:41:58 -0800320 return (ret);
Jason Evans6109fe02010-02-10 10:37:56 -0800321}
322
Jason Evansaf1f5922014-10-30 16:38:08 -0700323JEMALLOC_INLINE_C void
Jason Evans1e0a6362010-03-13 13:41:58 -0800324arena_run_reg_dalloc(arena_run_t *run, void *ptr)
Jason Evans6109fe02010-02-10 10:37:56 -0800325{
Jason Evans49f7e8f2011-03-15 13:59:15 -0700326 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans203484e2012-05-02 00:30:36 -0700327 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
328 size_t mapbits = arena_mapbits_get(chunk, pageind);
Jason Evansd01fd192015-08-19 15:21:32 -0700329 szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
Jason Evans49f7e8f2011-03-15 13:59:15 -0700330 arena_bin_info_t *bin_info = &arena_bin_info[binind];
Jason Evans42ce80e2016-02-25 20:51:00 -0800331 size_t regind = arena_run_regind(run, bin_info, ptr);
Jason Evans84c8eef2011-03-16 10:30:13 -0700332
Jason Evans49f7e8f2011-03-15 13:59:15 -0700333 assert(run->nfree < bin_info->nregs);
Jason Evans1e0a6362010-03-13 13:41:58 -0800334 /* Freeing an interior pointer can cause assertion failure. */
Jason Evans0c5dd032014-09-29 01:31:39 -0700335 assert(((uintptr_t)ptr -
336 ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
Jason Evans122449b2012-04-06 00:35:09 -0700337 (uintptr_t)bin_info->reg0_offset)) %
338 (uintptr_t)bin_info->reg_interval == 0);
Jason Evans0c5dd032014-09-29 01:31:39 -0700339 assert((uintptr_t)ptr >=
340 (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
Jason Evans49f7e8f2011-03-15 13:59:15 -0700341 (uintptr_t)bin_info->reg0_offset);
Jason Evans84c8eef2011-03-16 10:30:13 -0700342 /* Freeing an unallocated pointer can cause assertion failure. */
Jason Evans0c5dd032014-09-29 01:31:39 -0700343 assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
Jason Evanse476f8a2010-01-16 09:53:50 -0800344
Jason Evans0c5dd032014-09-29 01:31:39 -0700345 bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
Jason Evans1e0a6362010-03-13 13:41:58 -0800346 run->nfree++;
Jason Evanse476f8a2010-01-16 09:53:50 -0800347}
348
Jason Evansaf1f5922014-10-30 16:38:08 -0700349JEMALLOC_INLINE_C void
Jason Evans38067482013-01-21 20:04:42 -0800350arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
351{
352
Jason Evansbd87b012014-04-15 16:35:08 -0700353 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
354 (run_ind << LG_PAGE)), (npages << LG_PAGE));
Jason Evans38067482013-01-21 20:04:42 -0800355 memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
356 (npages << LG_PAGE));
Jason Evans38067482013-01-21 20:04:42 -0800357}
358
Jason Evansaf1f5922014-10-30 16:38:08 -0700359JEMALLOC_INLINE_C void
Jason Evansdda90f52013-10-19 23:48:40 -0700360arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
361{
362
Jason Evansbd87b012014-04-15 16:35:08 -0700363 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
364 << LG_PAGE)), PAGE);
Jason Evansdda90f52013-10-19 23:48:40 -0700365}
366
Jason Evansaf1f5922014-10-30 16:38:08 -0700367JEMALLOC_INLINE_C void
Jason Evans38067482013-01-21 20:04:42 -0800368arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
Jason Evans21fb95b2010-10-18 17:45:40 -0700369{
Jason Evansd4bab212010-10-24 20:08:37 -0700370 size_t i;
Jason Evansae4c7b42012-04-02 07:04:34 -0700371 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
Jason Evansd4bab212010-10-24 20:08:37 -0700372
Jason Evansdda90f52013-10-19 23:48:40 -0700373 arena_run_page_mark_zeroed(chunk, run_ind);
Jason Evansae4c7b42012-04-02 07:04:34 -0700374 for (i = 0; i < PAGE / sizeof(size_t); i++)
Jason Evans21fb95b2010-10-18 17:45:40 -0700375 assert(p[i] == 0);
376}
Jason Evans21fb95b2010-10-18 17:45:40 -0700377
Jason Evanse476f8a2010-01-16 09:53:50 -0800378static void
Jason Evans40ee9aa2016-02-27 12:34:50 -0800379arena_nactive_add(arena_t *arena, size_t add_pages)
Jason Evansaa5113b2014-01-14 16:23:03 -0800380{
381
382 if (config_stats) {
Jason Evans3763d3b2016-02-26 17:29:35 -0800383 size_t cactive_add = CHUNK_CEILING((arena->nactive +
384 add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
Jason Evans15229372014-08-06 23:38:39 -0700385 LG_PAGE);
Jason Evans3763d3b2016-02-26 17:29:35 -0800386 if (cactive_add != 0)
387 stats_cactive_add(cactive_add);
388 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800389 arena->nactive += add_pages;
Jason Evans3763d3b2016-02-26 17:29:35 -0800390}
391
392static void
Jason Evans40ee9aa2016-02-27 12:34:50 -0800393arena_nactive_sub(arena_t *arena, size_t sub_pages)
Jason Evans3763d3b2016-02-26 17:29:35 -0800394{
395
396 if (config_stats) {
397 size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
398 CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
399 if (cactive_sub != 0)
400 stats_cactive_sub(cactive_sub);
Jason Evansaa5113b2014-01-14 16:23:03 -0800401 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800402 arena->nactive -= sub_pages;
Jason Evansaa5113b2014-01-14 16:23:03 -0800403}
404
405static void
406arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
Jason Evans8fadb1a2015-08-04 10:49:46 -0700407 size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
Jason Evansaa5113b2014-01-14 16:23:03 -0800408{
409 size_t total_pages, rem_pages;
410
Jason Evans8fadb1a2015-08-04 10:49:46 -0700411 assert(flag_dirty == 0 || flag_decommitted == 0);
412
Jason Evansaa5113b2014-01-14 16:23:03 -0800413 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
414 LG_PAGE;
415 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
416 flag_dirty);
417 assert(need_pages <= total_pages);
418 rem_pages = total_pages - need_pages;
419
Qinfan Wu90737fc2014-07-21 19:39:20 -0700420 arena_avail_remove(arena, chunk, run_ind, total_pages);
Jason Evans070b3c32014-08-14 14:45:58 -0700421 if (flag_dirty != 0)
Jason Evansee41ad42015-02-15 18:04:46 -0800422 arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
Jason Evans40ee9aa2016-02-27 12:34:50 -0800423 arena_nactive_add(arena, need_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -0800424
425 /* Keep track of trailing unused pages for later use. */
426 if (rem_pages > 0) {
Jason Evans8fadb1a2015-08-04 10:49:46 -0700427 size_t flags = flag_dirty | flag_decommitted;
Jason Evans1f27abc2015-08-11 12:42:33 -0700428 size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
429 0;
Jason Evans8fadb1a2015-08-04 10:49:46 -0700430
Jason Evans1f27abc2015-08-11 12:42:33 -0700431 arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
432 (rem_pages << LG_PAGE), flags |
433 (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
434 flag_unzeroed_mask));
435 arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
436 (rem_pages << LG_PAGE), flags |
437 (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
438 flag_unzeroed_mask));
439 if (flag_dirty != 0) {
440 arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
441 rem_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -0800442 }
Qinfan Wu90737fc2014-07-21 19:39:20 -0700443 arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -0800444 }
445}
446
Jason Evans8fadb1a2015-08-04 10:49:46 -0700447static bool
Jason Evansaa5113b2014-01-14 16:23:03 -0800448arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
449 bool remove, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -0800450{
451 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -0700452 arena_chunk_map_misc_t *miscelm;
Dmitry-Mea306a602015-09-04 13:15:28 +0300453 size_t flag_dirty, flag_decommitted, run_ind, need_pages;
Jason Evans1f27abc2015-08-11 12:42:33 -0700454 size_t flag_unzeroed_mask;
Jason Evans203484e2012-05-02 00:30:36 -0700455
Jason Evanse476f8a2010-01-16 09:53:50 -0800456 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -0700457 miscelm = arena_run_to_miscelm(run);
458 run_ind = arena_miscelm_to_pageind(miscelm);
Jason Evans203484e2012-05-02 00:30:36 -0700459 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
Jason Evans8fadb1a2015-08-04 10:49:46 -0700460 flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
Jason Evansae4c7b42012-04-02 07:04:34 -0700461 need_pages = (size >> LG_PAGE);
Jason Evanse476f8a2010-01-16 09:53:50 -0800462 assert(need_pages > 0);
Jason Evanse476f8a2010-01-16 09:53:50 -0800463
Jason Evansde249c82015-08-09 16:47:27 -0700464 if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
465 run_ind << LG_PAGE, size, arena->ind))
Jason Evans8fadb1a2015-08-04 10:49:46 -0700466 return (true);
467
Jason Evansc368f8c2013-10-29 18:17:42 -0700468 if (remove) {
Jason Evansaa5113b2014-01-14 16:23:03 -0800469 arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
Jason Evans8fadb1a2015-08-04 10:49:46 -0700470 flag_decommitted, need_pages);
Jason Evans19b3d612010-03-18 20:36:40 -0700471 }
472
Jason Evansaa5113b2014-01-14 16:23:03 -0800473 if (zero) {
Jason Evans1f27abc2015-08-11 12:42:33 -0700474 if (flag_decommitted != 0) {
475 /* The run is untouched, and therefore zeroed. */
476 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
477 *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
478 (need_pages << LG_PAGE));
479 } else if (flag_dirty != 0) {
480 /* The run is dirty, so all pages must be zeroed. */
481 arena_run_zero(chunk, run_ind, need_pages);
482 } else {
Jason Evansaa5113b2014-01-14 16:23:03 -0800483 /*
484 * The run is clean, so some pages may be zeroed (i.e.
485 * never before touched).
486 */
Dmitry-Mea306a602015-09-04 13:15:28 +0300487 size_t i;
Jason Evansaa5113b2014-01-14 16:23:03 -0800488 for (i = 0; i < need_pages; i++) {
489 if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
490 != 0)
491 arena_run_zero(chunk, run_ind+i, 1);
492 else if (config_debug) {
493 arena_run_page_validate_zeroed(chunk,
494 run_ind+i);
495 } else {
496 arena_run_page_mark_zeroed(chunk,
497 run_ind+i);
Jason Evans19b3d612010-03-18 20:36:40 -0700498 }
Jason Evanse476f8a2010-01-16 09:53:50 -0800499 }
500 }
Jason Evans19b3d612010-03-18 20:36:40 -0700501 } else {
Jason Evansbd87b012014-04-15 16:35:08 -0700502 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
Jason Evansdda90f52013-10-19 23:48:40 -0700503 (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
Jason Evanse476f8a2010-01-16 09:53:50 -0800504 }
Jason Evansaa5113b2014-01-14 16:23:03 -0800505
506 /*
507 * Set the last element first, in case the run only contains one page
508 * (i.e. both statements set the same element).
509 */
Jason Evans1f27abc2015-08-11 12:42:33 -0700510 flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
511 CHUNK_MAP_UNZEROED : 0;
512 arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
513 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
514 run_ind+need_pages-1)));
515 arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
516 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
Jason Evans8fadb1a2015-08-04 10:49:46 -0700517 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -0800518}
519
Jason Evans8fadb1a2015-08-04 10:49:46 -0700520static bool
Jason Evansaa5113b2014-01-14 16:23:03 -0800521arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
Jason Evansc368f8c2013-10-29 18:17:42 -0700522{
523
Jason Evans8fadb1a2015-08-04 10:49:46 -0700524 return (arena_run_split_large_helper(arena, run, size, true, zero));
Jason Evansc368f8c2013-10-29 18:17:42 -0700525}
526
Jason Evans8fadb1a2015-08-04 10:49:46 -0700527static bool
Jason Evansaa5113b2014-01-14 16:23:03 -0800528arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
Jason Evansc368f8c2013-10-29 18:17:42 -0700529{
530
Jason Evans8fadb1a2015-08-04 10:49:46 -0700531 return (arena_run_split_large_helper(arena, run, size, false, zero));
Jason Evansaa5113b2014-01-14 16:23:03 -0800532}
533
Jason Evans8fadb1a2015-08-04 10:49:46 -0700534static bool
Jason Evansaa5113b2014-01-14 16:23:03 -0800535arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
Jason Evansd01fd192015-08-19 15:21:32 -0700536 szind_t binind)
Jason Evansaa5113b2014-01-14 16:23:03 -0800537{
538 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -0700539 arena_chunk_map_misc_t *miscelm;
Jason Evans8fadb1a2015-08-04 10:49:46 -0700540 size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
Jason Evansaa5113b2014-01-14 16:23:03 -0800541
542 assert(binind != BININD_INVALID);
543
544 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -0700545 miscelm = arena_run_to_miscelm(run);
546 run_ind = arena_miscelm_to_pageind(miscelm);
Jason Evansaa5113b2014-01-14 16:23:03 -0800547 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
Jason Evans8fadb1a2015-08-04 10:49:46 -0700548 flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
Jason Evansaa5113b2014-01-14 16:23:03 -0800549 need_pages = (size >> LG_PAGE);
550 assert(need_pages > 0);
551
Jason Evans8fadb1a2015-08-04 10:49:46 -0700552 if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
553 run_ind << LG_PAGE, size, arena->ind))
554 return (true);
555
556 arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
557 flag_decommitted, need_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -0800558
Jason Evans381c23d2014-10-10 23:01:03 -0700559 for (i = 0; i < need_pages; i++) {
Jason Evans1f27abc2015-08-11 12:42:33 -0700560 size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
561 run_ind+i);
562 arena_mapbits_small_set(chunk, run_ind+i, i, binind,
563 flag_unzeroed);
564 if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
Jason Evansaa5113b2014-01-14 16:23:03 -0800565 arena_run_page_validate_zeroed(chunk, run_ind+i);
566 }
Jason Evansbd87b012014-04-15 16:35:08 -0700567 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
Jason Evansaa5113b2014-01-14 16:23:03 -0800568 (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
Jason Evans8fadb1a2015-08-04 10:49:46 -0700569 return (false);
Jason Evansaa5113b2014-01-14 16:23:03 -0800570}
571
572static arena_chunk_t *
573arena_chunk_init_spare(arena_t *arena)
574{
575 arena_chunk_t *chunk;
576
577 assert(arena->spare != NULL);
578
579 chunk = arena->spare;
580 arena->spare = NULL;
581
582 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
583 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
584 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
Jason Evans155bfa72014-10-05 17:54:10 -0700585 arena_maxrun);
Jason Evansaa5113b2014-01-14 16:23:03 -0800586 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
Jason Evans155bfa72014-10-05 17:54:10 -0700587 arena_maxrun);
Jason Evansaa5113b2014-01-14 16:23:03 -0800588 assert(arena_mapbits_dirty_get(chunk, map_bias) ==
589 arena_mapbits_dirty_get(chunk, chunk_npages-1));
590
591 return (chunk);
592}
593
Jason Evans99bd94f2015-02-18 16:40:53 -0800594static bool
Jason Evansb2c0d632016-04-13 23:36:15 -0700595arena_chunk_register(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
596 bool zero)
Jason Evans99bd94f2015-02-18 16:40:53 -0800597{
598
Jason Evans8fadb1a2015-08-04 10:49:46 -0700599 /*
600 * The extent node notion of "committed" doesn't directly apply to
Jason Evansde249c82015-08-09 16:47:27 -0700601 * arena chunks. Arbitrarily mark them as committed. The commit state
602 * of runs is tracked individually, and upon chunk deallocation the
603 * entire chunk is in a consistent commit state.
Jason Evans8fadb1a2015-08-04 10:49:46 -0700604 */
605 extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
Jason Evans99bd94f2015-02-18 16:40:53 -0800606 extent_node_achunk_set(&chunk->node, true);
Jason Evansb2c0d632016-04-13 23:36:15 -0700607 return (chunk_register(tsd, chunk, &chunk->node));
Jason Evans99bd94f2015-02-18 16:40:53 -0800608}
609
610static arena_chunk_t *
Jason Evansb2c0d632016-04-13 23:36:15 -0700611arena_chunk_alloc_internal_hard(tsd_t *tsd, arena_t *arena,
612 chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
Jason Evans99bd94f2015-02-18 16:40:53 -0800613{
614 arena_chunk_t *chunk;
Jason Evans99bd94f2015-02-18 16:40:53 -0800615
Jason Evansb2c0d632016-04-13 23:36:15 -0700616 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evansb49a3342015-07-28 11:28:19 -0400617
Jason Evansb2c0d632016-04-13 23:36:15 -0700618 chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsd, arena, chunk_hooks,
619 NULL, chunksize, chunksize, zero, commit);
Jason Evans8fadb1a2015-08-04 10:49:46 -0700620 if (chunk != NULL && !*commit) {
621 /* Commit header. */
622 if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
623 LG_PAGE, arena->ind)) {
Jason Evansb2c0d632016-04-13 23:36:15 -0700624 chunk_dalloc_wrapper(tsd, arena, chunk_hooks,
625 (void *)chunk, chunksize, *zero, *commit);
Jason Evans8fadb1a2015-08-04 10:49:46 -0700626 chunk = NULL;
627 }
628 }
Jason Evansb2c0d632016-04-13 23:36:15 -0700629 if (chunk != NULL && arena_chunk_register(tsd, arena, chunk, *zero)) {
Jason Evans8fadb1a2015-08-04 10:49:46 -0700630 if (!*commit) {
631 /* Undo commit of header. */
632 chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
633 LG_PAGE, arena->ind);
634 }
Jason Evansb2c0d632016-04-13 23:36:15 -0700635 chunk_dalloc_wrapper(tsd, arena, chunk_hooks, (void *)chunk,
Jason Evansce7c0f92016-03-30 18:36:04 -0700636 chunksize, *zero, *commit);
Jason Evans99bd94f2015-02-18 16:40:53 -0800637 chunk = NULL;
638 }
Jason Evans99bd94f2015-02-18 16:40:53 -0800639
Jason Evansb2c0d632016-04-13 23:36:15 -0700640 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -0800641 return (chunk);
642}
643
Jason Evansaa5113b2014-01-14 16:23:03 -0800644static arena_chunk_t *
Jason Evansb2c0d632016-04-13 23:36:15 -0700645arena_chunk_alloc_internal(tsd_t *tsd, arena_t *arena, bool *zero, bool *commit)
Jason Evanse2deab72014-05-15 22:22:27 -0700646{
647 arena_chunk_t *chunk;
Jason Evansb49a3342015-07-28 11:28:19 -0400648 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
Jason Evanse2deab72014-05-15 22:22:27 -0700649
Jason Evansb2c0d632016-04-13 23:36:15 -0700650 chunk = chunk_alloc_cache(tsd, arena, &chunk_hooks, NULL, chunksize,
Jason Evansb49a3342015-07-28 11:28:19 -0400651 chunksize, zero, true);
Jason Evans8fadb1a2015-08-04 10:49:46 -0700652 if (chunk != NULL) {
Jason Evansb2c0d632016-04-13 23:36:15 -0700653 if (arena_chunk_register(tsd, arena, chunk, *zero)) {
654 chunk_dalloc_cache(tsd, arena, &chunk_hooks, chunk,
Jason Evansde249c82015-08-09 16:47:27 -0700655 chunksize, true);
Jason Evans8fadb1a2015-08-04 10:49:46 -0700656 return (NULL);
657 }
658 *commit = true;
Jason Evansb49a3342015-07-28 11:28:19 -0400659 }
660 if (chunk == NULL) {
Jason Evansb2c0d632016-04-13 23:36:15 -0700661 chunk = arena_chunk_alloc_internal_hard(tsd, arena,
662 &chunk_hooks, zero, commit);
Jason Evansb49a3342015-07-28 11:28:19 -0400663 }
Jason Evans99bd94f2015-02-18 16:40:53 -0800664
Jason Evans4581b972014-11-27 17:22:36 -0200665 if (config_stats && chunk != NULL) {
Jason Evanse2deab72014-05-15 22:22:27 -0700666 arena->stats.mapped += chunksize;
Jason Evans4581b972014-11-27 17:22:36 -0200667 arena->stats.metadata_mapped += (map_bias << LG_PAGE);
668 }
Jason Evanse2deab72014-05-15 22:22:27 -0700669
670 return (chunk);
671}
672
Jason Evanse2deab72014-05-15 22:22:27 -0700673static arena_chunk_t *
Jason Evansb2c0d632016-04-13 23:36:15 -0700674arena_chunk_init_hard(tsd_t *tsd, arena_t *arena)
Jason Evansaa5113b2014-01-14 16:23:03 -0800675{
676 arena_chunk_t *chunk;
Jason Evans8fadb1a2015-08-04 10:49:46 -0700677 bool zero, commit;
Jason Evans45186f02015-08-10 23:03:34 -0700678 size_t flag_unzeroed, flag_decommitted, i;
Jason Evansaa5113b2014-01-14 16:23:03 -0800679
680 assert(arena->spare == NULL);
681
682 zero = false;
Jason Evans8fadb1a2015-08-04 10:49:46 -0700683 commit = false;
Jason Evansb2c0d632016-04-13 23:36:15 -0700684 chunk = arena_chunk_alloc_internal(tsd, arena, &zero, &commit);
Jason Evansaa5113b2014-01-14 16:23:03 -0800685 if (chunk == NULL)
686 return (NULL);
Jason Evansaa5113b2014-01-14 16:23:03 -0800687
Jason Evansaa5113b2014-01-14 16:23:03 -0800688 /*
Jason Evansaa5113b2014-01-14 16:23:03 -0800689 * Initialize the map to contain one maximal free untouched run. Mark
Jason Evansf86bc082016-03-31 11:19:46 -0700690 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
691 * or decommitted chunk.
Jason Evansaa5113b2014-01-14 16:23:03 -0800692 */
Jason Evans45186f02015-08-10 23:03:34 -0700693 flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
694 flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
695 arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
696 flag_unzeroed | flag_decommitted);
Jason Evansaa5113b2014-01-14 16:23:03 -0800697 /*
698 * There is no need to initialize the internal page map entries unless
699 * the chunk is not zeroed.
700 */
Jason Evans551ebc42014-10-03 10:16:09 -0700701 if (!zero) {
Jason Evansbd87b012014-04-15 16:35:08 -0700702 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
Jason Evans61a6dfc2016-03-23 16:04:38 -0700703 (void *)arena_bitselm_get_const(chunk, map_bias+1),
704 (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
705 chunk_npages-1) -
706 (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
Jason Evansaa5113b2014-01-14 16:23:03 -0800707 for (i = map_bias+1; i < chunk_npages-1; i++)
Jason Evans45186f02015-08-10 23:03:34 -0700708 arena_mapbits_internal_set(chunk, i, flag_unzeroed);
Jason Evansaa5113b2014-01-14 16:23:03 -0800709 } else {
Qinfan Wuff6a31d2014-08-29 13:34:40 -0700710 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
Jason Evans61a6dfc2016-03-23 16:04:38 -0700711 *)arena_bitselm_get_const(chunk, map_bias+1),
712 (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
713 chunk_npages-1) -
714 (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
Jason Evansaa5113b2014-01-14 16:23:03 -0800715 if (config_debug) {
716 for (i = map_bias+1; i < chunk_npages-1; i++) {
717 assert(arena_mapbits_unzeroed_get(chunk, i) ==
Jason Evans45186f02015-08-10 23:03:34 -0700718 flag_unzeroed);
Jason Evansaa5113b2014-01-14 16:23:03 -0800719 }
720 }
721 }
Jason Evans155bfa72014-10-05 17:54:10 -0700722 arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
Jason Evans45186f02015-08-10 23:03:34 -0700723 flag_unzeroed);
Jason Evansaa5113b2014-01-14 16:23:03 -0800724
725 return (chunk);
Jason Evansc368f8c2013-10-29 18:17:42 -0700726}
727
Jason Evanse476f8a2010-01-16 09:53:50 -0800728static arena_chunk_t *
Jason Evansb2c0d632016-04-13 23:36:15 -0700729arena_chunk_alloc(tsd_t *tsd, arena_t *arena)
Jason Evanse476f8a2010-01-16 09:53:50 -0800730{
731 arena_chunk_t *chunk;
Jason Evanse476f8a2010-01-16 09:53:50 -0800732
Jason Evansaa5113b2014-01-14 16:23:03 -0800733 if (arena->spare != NULL)
734 chunk = arena_chunk_init_spare(arena);
Chris Pride20a8c782014-03-25 22:36:05 -0700735 else {
Jason Evansb2c0d632016-04-13 23:36:15 -0700736 chunk = arena_chunk_init_hard(tsd, arena);
Chris Pride20a8c782014-03-25 22:36:05 -0700737 if (chunk == NULL)
738 return (NULL);
739 }
Jason Evanse476f8a2010-01-16 09:53:50 -0800740
Jason Evans19ff2ce2016-04-22 14:37:17 -0700741 ql_elm_new(&chunk->node, ql_link);
742 ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
Qinfan Wu90737fc2014-07-21 19:39:20 -0700743 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
Jason Evanse3d13062012-10-30 15:42:37 -0700744
Jason Evanse476f8a2010-01-16 09:53:50 -0800745 return (chunk);
746}
747
748static void
Jason Evans19ff2ce2016-04-22 14:37:17 -0700749arena_chunk_discard(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
750{
751 bool committed;
752 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
753
754 chunk_deregister(chunk, &chunk->node);
755
756 committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
757 if (!committed) {
758 /*
759 * Decommit the header. Mark the chunk as decommitted even if
760 * header decommit fails, since treating a partially committed
761 * chunk as committed has a high potential for causing later
762 * access of decommitted memory.
763 */
764 chunk_hooks = chunk_hooks_get(tsd, arena);
765 chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
766 arena->ind);
767 }
768
769 chunk_dalloc_cache(tsd, arena, &chunk_hooks, (void *)chunk, chunksize,
770 committed);
771
772 if (config_stats) {
773 arena->stats.mapped -= chunksize;
774 arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
775 }
776}
777
778static void
779arena_spare_discard(tsd_t *tsd, arena_t *arena, arena_chunk_t *spare)
780{
781
782 assert(arena->spare != spare);
783
784 if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
785 arena_run_dirty_remove(arena, spare, map_bias,
786 chunk_npages-map_bias);
787 }
788
789 arena_chunk_discard(tsd, arena, spare);
790}
791
792static void
Jason Evansb2c0d632016-04-13 23:36:15 -0700793arena_chunk_dalloc(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
Jason Evanse476f8a2010-01-16 09:53:50 -0800794{
Jason Evans19ff2ce2016-04-22 14:37:17 -0700795 arena_chunk_t *spare;
Qinfan Wu04d60a12014-07-18 14:21:17 -0700796
Jason Evans30fe12b2012-05-10 17:09:17 -0700797 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
798 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
799 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
Jason Evans155bfa72014-10-05 17:54:10 -0700800 arena_maxrun);
Jason Evans30fe12b2012-05-10 17:09:17 -0700801 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
Jason Evans155bfa72014-10-05 17:54:10 -0700802 arena_maxrun);
Jason Evans30fe12b2012-05-10 17:09:17 -0700803 assert(arena_mapbits_dirty_get(chunk, map_bias) ==
804 arena_mapbits_dirty_get(chunk, chunk_npages-1));
Jason Evansde249c82015-08-09 16:47:27 -0700805 assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
806 arena_mapbits_decommitted_get(chunk, chunk_npages-1));
Jason Evans30fe12b2012-05-10 17:09:17 -0700807
Dave Watson3417a302016-02-23 12:06:21 -0800808 /* Remove run from runs_avail, so that the arena does not use it. */
Qinfan Wu90737fc2014-07-21 19:39:20 -0700809 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
Jason Evanse476f8a2010-01-16 09:53:50 -0800810
Jason Evans19ff2ce2016-04-22 14:37:17 -0700811 ql_remove(&arena->achunks, &chunk->node, ql_link);
812 spare = arena->spare;
813 arena->spare = chunk;
814 if (spare != NULL)
815 arena_spare_discard(tsd, arena, spare);
Jason Evanse476f8a2010-01-16 09:53:50 -0800816}
817
Jason Evans9b41ac92014-10-14 22:20:00 -0700818static void
819arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
820{
Jason Evansd01fd192015-08-19 15:21:32 -0700821 szind_t index = size2index(usize) - nlclasses - NBINS;
Jason Evans9b41ac92014-10-14 22:20:00 -0700822
823 cassert(config_stats);
824
825 arena->stats.nmalloc_huge++;
826 arena->stats.allocated_huge += usize;
827 arena->stats.hstats[index].nmalloc++;
828 arena->stats.hstats[index].curhchunks++;
829}
830
831static void
832arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
833{
Jason Evansd01fd192015-08-19 15:21:32 -0700834 szind_t index = size2index(usize) - nlclasses - NBINS;
Jason Evans9b41ac92014-10-14 22:20:00 -0700835
836 cassert(config_stats);
837
838 arena->stats.nmalloc_huge--;
839 arena->stats.allocated_huge -= usize;
840 arena->stats.hstats[index].nmalloc--;
841 arena->stats.hstats[index].curhchunks--;
842}
843
844static void
845arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
846{
Jason Evansd01fd192015-08-19 15:21:32 -0700847 szind_t index = size2index(usize) - nlclasses - NBINS;
Jason Evans9b41ac92014-10-14 22:20:00 -0700848
849 cassert(config_stats);
850
851 arena->stats.ndalloc_huge++;
852 arena->stats.allocated_huge -= usize;
853 arena->stats.hstats[index].ndalloc++;
854 arena->stats.hstats[index].curhchunks--;
855}
856
857static void
Jason Evans7e674952016-04-25 13:26:54 -0700858arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
859{
860 szind_t index = size2index(usize) - nlclasses - NBINS;
861
862 cassert(config_stats);
863
864 arena->stats.ndalloc_huge++;
865 arena->stats.hstats[index].ndalloc--;
866}
867
868static void
Jason Evans9b41ac92014-10-14 22:20:00 -0700869arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
870{
Jason Evansd01fd192015-08-19 15:21:32 -0700871 szind_t index = size2index(usize) - nlclasses - NBINS;
Jason Evans9b41ac92014-10-14 22:20:00 -0700872
873 cassert(config_stats);
874
875 arena->stats.ndalloc_huge--;
876 arena->stats.allocated_huge += usize;
877 arena->stats.hstats[index].ndalloc--;
878 arena->stats.hstats[index].curhchunks++;
879}
880
881static void
882arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
883{
884
885 arena_huge_dalloc_stats_update(arena, oldsize);
886 arena_huge_malloc_stats_update(arena, usize);
887}
888
889static void
890arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
891 size_t usize)
892{
893
894 arena_huge_dalloc_stats_update_undo(arena, oldsize);
895 arena_huge_malloc_stats_update_undo(arena, usize);
896}
897
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800898extent_node_t *
Jason Evansb2c0d632016-04-13 23:36:15 -0700899arena_node_alloc(tsd_t *tsd, arena_t *arena)
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800900{
901 extent_node_t *node;
902
Jason Evansb2c0d632016-04-13 23:36:15 -0700903 malloc_mutex_lock(tsd, &arena->node_cache_mtx);
Jason Evans2195ba42015-02-15 16:43:52 -0800904 node = ql_last(&arena->node_cache, ql_link);
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800905 if (node == NULL) {
Jason Evansb2c0d632016-04-13 23:36:15 -0700906 malloc_mutex_unlock(tsd, &arena->node_cache_mtx);
907 return (base_alloc(tsd, sizeof(extent_node_t)));
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800908 }
Jason Evans2195ba42015-02-15 16:43:52 -0800909 ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
Jason Evansb2c0d632016-04-13 23:36:15 -0700910 malloc_mutex_unlock(tsd, &arena->node_cache_mtx);
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800911 return (node);
912}
913
914void
Jason Evansb2c0d632016-04-13 23:36:15 -0700915arena_node_dalloc(tsd_t *tsd, arena_t *arena, extent_node_t *node)
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800916{
917
Jason Evansb2c0d632016-04-13 23:36:15 -0700918 malloc_mutex_lock(tsd, &arena->node_cache_mtx);
Jason Evans2195ba42015-02-15 16:43:52 -0800919 ql_elm_new(node, ql_link);
920 ql_tail_insert(&arena->node_cache, node, ql_link);
Jason Evansb2c0d632016-04-13 23:36:15 -0700921 malloc_mutex_unlock(tsd, &arena->node_cache_mtx);
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800922}
923
Jason Evans99bd94f2015-02-18 16:40:53 -0800924static void *
Jason Evansb2c0d632016-04-13 23:36:15 -0700925arena_chunk_alloc_huge_hard(tsd_t *tsd, arena_t *arena,
926 chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
927 size_t csize)
Jason Evans9b41ac92014-10-14 22:20:00 -0700928{
929 void *ret;
Jason Evans8fadb1a2015-08-04 10:49:46 -0700930 bool commit = true;
Jason Evans9b41ac92014-10-14 22:20:00 -0700931
Jason Evansb2c0d632016-04-13 23:36:15 -0700932 ret = chunk_alloc_wrapper(tsd, arena, chunk_hooks, NULL, csize,
933 alignment, zero, &commit);
Jason Evans9b41ac92014-10-14 22:20:00 -0700934 if (ret == NULL) {
935 /* Revert optimistic stats updates. */
Jason Evansb2c0d632016-04-13 23:36:15 -0700936 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700937 if (config_stats) {
938 arena_huge_malloc_stats_update_undo(arena, usize);
939 arena->stats.mapped -= usize;
940 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800941 arena_nactive_sub(arena, usize >> LG_PAGE);
Jason Evansb2c0d632016-04-13 23:36:15 -0700942 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700943 }
944
Jason Evans99bd94f2015-02-18 16:40:53 -0800945 return (ret);
946}
Jason Evans9b41ac92014-10-14 22:20:00 -0700947
Jason Evans99bd94f2015-02-18 16:40:53 -0800948void *
Jason Evansb2c0d632016-04-13 23:36:15 -0700949arena_chunk_alloc_huge(tsd_t *tsd, arena_t *arena, size_t usize,
950 size_t alignment, bool *zero)
Jason Evans99bd94f2015-02-18 16:40:53 -0800951{
952 void *ret;
Jason Evansb49a3342015-07-28 11:28:19 -0400953 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
Jason Evans99bd94f2015-02-18 16:40:53 -0800954 size_t csize = CHUNK_CEILING(usize);
955
Jason Evansb2c0d632016-04-13 23:36:15 -0700956 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -0800957
958 /* Optimistically update stats. */
959 if (config_stats) {
960 arena_huge_malloc_stats_update(arena, usize);
961 arena->stats.mapped += usize;
962 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800963 arena_nactive_add(arena, usize >> LG_PAGE);
Jason Evans99bd94f2015-02-18 16:40:53 -0800964
Jason Evansb2c0d632016-04-13 23:36:15 -0700965 ret = chunk_alloc_cache(tsd, arena, &chunk_hooks, NULL, csize,
966 alignment, zero, true);
967 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -0800968 if (ret == NULL) {
Jason Evansb2c0d632016-04-13 23:36:15 -0700969 ret = arena_chunk_alloc_huge_hard(tsd, arena, &chunk_hooks,
970 usize, alignment, zero, csize);
Jason Evans99bd94f2015-02-18 16:40:53 -0800971 }
972
Jason Evans9b41ac92014-10-14 22:20:00 -0700973 return (ret);
974}
975
976void
Jason Evansb2c0d632016-04-13 23:36:15 -0700977arena_chunk_dalloc_huge(tsd_t *tsd, arena_t *arena, void *chunk, size_t usize)
Jason Evans9b41ac92014-10-14 22:20:00 -0700978{
Jason Evansb49a3342015-07-28 11:28:19 -0400979 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
Jason Evans99bd94f2015-02-18 16:40:53 -0800980 size_t csize;
Jason Evans9b41ac92014-10-14 22:20:00 -0700981
Jason Evans99bd94f2015-02-18 16:40:53 -0800982 csize = CHUNK_CEILING(usize);
Jason Evansb2c0d632016-04-13 23:36:15 -0700983 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700984 if (config_stats) {
985 arena_huge_dalloc_stats_update(arena, usize);
986 arena->stats.mapped -= usize;
Jason Evans9b41ac92014-10-14 22:20:00 -0700987 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800988 arena_nactive_sub(arena, usize >> LG_PAGE);
Jason Evans99bd94f2015-02-18 16:40:53 -0800989
Jason Evansb2c0d632016-04-13 23:36:15 -0700990 chunk_dalloc_cache(tsd, arena, &chunk_hooks, chunk, csize, true);
991 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700992}
993
994void
Jason Evansb2c0d632016-04-13 23:36:15 -0700995arena_chunk_ralloc_huge_similar(tsd_t *tsd, arena_t *arena, void *chunk,
996 size_t oldsize, size_t usize)
Jason Evans9b41ac92014-10-14 22:20:00 -0700997{
998
999 assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
1000 assert(oldsize != usize);
1001
Jason Evansb2c0d632016-04-13 23:36:15 -07001002 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -07001003 if (config_stats)
1004 arena_huge_ralloc_stats_update(arena, oldsize, usize);
Jason Evans40ee9aa2016-02-27 12:34:50 -08001005 if (oldsize < usize)
1006 arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
1007 else
1008 arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
Jason Evansb2c0d632016-04-13 23:36:15 -07001009 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -07001010}
1011
1012void
Jason Evansb2c0d632016-04-13 23:36:15 -07001013arena_chunk_ralloc_huge_shrink(tsd_t *tsd, arena_t *arena, void *chunk,
1014 size_t oldsize, size_t usize)
Jason Evans9b41ac92014-10-14 22:20:00 -07001015{
Jason Evans9b41ac92014-10-14 22:20:00 -07001016 size_t udiff = oldsize - usize;
1017 size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
1018
Jason Evansb2c0d632016-04-13 23:36:15 -07001019 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -07001020 if (config_stats) {
1021 arena_huge_ralloc_stats_update(arena, oldsize, usize);
Jason Evans40ee9aa2016-02-27 12:34:50 -08001022 if (cdiff != 0)
Jason Evans9b41ac92014-10-14 22:20:00 -07001023 arena->stats.mapped -= cdiff;
Jason Evans9b41ac92014-10-14 22:20:00 -07001024 }
Jason Evans40ee9aa2016-02-27 12:34:50 -08001025 arena_nactive_sub(arena, udiff >> LG_PAGE);
Jason Evans99bd94f2015-02-18 16:40:53 -08001026
Jason Evans2012d5a2014-11-17 09:54:49 -08001027 if (cdiff != 0) {
Jason Evansb49a3342015-07-28 11:28:19 -04001028 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
Jason Evans99bd94f2015-02-18 16:40:53 -08001029 void *nchunk = (void *)((uintptr_t)chunk +
1030 CHUNK_CEILING(usize));
1031
Jason Evansb2c0d632016-04-13 23:36:15 -07001032 chunk_dalloc_cache(tsd, arena, &chunk_hooks, nchunk, cdiff,
1033 true);
Jason Evansb49a3342015-07-28 11:28:19 -04001034 }
Jason Evansb2c0d632016-04-13 23:36:15 -07001035 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001036}
1037
Jason Evansb49a3342015-07-28 11:28:19 -04001038static bool
Jason Evansb2c0d632016-04-13 23:36:15 -07001039arena_chunk_ralloc_huge_expand_hard(tsd_t *tsd, arena_t *arena,
1040 chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
1041 bool *zero, void *nchunk, size_t udiff, size_t cdiff)
Jason Evans99bd94f2015-02-18 16:40:53 -08001042{
1043 bool err;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001044 bool commit = true;
Jason Evans99bd94f2015-02-18 16:40:53 -08001045
Jason Evansb2c0d632016-04-13 23:36:15 -07001046 err = (chunk_alloc_wrapper(tsd, arena, chunk_hooks, nchunk, cdiff,
1047 chunksize, zero, &commit) == NULL);
Jason Evans99bd94f2015-02-18 16:40:53 -08001048 if (err) {
1049 /* Revert optimistic stats updates. */
Jason Evansb2c0d632016-04-13 23:36:15 -07001050 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001051 if (config_stats) {
1052 arena_huge_ralloc_stats_update_undo(arena, oldsize,
1053 usize);
1054 arena->stats.mapped -= cdiff;
1055 }
Jason Evans40ee9aa2016-02-27 12:34:50 -08001056 arena_nactive_sub(arena, udiff >> LG_PAGE);
Jason Evansb2c0d632016-04-13 23:36:15 -07001057 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evansb49a3342015-07-28 11:28:19 -04001058 } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1059 cdiff, true, arena->ind)) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001060 chunk_dalloc_wrapper(tsd, arena, chunk_hooks, nchunk, cdiff,
1061 *zero, true);
Jason Evansb49a3342015-07-28 11:28:19 -04001062 err = true;
Jason Evans2012d5a2014-11-17 09:54:49 -08001063 }
Jason Evans99bd94f2015-02-18 16:40:53 -08001064 return (err);
Jason Evans9b41ac92014-10-14 22:20:00 -07001065}
1066
1067bool
Jason Evansb2c0d632016-04-13 23:36:15 -07001068arena_chunk_ralloc_huge_expand(tsd_t *tsd, arena_t *arena, void *chunk,
1069 size_t oldsize, size_t usize, bool *zero)
Jason Evans9b41ac92014-10-14 22:20:00 -07001070{
Jason Evans99bd94f2015-02-18 16:40:53 -08001071 bool err;
Jason Evansb2c0d632016-04-13 23:36:15 -07001072 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsd, arena);
Jason Evans99bd94f2015-02-18 16:40:53 -08001073 void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
Jason Evans9b41ac92014-10-14 22:20:00 -07001074 size_t udiff = usize - oldsize;
1075 size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
1076
Jason Evansb2c0d632016-04-13 23:36:15 -07001077 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001078
1079 /* Optimistically update stats. */
Jason Evans9b41ac92014-10-14 22:20:00 -07001080 if (config_stats) {
Jason Evans9b41ac92014-10-14 22:20:00 -07001081 arena_huge_ralloc_stats_update(arena, oldsize, usize);
1082 arena->stats.mapped += cdiff;
1083 }
Jason Evans40ee9aa2016-02-27 12:34:50 -08001084 arena_nactive_add(arena, udiff >> LG_PAGE);
Jason Evans9b41ac92014-10-14 22:20:00 -07001085
Jason Evansb2c0d632016-04-13 23:36:15 -07001086 err = (chunk_alloc_cache(tsd, arena, &chunk_hooks, nchunk, cdiff,
1087 chunksize, zero, true) == NULL);
1088 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001089 if (err) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001090 err = arena_chunk_ralloc_huge_expand_hard(tsd, arena,
1091 &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
Jason Evansb49a3342015-07-28 11:28:19 -04001092 cdiff);
1093 } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1094 cdiff, true, arena->ind)) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001095 chunk_dalloc_wrapper(tsd, arena, &chunk_hooks, nchunk, cdiff,
1096 *zero, true);
Jason Evansb49a3342015-07-28 11:28:19 -04001097 err = true;
Jason Evans9b41ac92014-10-14 22:20:00 -07001098 }
1099
Jason Evans99bd94f2015-02-18 16:40:53 -08001100 return (err);
Jason Evans9b41ac92014-10-14 22:20:00 -07001101}
1102
Jason Evansaa282662015-07-15 16:02:21 -07001103/*
1104 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
Dave Watson3417a302016-02-23 12:06:21 -08001105 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
1106 * same size.
Jason Evansaa282662015-07-15 16:02:21 -07001107 */
Jason Evans97c04a92015-03-06 19:57:36 -08001108static arena_run_t *
Jason Evansaa282662015-07-15 16:02:21 -07001109arena_run_first_best_fit(arena_t *arena, size_t size)
Jason Evans97c04a92015-03-06 19:57:36 -08001110{
Dave Watson3417a302016-02-23 12:06:21 -08001111 szind_t ind, i;
1112
1113 ind = size2index(run_quantize_ceil(size));
Dave Watsoncd86c142016-02-24 11:02:49 -08001114 for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
Jason Evansc6a2c392016-03-26 17:30:37 -07001115 arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
1116 arena_runs_avail_get(arena, i));
1117 if (miscelm != NULL)
Dave Watson3417a302016-02-23 12:06:21 -08001118 return (&miscelm->run);
1119 }
1120
1121 return (NULL);
Jason Evans97c04a92015-03-06 19:57:36 -08001122}
1123
Jason Evanse476f8a2010-01-16 09:53:50 -08001124static arena_run_t *
Jason Evansaa5113b2014-01-14 16:23:03 -08001125arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001126{
Jason Evansaa282662015-07-15 16:02:21 -07001127 arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001128 if (run != NULL) {
1129 if (arena_run_split_large(arena, run, size, zero))
1130 run = NULL;
1131 }
Jason Evans97c04a92015-03-06 19:57:36 -08001132 return (run);
Jason Evans5b0c9962012-05-10 15:47:24 -07001133}
1134
1135static arena_run_t *
Jason Evansb2c0d632016-04-13 23:36:15 -07001136arena_run_alloc_large(tsd_t *tsd, arena_t *arena, size_t size, bool zero)
Jason Evans5b0c9962012-05-10 15:47:24 -07001137{
1138 arena_chunk_t *chunk;
1139 arena_run_t *run;
1140
Jason Evansfc0b3b72014-10-09 17:54:06 -07001141 assert(size <= arena_maxrun);
Jason Evans8a03cf02015-05-04 09:58:36 -07001142 assert(size == PAGE_CEILING(size));
Jason Evans5b0c9962012-05-10 15:47:24 -07001143
1144 /* Search the arena's chunks for the lowest best fit. */
Jason Evansaa5113b2014-01-14 16:23:03 -08001145 run = arena_run_alloc_large_helper(arena, size, zero);
Jason Evans5b0c9962012-05-10 15:47:24 -07001146 if (run != NULL)
1147 return (run);
1148
Jason Evanse476f8a2010-01-16 09:53:50 -08001149 /*
1150 * No usable runs. Create a new chunk from which to allocate the run.
1151 */
Jason Evansb2c0d632016-04-13 23:36:15 -07001152 chunk = arena_chunk_alloc(tsd, arena);
Jason Evanse00572b2010-03-14 19:43:56 -07001153 if (chunk != NULL) {
Jason Evans61a6dfc2016-03-23 16:04:38 -07001154 run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001155 if (arena_run_split_large(arena, run, size, zero))
1156 run = NULL;
Jason Evanse00572b2010-03-14 19:43:56 -07001157 return (run);
1158 }
1159
1160 /*
1161 * arena_chunk_alloc() failed, but another thread may have made
1162 * sufficient memory available while this one dropped arena->lock in
1163 * arena_chunk_alloc(), so search one more time.
1164 */
Jason Evansaa5113b2014-01-14 16:23:03 -08001165 return (arena_run_alloc_large_helper(arena, size, zero));
1166}
1167
1168static arena_run_t *
Jason Evansd01fd192015-08-19 15:21:32 -07001169arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
Jason Evansaa5113b2014-01-14 16:23:03 -08001170{
Jason Evansaa282662015-07-15 16:02:21 -07001171 arena_run_t *run = arena_run_first_best_fit(arena, size);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001172 if (run != NULL) {
1173 if (arena_run_split_small(arena, run, size, binind))
1174 run = NULL;
1175 }
Jason Evans97c04a92015-03-06 19:57:36 -08001176 return (run);
Jason Evansaa5113b2014-01-14 16:23:03 -08001177}
1178
1179static arena_run_t *
Jason Evansb2c0d632016-04-13 23:36:15 -07001180arena_run_alloc_small(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind)
Jason Evansaa5113b2014-01-14 16:23:03 -08001181{
1182 arena_chunk_t *chunk;
1183 arena_run_t *run;
1184
Jason Evansfc0b3b72014-10-09 17:54:06 -07001185 assert(size <= arena_maxrun);
Jason Evans8a03cf02015-05-04 09:58:36 -07001186 assert(size == PAGE_CEILING(size));
Jason Evansaa5113b2014-01-14 16:23:03 -08001187 assert(binind != BININD_INVALID);
1188
1189 /* Search the arena's chunks for the lowest best fit. */
1190 run = arena_run_alloc_small_helper(arena, size, binind);
1191 if (run != NULL)
1192 return (run);
1193
1194 /*
1195 * No usable runs. Create a new chunk from which to allocate the run.
1196 */
Jason Evansb2c0d632016-04-13 23:36:15 -07001197 chunk = arena_chunk_alloc(tsd, arena);
Jason Evansaa5113b2014-01-14 16:23:03 -08001198 if (chunk != NULL) {
Jason Evans61a6dfc2016-03-23 16:04:38 -07001199 run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001200 if (arena_run_split_small(arena, run, size, binind))
1201 run = NULL;
Jason Evansaa5113b2014-01-14 16:23:03 -08001202 return (run);
1203 }
1204
1205 /*
1206 * arena_chunk_alloc() failed, but another thread may have made
1207 * sufficient memory available while this one dropped arena->lock in
1208 * arena_chunk_alloc(), so search one more time.
1209 */
1210 return (arena_run_alloc_small_helper(arena, size, binind));
Jason Evanse476f8a2010-01-16 09:53:50 -08001211}
1212
Jason Evans8d6a3e82015-03-18 18:55:33 -07001213static bool
1214arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
1215{
1216
Jason Evansbd16ea42015-03-24 15:59:28 -07001217 return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
1218 << 3));
Jason Evans8d6a3e82015-03-18 18:55:33 -07001219}
1220
1221ssize_t
Jason Evansb2c0d632016-04-13 23:36:15 -07001222arena_lg_dirty_mult_get(tsd_t *tsd, arena_t *arena)
Jason Evans8d6a3e82015-03-18 18:55:33 -07001223{
1224 ssize_t lg_dirty_mult;
1225
Jason Evansb2c0d632016-04-13 23:36:15 -07001226 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001227 lg_dirty_mult = arena->lg_dirty_mult;
Jason Evansb2c0d632016-04-13 23:36:15 -07001228 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001229
1230 return (lg_dirty_mult);
1231}
1232
1233bool
Jason Evansb2c0d632016-04-13 23:36:15 -07001234arena_lg_dirty_mult_set(tsd_t *tsd, arena_t *arena, ssize_t lg_dirty_mult)
Jason Evans8d6a3e82015-03-18 18:55:33 -07001235{
1236
1237 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
1238 return (true);
1239
Jason Evansb2c0d632016-04-13 23:36:15 -07001240 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001241 arena->lg_dirty_mult = lg_dirty_mult;
Jason Evansb2c0d632016-04-13 23:36:15 -07001242 arena_maybe_purge(tsd, arena);
1243 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001244
1245 return (false);
1246}
1247
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001248static void
Jason Evans243f7a02016-02-19 20:09:31 -08001249arena_decay_deadline_init(arena_t *arena)
1250{
1251
1252 assert(opt_purge == purge_mode_decay);
1253
1254 /*
1255 * Generate a new deadline that is uniformly random within the next
1256 * epoch after the current one.
1257 */
Jason Evans9bad0792016-02-21 11:25:02 -08001258 nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
1259 nstime_add(&arena->decay_deadline, &arena->decay_interval);
Jason Evans243f7a02016-02-19 20:09:31 -08001260 if (arena->decay_time > 0) {
Jason Evans9bad0792016-02-21 11:25:02 -08001261 nstime_t jitter;
Jason Evans243f7a02016-02-19 20:09:31 -08001262
Jason Evans9bad0792016-02-21 11:25:02 -08001263 nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
1264 nstime_ns(&arena->decay_interval)));
1265 nstime_add(&arena->decay_deadline, &jitter);
Jason Evans243f7a02016-02-19 20:09:31 -08001266 }
1267}
1268
1269static bool
Jason Evans9bad0792016-02-21 11:25:02 -08001270arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
Jason Evans243f7a02016-02-19 20:09:31 -08001271{
1272
1273 assert(opt_purge == purge_mode_decay);
1274
Jason Evans9bad0792016-02-21 11:25:02 -08001275 return (nstime_compare(&arena->decay_deadline, time) <= 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001276}
1277
1278static size_t
1279arena_decay_backlog_npages_limit(const arena_t *arena)
1280{
1281 static const uint64_t h_steps[] = {
1282#define STEP(step, h, x, y) \
1283 h,
1284 SMOOTHSTEP
1285#undef STEP
1286 };
1287 uint64_t sum;
1288 size_t npages_limit_backlog;
1289 unsigned i;
1290
1291 assert(opt_purge == purge_mode_decay);
1292
1293 /*
1294 * For each element of decay_backlog, multiply by the corresponding
1295 * fixed-point smoothstep decay factor. Sum the products, then divide
1296 * to round down to the nearest whole number of pages.
1297 */
1298 sum = 0;
1299 for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
1300 sum += arena->decay_backlog[i] * h_steps[i];
rustyx00432332016-04-12 09:50:54 +02001301 npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
Jason Evans243f7a02016-02-19 20:09:31 -08001302
1303 return (npages_limit_backlog);
1304}
1305
1306static void
Jason Evans9bad0792016-02-21 11:25:02 -08001307arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
Jason Evans243f7a02016-02-19 20:09:31 -08001308{
rustyx00432332016-04-12 09:50:54 +02001309 uint64_t nadvance_u64;
Jason Evans9bad0792016-02-21 11:25:02 -08001310 nstime_t delta;
Jason Evans243f7a02016-02-19 20:09:31 -08001311 size_t ndirty_delta;
1312
1313 assert(opt_purge == purge_mode_decay);
1314 assert(arena_decay_deadline_reached(arena, time));
1315
Jason Evans9bad0792016-02-21 11:25:02 -08001316 nstime_copy(&delta, time);
1317 nstime_subtract(&delta, &arena->decay_epoch);
rustyx00432332016-04-12 09:50:54 +02001318 nadvance_u64 = nstime_divide(&delta, &arena->decay_interval);
1319 assert(nadvance_u64 > 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001320
rustyx00432332016-04-12 09:50:54 +02001321 /* Add nadvance_u64 decay intervals to epoch. */
Jason Evans9bad0792016-02-21 11:25:02 -08001322 nstime_copy(&delta, &arena->decay_interval);
rustyx00432332016-04-12 09:50:54 +02001323 nstime_imultiply(&delta, nadvance_u64);
Jason Evans9bad0792016-02-21 11:25:02 -08001324 nstime_add(&arena->decay_epoch, &delta);
Jason Evans243f7a02016-02-19 20:09:31 -08001325
1326 /* Set a new deadline. */
1327 arena_decay_deadline_init(arena);
1328
1329 /* Update the backlog. */
rustyx00432332016-04-12 09:50:54 +02001330 if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
Jason Evans243f7a02016-02-19 20:09:31 -08001331 memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
1332 sizeof(size_t));
1333 } else {
rustyx00432332016-04-12 09:50:54 +02001334 size_t nadvance_z = (size_t)nadvance_u64;
1335
1336 assert((uint64_t)nadvance_z == nadvance_u64);
1337
1338 memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z],
1339 (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
1340 if (nadvance_z > 1) {
Jason Evans243f7a02016-02-19 20:09:31 -08001341 memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
rustyx00432332016-04-12 09:50:54 +02001342 nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
Jason Evans243f7a02016-02-19 20:09:31 -08001343 }
1344 }
1345 ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
1346 arena->decay_ndirty : 0;
1347 arena->decay_ndirty = arena->ndirty;
1348 arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
1349 arena->decay_backlog_npages_limit =
1350 arena_decay_backlog_npages_limit(arena);
1351}
1352
1353static size_t
1354arena_decay_npages_limit(arena_t *arena)
1355{
1356 size_t npages_limit;
1357
1358 assert(opt_purge == purge_mode_decay);
1359
1360 npages_limit = arena->decay_backlog_npages_limit;
1361
1362 /* Add in any dirty pages created during the current epoch. */
1363 if (arena->ndirty > arena->decay_ndirty)
1364 npages_limit += arena->ndirty - arena->decay_ndirty;
1365
1366 return (npages_limit);
1367}
1368
1369static void
1370arena_decay_init(arena_t *arena, ssize_t decay_time)
1371{
1372
1373 arena->decay_time = decay_time;
1374 if (decay_time > 0) {
Jason Evans9bad0792016-02-21 11:25:02 -08001375 nstime_init2(&arena->decay_interval, decay_time, 0);
1376 nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
Jason Evans243f7a02016-02-19 20:09:31 -08001377 }
1378
Jason Evans9bad0792016-02-21 11:25:02 -08001379 nstime_init(&arena->decay_epoch, 0);
1380 nstime_update(&arena->decay_epoch);
Jason Evans243f7a02016-02-19 20:09:31 -08001381 arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
1382 arena_decay_deadline_init(arena);
1383 arena->decay_ndirty = arena->ndirty;
1384 arena->decay_backlog_npages_limit = 0;
1385 memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
1386}
1387
1388static bool
1389arena_decay_time_valid(ssize_t decay_time)
1390{
1391
Jason Evans022f6892016-03-02 22:41:32 -08001392 if (decay_time < -1)
1393 return (false);
1394 if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
1395 return (true);
1396 return (false);
Jason Evans243f7a02016-02-19 20:09:31 -08001397}
1398
1399ssize_t
Jason Evansb2c0d632016-04-13 23:36:15 -07001400arena_decay_time_get(tsd_t *tsd, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001401{
1402 ssize_t decay_time;
1403
Jason Evansb2c0d632016-04-13 23:36:15 -07001404 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001405 decay_time = arena->decay_time;
Jason Evansb2c0d632016-04-13 23:36:15 -07001406 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001407
1408 return (decay_time);
1409}
1410
1411bool
Jason Evansb2c0d632016-04-13 23:36:15 -07001412arena_decay_time_set(tsd_t *tsd, arena_t *arena, ssize_t decay_time)
Jason Evans243f7a02016-02-19 20:09:31 -08001413{
1414
1415 if (!arena_decay_time_valid(decay_time))
1416 return (true);
1417
Jason Evansb2c0d632016-04-13 23:36:15 -07001418 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001419 /*
1420 * Restart decay backlog from scratch, which may cause many dirty pages
1421 * to be immediately purged. It would conceptually be possible to map
1422 * the old backlog onto the new backlog, but there is no justification
1423 * for such complexity since decay_time changes are intended to be
1424 * infrequent, either between the {-1, 0, >0} states, or a one-time
1425 * arbitrary change during initial arena configuration.
1426 */
1427 arena_decay_init(arena, decay_time);
Jason Evansb2c0d632016-04-13 23:36:15 -07001428 arena_maybe_purge(tsd, arena);
1429 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001430
1431 return (false);
1432}
1433
1434static void
Jason Evansb2c0d632016-04-13 23:36:15 -07001435arena_maybe_purge_ratio(tsd_t *tsd, arena_t *arena)
Jason Evans05b21be2010-03-14 17:36:10 -07001436{
1437
Jason Evans243f7a02016-02-19 20:09:31 -08001438 assert(opt_purge == purge_mode_ratio);
1439
Jason Evanse3d13062012-10-30 15:42:37 -07001440 /* Don't purge if the option is disabled. */
Jason Evans8d6a3e82015-03-18 18:55:33 -07001441 if (arena->lg_dirty_mult < 0)
Jason Evanse3d13062012-10-30 15:42:37 -07001442 return;
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001443
Jason Evans0a9f9a42015-06-22 18:50:32 -07001444 /*
1445 * Iterate, since preventing recursive purging could otherwise leave too
1446 * many dirty pages.
1447 */
1448 while (true) {
1449 size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1450 if (threshold < chunk_npages)
1451 threshold = chunk_npages;
1452 /*
1453 * Don't purge unless the number of purgeable pages exceeds the
1454 * threshold.
1455 */
1456 if (arena->ndirty <= threshold)
1457 return;
Jason Evansb2c0d632016-04-13 23:36:15 -07001458 arena_purge_to_limit(tsd, arena, threshold);
Jason Evans0a9f9a42015-06-22 18:50:32 -07001459 }
Jason Evans05b21be2010-03-14 17:36:10 -07001460}
1461
Jason Evans243f7a02016-02-19 20:09:31 -08001462static void
Jason Evansb2c0d632016-04-13 23:36:15 -07001463arena_maybe_purge_decay(tsd_t *tsd, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001464{
Jason Evans9bad0792016-02-21 11:25:02 -08001465 nstime_t time;
Jason Evans243f7a02016-02-19 20:09:31 -08001466 size_t ndirty_limit;
1467
1468 assert(opt_purge == purge_mode_decay);
1469
1470 /* Purge all or nothing if the option is disabled. */
1471 if (arena->decay_time <= 0) {
1472 if (arena->decay_time == 0)
Jason Evansb2c0d632016-04-13 23:36:15 -07001473 arena_purge_to_limit(tsd, arena, 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001474 return;
1475 }
1476
Jason Evans9bad0792016-02-21 11:25:02 -08001477 nstime_copy(&time, &arena->decay_epoch);
1478 if (unlikely(nstime_update(&time))) {
Jason Evans243f7a02016-02-19 20:09:31 -08001479 /* Time went backwards. Force an epoch advance. */
Jason Evans9bad0792016-02-21 11:25:02 -08001480 nstime_copy(&time, &arena->decay_deadline);
Jason Evans243f7a02016-02-19 20:09:31 -08001481 }
1482
1483 if (arena_decay_deadline_reached(arena, &time))
1484 arena_decay_epoch_advance(arena, &time);
1485
1486 ndirty_limit = arena_decay_npages_limit(arena);
1487
1488 /*
1489 * Don't try to purge unless the number of purgeable pages exceeds the
1490 * current limit.
1491 */
1492 if (arena->ndirty <= ndirty_limit)
1493 return;
Jason Evansb2c0d632016-04-13 23:36:15 -07001494 arena_purge_to_limit(tsd, arena, ndirty_limit);
Jason Evans243f7a02016-02-19 20:09:31 -08001495}
1496
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001497void
Jason Evansb2c0d632016-04-13 23:36:15 -07001498arena_maybe_purge(tsd_t *tsd, arena_t *arena)
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001499{
1500
1501 /* Don't recursively purge. */
1502 if (arena->purging)
1503 return;
1504
Jason Evans243f7a02016-02-19 20:09:31 -08001505 if (opt_purge == purge_mode_ratio)
Jason Evansb2c0d632016-04-13 23:36:15 -07001506 arena_maybe_purge_ratio(tsd, arena);
Jason Evans243f7a02016-02-19 20:09:31 -08001507 else
Jason Evansb2c0d632016-04-13 23:36:15 -07001508 arena_maybe_purge_decay(tsd, arena);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001509}
1510
Qinfan Wua244e502014-07-21 10:23:36 -07001511static size_t
1512arena_dirty_count(arena_t *arena)
1513{
1514 size_t ndirty = 0;
Jason Evans38e42d32015-03-10 18:15:40 -07001515 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001516 extent_node_t *chunkselm;
Qinfan Wua244e502014-07-21 10:23:36 -07001517
Jason Evans38e42d32015-03-10 18:15:40 -07001518 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001519 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001520 rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001521 size_t npages;
1522
Jason Evansf5c8f372015-03-10 18:29:49 -07001523 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001524 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001525 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001526 } else {
Jason Evans38e42d32015-03-10 18:15:40 -07001527 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
1528 rdelm);
1529 arena_chunk_map_misc_t *miscelm =
1530 arena_rd_to_miscelm(rdelm);
1531 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001532 assert(arena_mapbits_allocated_get(chunk, pageind) ==
1533 0);
1534 assert(arena_mapbits_large_get(chunk, pageind) == 0);
1535 assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
1536 npages = arena_mapbits_unallocated_size_get(chunk,
1537 pageind) >> LG_PAGE;
1538 }
Qinfan Wua244e502014-07-21 10:23:36 -07001539 ndirty += npages;
1540 }
1541
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001542 return (ndirty);
Jason Evansaa5113b2014-01-14 16:23:03 -08001543}
1544
1545static size_t
Jason Evansb2c0d632016-04-13 23:36:15 -07001546arena_stash_dirty(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001547 size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001548 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001549{
Jason Evans38e42d32015-03-10 18:15:40 -07001550 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001551 extent_node_t *chunkselm;
Qinfan Wue9708002014-07-21 18:09:04 -07001552 size_t nstashed = 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08001553
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001554 /* Stash runs/chunks according to ndirty_limit. */
Jason Evans38e42d32015-03-10 18:15:40 -07001555 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001556 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001557 rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
Jason Evansee41ad42015-02-15 18:04:46 -08001558 size_t npages;
Jason Evans38e42d32015-03-10 18:15:40 -07001559 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansaa5113b2014-01-14 16:23:03 -08001560
Jason Evansf5c8f372015-03-10 18:29:49 -07001561 if (rdelm == &chunkselm->rd) {
Jason Evans99bd94f2015-02-18 16:40:53 -08001562 extent_node_t *chunkselm_next;
1563 bool zero;
Jason Evansee41ad42015-02-15 18:04:46 -08001564 UNUSED void *chunk;
Jason Evansaa5113b2014-01-14 16:23:03 -08001565
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001566 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evans243f7a02016-02-19 20:09:31 -08001567 if (opt_purge == purge_mode_decay && arena->ndirty -
1568 (nstashed + npages) < ndirty_limit)
1569 break;
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001570
Jason Evans738e0892015-02-18 01:15:50 -08001571 chunkselm_next = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001572 /*
Jason Evans99bd94f2015-02-18 16:40:53 -08001573 * Allocate. chunkselm remains valid due to the
1574 * dalloc_node=false argument to chunk_alloc_cache().
Jason Evansee41ad42015-02-15 18:04:46 -08001575 */
Jason Evansee41ad42015-02-15 18:04:46 -08001576 zero = false;
Jason Evansb2c0d632016-04-13 23:36:15 -07001577 chunk = chunk_alloc_cache(tsd, arena, chunk_hooks,
Jason Evans99bd94f2015-02-18 16:40:53 -08001578 extent_node_addr_get(chunkselm),
1579 extent_node_size_get(chunkselm), chunksize, &zero,
1580 false);
1581 assert(chunk == extent_node_addr_get(chunkselm));
1582 assert(zero == extent_node_zeroed_get(chunkselm));
1583 extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
Jason Evans738e0892015-02-18 01:15:50 -08001584 purge_chunks_sentinel);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001585 assert(npages == (extent_node_size_get(chunkselm) >>
1586 LG_PAGE));
Jason Evansee41ad42015-02-15 18:04:46 -08001587 chunkselm = chunkselm_next;
1588 } else {
1589 arena_chunk_t *chunk =
Jason Evans38e42d32015-03-10 18:15:40 -07001590 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1591 arena_chunk_map_misc_t *miscelm =
1592 arena_rd_to_miscelm(rdelm);
1593 size_t pageind = arena_miscelm_to_pageind(miscelm);
1594 arena_run_t *run = &miscelm->run;
Jason Evansee41ad42015-02-15 18:04:46 -08001595 size_t run_size =
1596 arena_mapbits_unallocated_size_get(chunk, pageind);
Jason Evans070b3c32014-08-14 14:45:58 -07001597
Jason Evansee41ad42015-02-15 18:04:46 -08001598 npages = run_size >> LG_PAGE;
Jason Evans243f7a02016-02-19 20:09:31 -08001599 if (opt_purge == purge_mode_decay && arena->ndirty -
1600 (nstashed + npages) < ndirty_limit)
1601 break;
Jason Evansee41ad42015-02-15 18:04:46 -08001602
1603 assert(pageind + npages <= chunk_npages);
1604 assert(arena_mapbits_dirty_get(chunk, pageind) ==
1605 arena_mapbits_dirty_get(chunk, pageind+npages-1));
1606
1607 /*
1608 * If purging the spare chunk's run, make it available
1609 * prior to allocation.
1610 */
1611 if (chunk == arena->spare)
Jason Evansb2c0d632016-04-13 23:36:15 -07001612 arena_chunk_alloc(tsd, arena);
Jason Evansee41ad42015-02-15 18:04:46 -08001613
1614 /* Temporarily allocate the free dirty run. */
1615 arena_run_split_large(arena, run, run_size, false);
Jason Evans339c2b22015-02-17 22:25:56 -08001616 /* Stash. */
Jason Evansee41ad42015-02-15 18:04:46 -08001617 if (false)
Jason Evans38e42d32015-03-10 18:15:40 -07001618 qr_new(rdelm, rd_link); /* Redundant. */
Jason Evansee41ad42015-02-15 18:04:46 -08001619 else {
Jason Evans38e42d32015-03-10 18:15:40 -07001620 assert(qr_next(rdelm, rd_link) == rdelm);
1621 assert(qr_prev(rdelm, rd_link) == rdelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001622 }
Jason Evans38e42d32015-03-10 18:15:40 -07001623 qr_meld(purge_runs_sentinel, rdelm, rd_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001624 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001625
Qinfan Wue9708002014-07-21 18:09:04 -07001626 nstashed += npages;
Jason Evans243f7a02016-02-19 20:09:31 -08001627 if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
1628 ndirty_limit)
Qinfan Wue9708002014-07-21 18:09:04 -07001629 break;
Jason Evansaa5113b2014-01-14 16:23:03 -08001630 }
Qinfan Wue9708002014-07-21 18:09:04 -07001631
1632 return (nstashed);
Jason Evansaa5113b2014-01-14 16:23:03 -08001633}
1634
1635static size_t
Jason Evansb2c0d632016-04-13 23:36:15 -07001636arena_purge_stashed(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001637 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001638 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001639{
Qinfan Wue9708002014-07-21 18:09:04 -07001640 size_t npurged, nmadvise;
Jason Evans38e42d32015-03-10 18:15:40 -07001641 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001642 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001643
Jason Evansaa5113b2014-01-14 16:23:03 -08001644 if (config_stats)
1645 nmadvise = 0;
1646 npurged = 0;
Qinfan Wue9708002014-07-21 18:09:04 -07001647
Jason Evansb2c0d632016-04-13 23:36:15 -07001648 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans38e42d32015-03-10 18:15:40 -07001649 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001650 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001651 rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001652 size_t npages;
Qinfan Wue9708002014-07-21 18:09:04 -07001653
Jason Evansf5c8f372015-03-10 18:29:49 -07001654 if (rdelm == &chunkselm->rd) {
Jason Evansb49a3342015-07-28 11:28:19 -04001655 /*
1656 * Don't actually purge the chunk here because 1)
1657 * chunkselm is embedded in the chunk and must remain
1658 * valid, and 2) we deallocate the chunk in
1659 * arena_unstash_purged(), where it is destroyed,
1660 * decommitted, or purged, depending on chunk
1661 * deallocation policy.
1662 */
Jason Evansee41ad42015-02-15 18:04:46 -08001663 size_t size = extent_node_size_get(chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001664 npages = size >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001665 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001666 } else {
Jason Evans45186f02015-08-10 23:03:34 -07001667 size_t pageind, run_size, flag_unzeroed, flags, i;
1668 bool decommitted;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001669 arena_chunk_t *chunk =
1670 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001671 arena_chunk_map_misc_t *miscelm =
1672 arena_rd_to_miscelm(rdelm);
1673 pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001674 run_size = arena_mapbits_large_size_get(chunk, pageind);
1675 npages = run_size >> LG_PAGE;
Qinfan Wue9708002014-07-21 18:09:04 -07001676
Jason Evansee41ad42015-02-15 18:04:46 -08001677 assert(pageind + npages <= chunk_npages);
Jason Evansde249c82015-08-09 16:47:27 -07001678 assert(!arena_mapbits_decommitted_get(chunk, pageind));
1679 assert(!arena_mapbits_decommitted_get(chunk,
1680 pageind+npages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001681 decommitted = !chunk_hooks->decommit(chunk, chunksize,
1682 pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1683 if (decommitted) {
Jason Evans45186f02015-08-10 23:03:34 -07001684 flag_unzeroed = 0;
1685 flags = CHUNK_MAP_DECOMMITTED;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001686 } else {
Jason Evansb2c0d632016-04-13 23:36:15 -07001687 flag_unzeroed = chunk_purge_wrapper(tsd, arena,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001688 chunk_hooks, chunk, chunksize, pageind <<
Jason Evans45186f02015-08-10 23:03:34 -07001689 LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1690 flags = flag_unzeroed;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001691 }
Jason Evans45186f02015-08-10 23:03:34 -07001692 arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1693 flags);
1694 arena_mapbits_large_set(chunk, pageind, run_size,
1695 flags);
Jason Evansee41ad42015-02-15 18:04:46 -08001696
1697 /*
Jason Evans45186f02015-08-10 23:03:34 -07001698 * Set the unzeroed flag for internal pages, now that
Jason Evans8d6a3e82015-03-18 18:55:33 -07001699 * chunk_purge_wrapper() has returned whether the pages
1700 * were zeroed as a side effect of purging. This chunk
1701 * map modification is safe even though the arena mutex
Jason Evansee41ad42015-02-15 18:04:46 -08001702 * isn't currently owned by this thread, because the run
1703 * is marked as allocated, thus protecting it from being
1704 * modified by any other thread. As long as these
1705 * writes don't perturb the first and last elements'
1706 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1707 */
Jason Evans45186f02015-08-10 23:03:34 -07001708 for (i = 1; i < npages-1; i++) {
1709 arena_mapbits_internal_set(chunk, pageind+i,
Jason Evansee41ad42015-02-15 18:04:46 -08001710 flag_unzeroed);
1711 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001712 }
Qinfan Wue9708002014-07-21 18:09:04 -07001713
Jason Evansaa5113b2014-01-14 16:23:03 -08001714 npurged += npages;
1715 if (config_stats)
1716 nmadvise++;
1717 }
Jason Evansb2c0d632016-04-13 23:36:15 -07001718 malloc_mutex_lock(tsd, &arena->lock);
Qinfan Wue9708002014-07-21 18:09:04 -07001719
1720 if (config_stats) {
Jason Evansaa5113b2014-01-14 16:23:03 -08001721 arena->stats.nmadvise += nmadvise;
Qinfan Wue9708002014-07-21 18:09:04 -07001722 arena->stats.purged += npurged;
1723 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001724
1725 return (npurged);
1726}
1727
1728static void
Jason Evansb2c0d632016-04-13 23:36:15 -07001729arena_unstash_purged(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001730 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001731 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001732{
Jason Evans38e42d32015-03-10 18:15:40 -07001733 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001734 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001735
Jason Evansb49a3342015-07-28 11:28:19 -04001736 /* Deallocate chunks/runs. */
Jason Evans38e42d32015-03-10 18:15:40 -07001737 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001738 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001739 rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1740 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansf5c8f372015-03-10 18:29:49 -07001741 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001742 extent_node_t *chunkselm_next = qr_next(chunkselm,
Jason Evans738e0892015-02-18 01:15:50 -08001743 cc_link);
Jason Evans339c2b22015-02-17 22:25:56 -08001744 void *addr = extent_node_addr_get(chunkselm);
1745 size_t size = extent_node_size_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001746 bool zeroed = extent_node_zeroed_get(chunkselm);
Jason Evansde249c82015-08-09 16:47:27 -07001747 bool committed = extent_node_committed_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001748 extent_node_dirty_remove(chunkselm);
Jason Evansb2c0d632016-04-13 23:36:15 -07001749 arena_node_dalloc(tsd, arena, chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001750 chunkselm = chunkselm_next;
Jason Evansb2c0d632016-04-13 23:36:15 -07001751 chunk_dalloc_wrapper(tsd, arena, chunk_hooks, addr,
1752 size, zeroed, committed);
Jason Evansee41ad42015-02-15 18:04:46 -08001753 } else {
Jason Evans8fadb1a2015-08-04 10:49:46 -07001754 arena_chunk_t *chunk =
1755 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001756 arena_chunk_map_misc_t *miscelm =
1757 arena_rd_to_miscelm(rdelm);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001758 size_t pageind = arena_miscelm_to_pageind(miscelm);
1759 bool decommitted = (arena_mapbits_decommitted_get(chunk,
1760 pageind) != 0);
Jason Evans38e42d32015-03-10 18:15:40 -07001761 arena_run_t *run = &miscelm->run;
1762 qr_remove(rdelm, rd_link);
Jason Evansb2c0d632016-04-13 23:36:15 -07001763 arena_run_dalloc(tsd, arena, run, false, true,
1764 decommitted);
Jason Evansee41ad42015-02-15 18:04:46 -08001765 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001766 }
1767}
1768
Jason Evans243f7a02016-02-19 20:09:31 -08001769/*
1770 * NB: ndirty_limit is interpreted differently depending on opt_purge:
1771 * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
1772 * desired state:
1773 * (arena->ndirty <= ndirty_limit)
1774 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
1775 * violating the invariant:
1776 * (arena->ndirty >= ndirty_limit)
1777 */
Jason Evans8d6a3e82015-03-18 18:55:33 -07001778static void
Jason Evansb2c0d632016-04-13 23:36:15 -07001779arena_purge_to_limit(tsd_t *tsd, arena_t *arena, size_t ndirty_limit)
Jason Evanse476f8a2010-01-16 09:53:50 -08001780{
Jason Evansb2c0d632016-04-13 23:36:15 -07001781 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsd, arena);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001782 size_t npurge, npurged;
Jason Evans38e42d32015-03-10 18:15:40 -07001783 arena_runs_dirty_link_t purge_runs_sentinel;
Jason Evansee41ad42015-02-15 18:04:46 -08001784 extent_node_t purge_chunks_sentinel;
Qinfan Wue9708002014-07-21 18:09:04 -07001785
Jason Evans0a9f9a42015-06-22 18:50:32 -07001786 arena->purging = true;
1787
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001788 /*
1789 * Calls to arena_dirty_count() are disabled even for debug builds
1790 * because overhead grows nonlinearly as memory usage increases.
1791 */
1792 if (false && config_debug) {
Qinfan Wu90737fc2014-07-21 19:39:20 -07001793 size_t ndirty = arena_dirty_count(arena);
Qinfan Wua244e502014-07-21 10:23:36 -07001794 assert(ndirty == arena->ndirty);
Jason Evans2caa4712010-03-04 21:35:07 -08001795 }
Jason Evans243f7a02016-02-19 20:09:31 -08001796 assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1797 arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001798
1799 qr_new(&purge_runs_sentinel, rd_link);
1800 extent_node_dirty_linkage_init(&purge_chunks_sentinel);
1801
Jason Evansb2c0d632016-04-13 23:36:15 -07001802 npurge = arena_stash_dirty(tsd, arena, &chunk_hooks, ndirty_limit,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001803 &purge_runs_sentinel, &purge_chunks_sentinel);
1804 if (npurge == 0)
1805 goto label_return;
Jason Evansb2c0d632016-04-13 23:36:15 -07001806 npurged = arena_purge_stashed(tsd, arena, &chunk_hooks,
1807 &purge_runs_sentinel, &purge_chunks_sentinel);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001808 assert(npurged == npurge);
Jason Evansb2c0d632016-04-13 23:36:15 -07001809 arena_unstash_purged(tsd, arena, &chunk_hooks, &purge_runs_sentinel,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001810 &purge_chunks_sentinel);
Jason Evanse476f8a2010-01-16 09:53:50 -08001811
Jason Evans7372b152012-02-10 20:22:09 -08001812 if (config_stats)
1813 arena->stats.npurge++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001814
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001815label_return:
Jason Evans0a9f9a42015-06-22 18:50:32 -07001816 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08001817}
1818
Jason Evans6005f072010-09-30 16:55:08 -07001819void
Jason Evansb2c0d632016-04-13 23:36:15 -07001820arena_purge(tsd_t *tsd, arena_t *arena, bool all)
Jason Evans6005f072010-09-30 16:55:08 -07001821{
1822
Jason Evansb2c0d632016-04-13 23:36:15 -07001823 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001824 if (all)
Jason Evansb2c0d632016-04-13 23:36:15 -07001825 arena_purge_to_limit(tsd, arena, 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001826 else
Jason Evansb2c0d632016-04-13 23:36:15 -07001827 arena_maybe_purge(tsd, arena);
1828 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans6005f072010-09-30 16:55:08 -07001829}
1830
Jason Evanse476f8a2010-01-16 09:53:50 -08001831static void
Jason Evans19ff2ce2016-04-22 14:37:17 -07001832arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
1833{
1834 size_t pageind, npages;
1835
1836 cassert(config_prof);
1837 assert(opt_prof);
1838
1839 /*
1840 * Iterate over the allocated runs and remove profiled allocations from
1841 * the sample set.
1842 */
1843 for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
1844 if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
1845 if (arena_mapbits_large_get(chunk, pageind) != 0) {
1846 void *ptr = (void *)((uintptr_t)chunk + (pageind
1847 << LG_PAGE));
1848 size_t usize = isalloc(tsd, ptr, config_prof);
1849
1850 prof_free(tsd, ptr, usize);
1851 npages = arena_mapbits_large_size_get(chunk,
1852 pageind) >> LG_PAGE;
1853 } else {
1854 /* Skip small run. */
1855 size_t binind = arena_mapbits_binind_get(chunk,
1856 pageind);
1857 arena_bin_info_t *bin_info =
1858 &arena_bin_info[binind];
1859 npages = bin_info->run_size >> LG_PAGE;
1860 }
1861 } else {
1862 /* Skip unallocated run. */
1863 npages = arena_mapbits_unallocated_size_get(chunk,
1864 pageind) >> LG_PAGE;
1865 }
1866 assert(pageind + npages <= chunk_npages);
1867 }
1868}
1869
1870void
1871arena_reset(tsd_t *tsd, arena_t *arena)
1872{
1873 unsigned i;
1874 extent_node_t *node;
1875
1876 /*
1877 * Locking in this function is unintuitive. The caller guarantees that
1878 * no concurrent operations are happening in this arena, but there are
1879 * still reasons that some locking is necessary:
1880 *
1881 * - Some of the functions in the transitive closure of calls assume
1882 * appropriate locks are held, and in some cases these locks are
1883 * temporarily dropped to avoid lock order reversal or deadlock due to
1884 * reentry.
1885 * - mallctl("epoch", ...) may concurrently refresh stats. While
1886 * strictly speaking this is a "concurrent operation", disallowing
1887 * stats refreshes would impose an inconvenient burden.
1888 */
1889
1890 /* Remove large allocations from prof sample set. */
1891 if (config_prof && opt_prof) {
1892 ql_foreach(node, &arena->achunks, ql_link) {
1893 arena_achunk_prof_reset(tsd, arena,
1894 extent_node_addr_get(node));
1895 }
1896 }
1897
Jason Evans7e674952016-04-25 13:26:54 -07001898 /* Reset curruns for large size classes. */
1899 if (config_stats) {
1900 for (i = 0; i < nlclasses; i++)
1901 arena->stats.lstats[i].curruns = 0;
1902 }
1903
Jason Evans19ff2ce2016-04-22 14:37:17 -07001904 /* Huge allocations. */
1905 malloc_mutex_lock(tsd, &arena->huge_mtx);
1906 for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
1907 ql_last(&arena->huge, ql_link)) {
1908 void *ptr = extent_node_addr_get(node);
Jason Evans7e674952016-04-25 13:26:54 -07001909 size_t usize;
Jason Evans19ff2ce2016-04-22 14:37:17 -07001910
1911 malloc_mutex_unlock(tsd, &arena->huge_mtx);
Jason Evans7e674952016-04-25 13:26:54 -07001912 if (config_stats || (config_prof && opt_prof))
Jason Evans19ff2ce2016-04-22 14:37:17 -07001913 usize = isalloc(tsd, ptr, config_prof);
Jason Evans7e674952016-04-25 13:26:54 -07001914 /* Remove huge allocation from prof sample set. */
1915 if (config_prof && opt_prof)
Jason Evans19ff2ce2016-04-22 14:37:17 -07001916 prof_free(tsd, ptr, usize);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001917 huge_dalloc(tsd, ptr);
1918 malloc_mutex_lock(tsd, &arena->huge_mtx);
Jason Evans7e674952016-04-25 13:26:54 -07001919 /* Cancel out unwanted effects on stats. */
1920 if (config_stats)
1921 arena_huge_reset_stats_cancel(arena, usize);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001922 }
1923 malloc_mutex_unlock(tsd, &arena->huge_mtx);
1924
1925 malloc_mutex_lock(tsd, &arena->lock);
1926
1927 /* Bins. */
1928 for (i = 0; i < NBINS; i++) {
1929 arena_bin_t *bin = &arena->bins[i];
1930 malloc_mutex_lock(tsd, &bin->lock);
1931 bin->runcur = NULL;
1932 arena_run_heap_new(&bin->runs);
1933 if (config_stats) {
1934 bin->stats.curregs = 0;
1935 bin->stats.curruns = 0;
1936 }
1937 malloc_mutex_unlock(tsd, &bin->lock);
1938 }
1939
1940 /*
1941 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
1942 * chains directly correspond.
1943 */
1944 qr_new(&arena->runs_dirty, rd_link);
1945 for (node = qr_next(&arena->chunks_cache, cc_link);
1946 node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
1947 qr_new(&node->rd, rd_link);
1948 qr_meld(&arena->runs_dirty, &node->rd, rd_link);
1949 }
1950
1951 /* Arena chunks. */
1952 for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
1953 ql_last(&arena->achunks, ql_link)) {
1954 ql_remove(&arena->achunks, node, ql_link);
1955 arena_chunk_discard(tsd, arena, extent_node_addr_get(node));
1956 }
1957
1958 /* Spare. */
1959 if (arena->spare != NULL) {
1960 arena_chunk_discard(tsd, arena, arena->spare);
1961 arena->spare = NULL;
1962 }
1963
1964 assert(!arena->purging);
1965 arena->nactive = 0;
1966
1967 for(i = 0; i < runs_avail_nclasses; i++)
1968 arena_run_heap_new(&arena->runs_avail[i]);
1969
1970 malloc_mutex_unlock(tsd, &arena->lock);
1971}
1972
1973static void
Jason Evansaa5113b2014-01-14 16:23:03 -08001974arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001975 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
1976 size_t flag_decommitted)
Jason Evanse476f8a2010-01-16 09:53:50 -08001977{
Jason Evansaa5113b2014-01-14 16:23:03 -08001978 size_t size = *p_size;
1979 size_t run_ind = *p_run_ind;
1980 size_t run_pages = *p_run_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001981
1982 /* Try to coalesce forward. */
1983 if (run_ind + run_pages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07001984 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
Jason Evans8fadb1a2015-08-04 10:49:46 -07001985 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
1986 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
1987 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07001988 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1989 run_ind+run_pages);
Jason Evansae4c7b42012-04-02 07:04:34 -07001990 size_t nrun_pages = nrun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001991
1992 /*
1993 * Remove successor from runs_avail; the coalesced run is
1994 * inserted later.
1995 */
Jason Evans203484e2012-05-02 00:30:36 -07001996 assert(arena_mapbits_unallocated_size_get(chunk,
1997 run_ind+run_pages+nrun_pages-1) == nrun_size);
1998 assert(arena_mapbits_dirty_get(chunk,
1999 run_ind+run_pages+nrun_pages-1) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002000 assert(arena_mapbits_decommitted_get(chunk,
2001 run_ind+run_pages+nrun_pages-1) == flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07002002 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08002003
Jason Evansee41ad42015-02-15 18:04:46 -08002004 /*
2005 * If the successor is dirty, remove it from the set of dirty
2006 * pages.
2007 */
Qinfan Wu04d60a12014-07-18 14:21:17 -07002008 if (flag_dirty != 0) {
Jason Evansee41ad42015-02-15 18:04:46 -08002009 arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
Jason Evans070b3c32014-08-14 14:45:58 -07002010 nrun_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07002011 }
2012
Jason Evanse476f8a2010-01-16 09:53:50 -08002013 size += nrun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07002014 run_pages += nrun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002015
Jason Evans203484e2012-05-02 00:30:36 -07002016 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2017 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2018 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002019 }
2020
2021 /* Try to coalesce backward. */
Jason Evansaa5113b2014-01-14 16:23:03 -08002022 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
2023 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
Jason Evans8fadb1a2015-08-04 10:49:46 -07002024 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
2025 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07002026 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
2027 run_ind-1);
Jason Evansae4c7b42012-04-02 07:04:34 -07002028 size_t prun_pages = prun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08002029
Jason Evans12ca9142010-10-17 19:56:09 -07002030 run_ind -= prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002031
2032 /*
2033 * Remove predecessor from runs_avail; the coalesced run is
2034 * inserted later.
2035 */
Jason Evans203484e2012-05-02 00:30:36 -07002036 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2037 prun_size);
2038 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002039 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2040 flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07002041 arena_avail_remove(arena, chunk, run_ind, prun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08002042
Jason Evansee41ad42015-02-15 18:04:46 -08002043 /*
2044 * If the predecessor is dirty, remove it from the set of dirty
2045 * pages.
2046 */
2047 if (flag_dirty != 0) {
2048 arena_run_dirty_remove(arena, chunk, run_ind,
2049 prun_pages);
2050 }
Qinfan Wu04d60a12014-07-18 14:21:17 -07002051
Jason Evanse476f8a2010-01-16 09:53:50 -08002052 size += prun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07002053 run_pages += prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002054
Jason Evans203484e2012-05-02 00:30:36 -07002055 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2056 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2057 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002058 }
2059
Jason Evansaa5113b2014-01-14 16:23:03 -08002060 *p_size = size;
2061 *p_run_ind = run_ind;
2062 *p_run_pages = run_pages;
2063}
2064
Jason Evans8fadb1a2015-08-04 10:49:46 -07002065static size_t
2066arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2067 size_t run_ind)
2068{
2069 size_t size;
2070
2071 assert(run_ind >= map_bias);
2072 assert(run_ind < chunk_npages);
2073
2074 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
2075 size = arena_mapbits_large_size_get(chunk, run_ind);
2076 assert(size == PAGE || arena_mapbits_large_size_get(chunk,
2077 run_ind+(size>>LG_PAGE)-1) == 0);
2078 } else {
2079 arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
2080 size = bin_info->run_size;
2081 }
2082
2083 return (size);
2084}
2085
Jason Evansaa5113b2014-01-14 16:23:03 -08002086static void
Jason Evansb2c0d632016-04-13 23:36:15 -07002087arena_run_dalloc(tsd_t *tsd, arena_t *arena, arena_run_t *run, bool dirty,
2088 bool cleaned, bool decommitted)
Jason Evansaa5113b2014-01-14 16:23:03 -08002089{
2090 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002091 arena_chunk_map_misc_t *miscelm;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002092 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
Jason Evansaa5113b2014-01-14 16:23:03 -08002093
2094 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002095 miscelm = arena_run_to_miscelm(run);
2096 run_ind = arena_miscelm_to_pageind(miscelm);
Jason Evansaa5113b2014-01-14 16:23:03 -08002097 assert(run_ind >= map_bias);
2098 assert(run_ind < chunk_npages);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002099 size = arena_run_size_get(arena, chunk, run, run_ind);
Jason Evansaa5113b2014-01-14 16:23:03 -08002100 run_pages = (size >> LG_PAGE);
Jason Evans40ee9aa2016-02-27 12:34:50 -08002101 arena_nactive_sub(arena, run_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -08002102
2103 /*
2104 * The run is dirty if the caller claims to have dirtied it, as well as
2105 * if it was already dirty before being allocated and the caller
2106 * doesn't claim to have cleaned it.
2107 */
2108 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2109 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07002110 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
2111 != 0)
Jason Evansaa5113b2014-01-14 16:23:03 -08002112 dirty = true;
2113 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002114 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08002115
2116 /* Mark pages as unallocated in the chunk map. */
Jason Evans8fadb1a2015-08-04 10:49:46 -07002117 if (dirty || decommitted) {
2118 size_t flags = flag_dirty | flag_decommitted;
2119 arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08002120 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07002121 flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08002122 } else {
2123 arena_mapbits_unallocated_set(chunk, run_ind, size,
2124 arena_mapbits_unzeroed_get(chunk, run_ind));
2125 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2126 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
2127 }
2128
Jason Evans8fadb1a2015-08-04 10:49:46 -07002129 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
2130 flag_dirty, flag_decommitted);
Jason Evansaa5113b2014-01-14 16:23:03 -08002131
Jason Evanse476f8a2010-01-16 09:53:50 -08002132 /* Insert into runs_avail, now that coalescing is complete. */
Jason Evans203484e2012-05-02 00:30:36 -07002133 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2134 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
2135 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2136 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07002137 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2138 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
Qinfan Wu90737fc2014-07-21 19:39:20 -07002139 arena_avail_insert(arena, chunk, run_ind, run_pages);
Jason Evans8d4203c2010-04-13 20:53:21 -07002140
Jason Evans070b3c32014-08-14 14:45:58 -07002141 if (dirty)
Jason Evansee41ad42015-02-15 18:04:46 -08002142 arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07002143
Jason Evans203484e2012-05-02 00:30:36 -07002144 /* Deallocate chunk if it is now completely unused. */
Jason Evans155bfa72014-10-05 17:54:10 -07002145 if (size == arena_maxrun) {
Jason Evans203484e2012-05-02 00:30:36 -07002146 assert(run_ind == map_bias);
Jason Evans155bfa72014-10-05 17:54:10 -07002147 assert(run_pages == (arena_maxrun >> LG_PAGE));
Jason Evansb2c0d632016-04-13 23:36:15 -07002148 arena_chunk_dalloc(tsd, arena, chunk);
Jason Evans203484e2012-05-02 00:30:36 -07002149 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002150
Jason Evans4fb7f512010-01-27 18:27:09 -08002151 /*
Jason Evans8d4203c2010-04-13 20:53:21 -07002152 * It is okay to do dirty page processing here even if the chunk was
Jason Evans4fb7f512010-01-27 18:27:09 -08002153 * deallocated above, since in that case it is the spare. Waiting
2154 * until after possible chunk deallocation to do dirty processing
2155 * allows for an old spare to be fully deallocated, thus decreasing the
2156 * chances of spuriously crossing the dirty page purging threshold.
2157 */
Jason Evans8d4203c2010-04-13 20:53:21 -07002158 if (dirty)
Jason Evansb2c0d632016-04-13 23:36:15 -07002159 arena_maybe_purge(tsd, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002160}
2161
2162static void
Jason Evansb2c0d632016-04-13 23:36:15 -07002163arena_run_trim_head(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
2164 arena_run_t *run, size_t oldsize, size_t newsize)
Jason Evanse476f8a2010-01-16 09:53:50 -08002165{
Jason Evans0c5dd032014-09-29 01:31:39 -07002166 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2167 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07002168 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002169 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans1f27abc2015-08-11 12:42:33 -07002170 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2171 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2172 CHUNK_MAP_UNZEROED : 0;
Jason Evanse476f8a2010-01-16 09:53:50 -08002173
2174 assert(oldsize > newsize);
2175
2176 /*
2177 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07002178 * leading run as separately allocated. Set the last element of each
2179 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08002180 */
Jason Evans203484e2012-05-02 00:30:36 -07002181 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evans1f27abc2015-08-11 12:42:33 -07002182 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2183 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2184 pageind+head_npages-1)));
2185 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
2186 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
Jason Evans940a2e02010-10-17 17:51:37 -07002187
Jason Evans7372b152012-02-10 20:22:09 -08002188 if (config_debug) {
Jason Evansae4c7b42012-04-02 07:04:34 -07002189 UNUSED size_t tail_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002190 assert(arena_mapbits_large_size_get(chunk,
2191 pageind+head_npages+tail_npages-1) == 0);
2192 assert(arena_mapbits_dirty_get(chunk,
2193 pageind+head_npages+tail_npages-1) == flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07002194 }
Jason Evansd8ceef62012-05-10 20:59:39 -07002195 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
Jason Evans1f27abc2015-08-11 12:42:33 -07002196 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2197 pageind+head_npages)));
Jason Evanse476f8a2010-01-16 09:53:50 -08002198
Jason Evansb2c0d632016-04-13 23:36:15 -07002199 arena_run_dalloc(tsd, arena, run, false, false, (flag_decommitted !=
2200 0));
Jason Evanse476f8a2010-01-16 09:53:50 -08002201}
2202
2203static void
Jason Evansb2c0d632016-04-13 23:36:15 -07002204arena_run_trim_tail(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
2205 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
Jason Evanse476f8a2010-01-16 09:53:50 -08002206{
Jason Evans0c5dd032014-09-29 01:31:39 -07002207 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2208 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07002209 size_t head_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002210 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans1f27abc2015-08-11 12:42:33 -07002211 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2212 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2213 CHUNK_MAP_UNZEROED : 0;
Jason Evans0c5dd032014-09-29 01:31:39 -07002214 arena_chunk_map_misc_t *tail_miscelm;
2215 arena_run_t *tail_run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002216
2217 assert(oldsize > newsize);
2218
2219 /*
2220 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07002221 * trailing run as separately allocated. Set the last element of each
2222 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08002223 */
Jason Evans203484e2012-05-02 00:30:36 -07002224 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evans1f27abc2015-08-11 12:42:33 -07002225 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2226 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2227 pageind+head_npages-1)));
2228 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
2229 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
Jason Evans940a2e02010-10-17 17:51:37 -07002230
Jason Evans203484e2012-05-02 00:30:36 -07002231 if (config_debug) {
2232 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
2233 assert(arena_mapbits_large_size_get(chunk,
2234 pageind+head_npages+tail_npages-1) == 0);
2235 assert(arena_mapbits_dirty_get(chunk,
2236 pageind+head_npages+tail_npages-1) == flag_dirty);
2237 }
2238 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
Jason Evans1f27abc2015-08-11 12:42:33 -07002239 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2240 pageind+head_npages)));
Jason Evanse476f8a2010-01-16 09:53:50 -08002241
Jason Evans61a6dfc2016-03-23 16:04:38 -07002242 tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
Jason Evans0c5dd032014-09-29 01:31:39 -07002243 tail_run = &tail_miscelm->run;
Jason Evansb2c0d632016-04-13 23:36:15 -07002244 arena_run_dalloc(tsd, arena, tail_run, dirty, false, (flag_decommitted
2245 != 0));
Jason Evanse476f8a2010-01-16 09:53:50 -08002246}
2247
Jason Evanse7a10582012-02-13 17:36:52 -08002248static void
2249arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
2250{
Jason Evans0c5dd032014-09-29 01:31:39 -07002251 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08002252
Jason Evansc6a2c392016-03-26 17:30:37 -07002253 arena_run_heap_insert(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08002254}
2255
2256static arena_run_t *
2257arena_bin_nonfull_run_tryget(arena_bin_t *bin)
2258{
Jason Evansc6a2c392016-03-26 17:30:37 -07002259 arena_chunk_map_misc_t *miscelm;
2260
2261 miscelm = arena_run_heap_remove_first(&bin->runs);
2262 if (miscelm == NULL)
2263 return (NULL);
2264 if (config_stats)
2265 bin->stats.reruns++;
2266
2267 return (&miscelm->run);
Jason Evanse7a10582012-02-13 17:36:52 -08002268}
2269
2270static arena_run_t *
Jason Evansb2c0d632016-04-13 23:36:15 -07002271arena_bin_nonfull_run_get(tsd_t *tsd, arena_t *arena, arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002272{
Jason Evanse476f8a2010-01-16 09:53:50 -08002273 arena_run_t *run;
Jason Evansd01fd192015-08-19 15:21:32 -07002274 szind_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002275 arena_bin_info_t *bin_info;
Jason Evanse476f8a2010-01-16 09:53:50 -08002276
2277 /* Look for a usable run. */
Jason Evanse7a10582012-02-13 17:36:52 -08002278 run = arena_bin_nonfull_run_tryget(bin);
2279 if (run != NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08002280 return (run);
Jason Evanse476f8a2010-01-16 09:53:50 -08002281 /* No existing runs have any space available. */
2282
Jason Evans49f7e8f2011-03-15 13:59:15 -07002283 binind = arena_bin_index(arena, bin);
2284 bin_info = &arena_bin_info[binind];
2285
Jason Evanse476f8a2010-01-16 09:53:50 -08002286 /* Allocate a new run. */
Jason Evansb2c0d632016-04-13 23:36:15 -07002287 malloc_mutex_unlock(tsd, &bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002288 /******************************/
Jason Evansb2c0d632016-04-13 23:36:15 -07002289 malloc_mutex_lock(tsd, &arena->lock);
2290 run = arena_run_alloc_small(tsd, arena, bin_info->run_size, binind);
Jason Evanse00572b2010-03-14 19:43:56 -07002291 if (run != NULL) {
2292 /* Initialize run internals. */
Jason Evans381c23d2014-10-10 23:01:03 -07002293 run->binind = binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002294 run->nfree = bin_info->nregs;
Jason Evans0c5dd032014-09-29 01:31:39 -07002295 bitmap_init(run->bitmap, &bin_info->bitmap_info);
Jason Evanse69bee02010-03-15 22:25:23 -07002296 }
Jason Evansb2c0d632016-04-13 23:36:15 -07002297 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002298 /********************************/
Jason Evansb2c0d632016-04-13 23:36:15 -07002299 malloc_mutex_lock(tsd, &bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002300 if (run != NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08002301 if (config_stats) {
2302 bin->stats.nruns++;
2303 bin->stats.curruns++;
Jason Evans7372b152012-02-10 20:22:09 -08002304 }
Jason Evanse00572b2010-03-14 19:43:56 -07002305 return (run);
2306 }
2307
2308 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08002309 * arena_run_alloc_small() failed, but another thread may have made
Jason Evans940a2e02010-10-17 17:51:37 -07002310 * sufficient memory available while this one dropped bin->lock above,
Jason Evanse00572b2010-03-14 19:43:56 -07002311 * so search one more time.
2312 */
Jason Evanse7a10582012-02-13 17:36:52 -08002313 run = arena_bin_nonfull_run_tryget(bin);
2314 if (run != NULL)
Jason Evanse00572b2010-03-14 19:43:56 -07002315 return (run);
Jason Evanse00572b2010-03-14 19:43:56 -07002316
2317 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08002318}
2319
Jason Evans1e0a6362010-03-13 13:41:58 -08002320/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08002321static void *
Jason Evansb2c0d632016-04-13 23:36:15 -07002322arena_bin_malloc_hard(tsd_t *tsd, arena_t *arena, arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002323{
Jason Evansd01fd192015-08-19 15:21:32 -07002324 szind_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002325 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07002326 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002327
Jason Evans49f7e8f2011-03-15 13:59:15 -07002328 binind = arena_bin_index(arena, bin);
2329 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07002330 bin->runcur = NULL;
Jason Evansb2c0d632016-04-13 23:36:15 -07002331 run = arena_bin_nonfull_run_get(tsd, arena, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07002332 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
2333 /*
2334 * Another thread updated runcur while this one ran without the
2335 * bin lock in arena_bin_nonfull_run_get().
2336 */
Dmitry-Mea306a602015-09-04 13:15:28 +03002337 void *ret;
Jason Evanse00572b2010-03-14 19:43:56 -07002338 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002339 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07002340 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07002341 arena_chunk_t *chunk;
2342
2343 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08002344 * arena_run_alloc_small() may have allocated run, or
2345 * it may have pulled run from the bin's run tree.
2346 * Therefore it is unsafe to make any assumptions about
2347 * how run has previously been used, and
2348 * arena_bin_lower_run() must be called, as if a region
2349 * were just deallocated from the run.
Jason Evans940a2e02010-10-17 17:51:37 -07002350 */
2351 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evansb2c0d632016-04-13 23:36:15 -07002352 if (run->nfree == bin_info->nregs) {
2353 arena_dalloc_bin_run(tsd, arena, chunk, run,
2354 bin);
2355 } else
Jason Evans8de6a022010-10-17 20:57:30 -07002356 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07002357 }
2358 return (ret);
2359 }
2360
2361 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08002362 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07002363
2364 bin->runcur = run;
2365
Jason Evanse476f8a2010-01-16 09:53:50 -08002366 assert(bin->runcur->nfree > 0);
2367
Jason Evans49f7e8f2011-03-15 13:59:15 -07002368 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08002369}
2370
Jason Evans86815df2010-03-13 20:32:56 -08002371void
Jason Evans243f7a02016-02-19 20:09:31 -08002372arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
2373 szind_t binind, uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08002374{
2375 unsigned i, nfill;
2376 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002377
2378 assert(tbin->ncached == 0);
2379
Jason Evansb2c0d632016-04-13 23:36:15 -07002380 if (config_prof && arena_prof_accum(tsd, arena, prof_accumbytes))
2381 prof_idump(tsd);
Jason Evanse69bee02010-03-15 22:25:23 -07002382 bin = &arena->bins[binind];
Jason Evansb2c0d632016-04-13 23:36:15 -07002383 malloc_mutex_lock(tsd, &bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07002384 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2385 tbin->lg_fill_div); i < nfill; i++) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002386 arena_run_t *run;
2387 void *ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002388 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002389 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002390 else
Jason Evansb2c0d632016-04-13 23:36:15 -07002391 ptr = arena_bin_malloc_hard(tsd, arena, bin);
Jason Evansf11a6772014-10-05 13:05:10 -07002392 if (ptr == NULL) {
2393 /*
2394 * OOM. tbin->avail isn't yet filled down to its first
2395 * element, so the successful allocations (if any) must
Qi Wangf4a0f322015-10-27 15:12:10 -07002396 * be moved just before tbin->avail before bailing out.
Jason Evansf11a6772014-10-05 13:05:10 -07002397 */
2398 if (i > 0) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002399 memmove(tbin->avail - i, tbin->avail - nfill,
Jason Evansf11a6772014-10-05 13:05:10 -07002400 i * sizeof(void *));
2401 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002402 break;
Jason Evansf11a6772014-10-05 13:05:10 -07002403 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002404 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002405 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2406 true);
2407 }
Jason Evans9c43c132011-03-18 10:53:15 -07002408 /* Insert such that low regions get used first. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002409 *(tbin->avail - nfill + i) = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002410 }
Jason Evans7372b152012-02-10 20:22:09 -08002411 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002412 bin->stats.nmalloc += i;
2413 bin->stats.nrequests += tbin->tstats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002414 bin->stats.curregs += i;
Jason Evans7372b152012-02-10 20:22:09 -08002415 bin->stats.nfills++;
2416 tbin->tstats.nrequests = 0;
2417 }
Jason Evansb2c0d632016-04-13 23:36:15 -07002418 malloc_mutex_unlock(tsd, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002419 tbin->ncached = i;
Jason Evans243f7a02016-02-19 20:09:31 -08002420 arena_decay_tick(tsd, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002421}
Jason Evanse476f8a2010-01-16 09:53:50 -08002422
Jason Evans122449b2012-04-06 00:35:09 -07002423void
2424arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2425{
2426
Chris Petersona82070e2016-03-27 23:28:39 -07002427 size_t redzone_size = bin_info->redzone_size;
2428
Jason Evans122449b2012-04-06 00:35:09 -07002429 if (zero) {
Chris Petersona82070e2016-03-27 23:28:39 -07002430 memset((void *)((uintptr_t)ptr - redzone_size),
2431 JEMALLOC_ALLOC_JUNK, redzone_size);
2432 memset((void *)((uintptr_t)ptr + bin_info->reg_size),
2433 JEMALLOC_ALLOC_JUNK, redzone_size);
Jason Evans122449b2012-04-06 00:35:09 -07002434 } else {
Chris Petersona82070e2016-03-27 23:28:39 -07002435 memset((void *)((uintptr_t)ptr - redzone_size),
2436 JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
Jason Evans122449b2012-04-06 00:35:09 -07002437 }
2438}
2439
Jason Evans0d6c5d82013-12-17 15:14:36 -08002440#ifdef JEMALLOC_JET
2441#undef arena_redzone_corruption
Jason Evansab0cfe02016-04-18 15:11:20 -07002442#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
Jason Evans0d6c5d82013-12-17 15:14:36 -08002443#endif
2444static void
2445arena_redzone_corruption(void *ptr, size_t usize, bool after,
2446 size_t offset, uint8_t byte)
2447{
2448
Jason Evans5fae7dc2015-07-23 13:56:25 -07002449 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2450 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
Jason Evans0d6c5d82013-12-17 15:14:36 -08002451 after ? "after" : "before", ptr, usize, byte);
2452}
2453#ifdef JEMALLOC_JET
Jason Evans0d6c5d82013-12-17 15:14:36 -08002454#undef arena_redzone_corruption
Jason Evans6b694c42014-01-07 16:47:56 -08002455#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2456arena_redzone_corruption_t *arena_redzone_corruption =
Jason Evansab0cfe02016-04-18 15:11:20 -07002457 JEMALLOC_N(n_arena_redzone_corruption);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002458#endif
2459
2460static void
2461arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07002462{
Jason Evans122449b2012-04-06 00:35:09 -07002463 bool error = false;
2464
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002465 if (opt_junk_alloc) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002466 size_t size = bin_info->reg_size;
2467 size_t redzone_size = bin_info->redzone_size;
2468 size_t i;
2469
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002470 for (i = 1; i <= redzone_size; i++) {
2471 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
Chris Petersona82070e2016-03-27 23:28:39 -07002472 if (*byte != JEMALLOC_ALLOC_JUNK) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002473 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002474 arena_redzone_corruption(ptr, size, false, i,
2475 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002476 if (reset)
Chris Petersona82070e2016-03-27 23:28:39 -07002477 *byte = JEMALLOC_ALLOC_JUNK;
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002478 }
2479 }
2480 for (i = 0; i < redzone_size; i++) {
2481 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
Chris Petersona82070e2016-03-27 23:28:39 -07002482 if (*byte != JEMALLOC_ALLOC_JUNK) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002483 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002484 arena_redzone_corruption(ptr, size, true, i,
2485 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002486 if (reset)
Chris Petersona82070e2016-03-27 23:28:39 -07002487 *byte = JEMALLOC_ALLOC_JUNK;
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002488 }
Jason Evans122449b2012-04-06 00:35:09 -07002489 }
2490 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002491
Jason Evans122449b2012-04-06 00:35:09 -07002492 if (opt_abort && error)
2493 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08002494}
Jason Evans122449b2012-04-06 00:35:09 -07002495
Jason Evans6b694c42014-01-07 16:47:56 -08002496#ifdef JEMALLOC_JET
2497#undef arena_dalloc_junk_small
Jason Evansab0cfe02016-04-18 15:11:20 -07002498#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
Jason Evans6b694c42014-01-07 16:47:56 -08002499#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08002500void
2501arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2502{
2503 size_t redzone_size = bin_info->redzone_size;
2504
2505 arena_redzones_validate(ptr, bin_info, false);
Chris Petersona82070e2016-03-27 23:28:39 -07002506 memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
Jason Evans122449b2012-04-06 00:35:09 -07002507 bin_info->reg_interval);
2508}
Jason Evans6b694c42014-01-07 16:47:56 -08002509#ifdef JEMALLOC_JET
2510#undef arena_dalloc_junk_small
2511#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2512arena_dalloc_junk_small_t *arena_dalloc_junk_small =
Jason Evansab0cfe02016-04-18 15:11:20 -07002513 JEMALLOC_N(n_arena_dalloc_junk_small);
Jason Evans6b694c42014-01-07 16:47:56 -08002514#endif
Jason Evans122449b2012-04-06 00:35:09 -07002515
Jason Evans0d6c5d82013-12-17 15:14:36 -08002516void
2517arena_quarantine_junk_small(void *ptr, size_t usize)
2518{
Jason Evansd01fd192015-08-19 15:21:32 -07002519 szind_t binind;
Jason Evans0d6c5d82013-12-17 15:14:36 -08002520 arena_bin_info_t *bin_info;
2521 cassert(config_fill);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002522 assert(opt_junk_free);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002523 assert(opt_quarantine);
2524 assert(usize <= SMALL_MAXCLASS);
2525
Jason Evans155bfa72014-10-05 17:54:10 -07002526 binind = size2index(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002527 bin_info = &arena_bin_info[binind];
2528 arena_redzones_validate(ptr, bin_info, true);
2529}
2530
Jason Evans578cd162016-02-19 18:40:03 -08002531static void *
Jason Evans0c516a02016-02-25 15:29:49 -08002532arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002533{
2534 void *ret;
2535 arena_bin_t *bin;
Jason Evans0c516a02016-02-25 15:29:49 -08002536 size_t usize;
Jason Evanse476f8a2010-01-16 09:53:50 -08002537 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002538
Jason Evansb1726102012-02-28 16:50:47 -08002539 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08002540 bin = &arena->bins[binind];
Jason Evans0c516a02016-02-25 15:29:49 -08002541 usize = index2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08002542
Jason Evansb2c0d632016-04-13 23:36:15 -07002543 malloc_mutex_lock(tsd, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002544 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002545 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002546 else
Jason Evansb2c0d632016-04-13 23:36:15 -07002547 ret = arena_bin_malloc_hard(tsd, arena, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002548
2549 if (ret == NULL) {
Jason Evansb2c0d632016-04-13 23:36:15 -07002550 malloc_mutex_unlock(tsd, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002551 return (NULL);
2552 }
2553
Jason Evans7372b152012-02-10 20:22:09 -08002554 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002555 bin->stats.nmalloc++;
2556 bin->stats.nrequests++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002557 bin->stats.curregs++;
Jason Evans7372b152012-02-10 20:22:09 -08002558 }
Jason Evansb2c0d632016-04-13 23:36:15 -07002559 malloc_mutex_unlock(tsd, &bin->lock);
2560 if (config_prof && !isthreaded && arena_prof_accum(tsd, arena, usize))
2561 prof_idump(tsd);
Jason Evanse476f8a2010-01-16 09:53:50 -08002562
Jason Evans551ebc42014-10-03 10:16:09 -07002563 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002564 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002565 if (unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002566 arena_alloc_junk_small(ret,
2567 &arena_bin_info[binind], false);
Jason Evans9c640bf2014-09-11 16:20:44 -07002568 } else if (unlikely(opt_zero))
Jason Evans0c516a02016-02-25 15:29:49 -08002569 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002570 }
Jason Evans0c516a02016-02-25 15:29:49 -08002571 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
Jason Evans122449b2012-04-06 00:35:09 -07002572 } else {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002573 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002574 arena_alloc_junk_small(ret, &arena_bin_info[binind],
2575 true);
2576 }
Jason Evans0c516a02016-02-25 15:29:49 -08002577 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2578 memset(ret, 0, usize);
Jason Evans122449b2012-04-06 00:35:09 -07002579 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002580
Jason Evans243f7a02016-02-19 20:09:31 -08002581 arena_decay_tick(tsd, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002582 return (ret);
2583}
2584
2585void *
Jason Evans0c516a02016-02-25 15:29:49 -08002586arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002587{
2588 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07002589 size_t usize;
Jason Evans8a03cf02015-05-04 09:58:36 -07002590 uintptr_t random_offset;
Jason Evans0c5dd032014-09-29 01:31:39 -07002591 arena_run_t *run;
2592 arena_chunk_map_misc_t *miscelm;
Dmitri Smirnov33184bf2016-02-29 14:30:19 -08002593 UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002594
2595 /* Large allocation. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002596 usize = index2size(binind);
Jason Evansb2c0d632016-04-13 23:36:15 -07002597 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans8a03cf02015-05-04 09:58:36 -07002598 if (config_cache_oblivious) {
Jason Evansbce61d62015-07-07 09:32:05 -07002599 uint64_t r;
2600
Jason Evans8a03cf02015-05-04 09:58:36 -07002601 /*
2602 * Compute a uniformly distributed offset within the first page
2603 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
2604 * for 4 KiB pages and 64-byte cachelines.
2605 */
Jason Evans34676d32016-02-09 16:28:40 -08002606 r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
Jason Evans8a03cf02015-05-04 09:58:36 -07002607 random_offset = ((uintptr_t)r) << LG_CACHELINE;
2608 } else
2609 random_offset = 0;
Jason Evansb2c0d632016-04-13 23:36:15 -07002610 run = arena_run_alloc_large(tsd, arena, usize + large_pad, zero);
Jason Evans0c5dd032014-09-29 01:31:39 -07002611 if (run == NULL) {
Jason Evansb2c0d632016-04-13 23:36:15 -07002612 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002613 return (NULL);
2614 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002615 miscelm = arena_run_to_miscelm(run);
Jason Evans8a03cf02015-05-04 09:58:36 -07002616 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2617 random_offset);
Jason Evans7372b152012-02-10 20:22:09 -08002618 if (config_stats) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002619 szind_t index = binind - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002620
Jason Evans7372b152012-02-10 20:22:09 -08002621 arena->stats.nmalloc_large++;
2622 arena->stats.nrequests_large++;
Jason Evans155bfa72014-10-05 17:54:10 -07002623 arena->stats.allocated_large += usize;
2624 arena->stats.lstats[index].nmalloc++;
2625 arena->stats.lstats[index].nrequests++;
2626 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002627 }
Jason Evans7372b152012-02-10 20:22:09 -08002628 if (config_prof)
Jason Evans155bfa72014-10-05 17:54:10 -07002629 idump = arena_prof_accum_locked(arena, usize);
Jason Evansb2c0d632016-04-13 23:36:15 -07002630 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08002631 if (config_prof && idump)
Jason Evansb2c0d632016-04-13 23:36:15 -07002632 prof_idump(tsd);
Jason Evanse476f8a2010-01-16 09:53:50 -08002633
Jason Evans551ebc42014-10-03 10:16:09 -07002634 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002635 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002636 if (unlikely(opt_junk_alloc))
Chris Petersona82070e2016-03-27 23:28:39 -07002637 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002638 else if (unlikely(opt_zero))
Jason Evans155bfa72014-10-05 17:54:10 -07002639 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002640 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002641 }
2642
Jason Evans243f7a02016-02-19 20:09:31 -08002643 arena_decay_tick(tsd, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002644 return (ret);
2645}
2646
Jason Evans578cd162016-02-19 18:40:03 -08002647void *
2648arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
Jason Evans66cd9532016-04-22 14:34:14 -07002649 bool zero)
Jason Evans578cd162016-02-19 18:40:03 -08002650{
2651
Jason Evans90827a32016-05-03 15:00:42 -07002652 arena = arena_choose(tsd, arena);
Jason Evans578cd162016-02-19 18:40:03 -08002653 if (unlikely(arena == NULL))
2654 return (NULL);
2655
2656 if (likely(size <= SMALL_MAXCLASS))
Jason Evans0c516a02016-02-25 15:29:49 -08002657 return (arena_malloc_small(tsd, arena, ind, zero));
Jason Evans578cd162016-02-19 18:40:03 -08002658 if (likely(size <= large_maxclass))
Jason Evans0c516a02016-02-25 15:29:49 -08002659 return (arena_malloc_large(tsd, arena, ind, zero));
Jason Evans66cd9532016-04-22 14:34:14 -07002660 return (huge_malloc(tsd, arena, index2size(ind), zero));
Jason Evans578cd162016-02-19 18:40:03 -08002661}
2662
Jason Evanse476f8a2010-01-16 09:53:50 -08002663/* Only handles large allocations that require more than page alignment. */
Jason Evans88fef7c2015-02-12 14:06:37 -08002664static void *
Jason Evans50883de2015-07-23 17:13:18 -07002665arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002666 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002667{
2668 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07002669 size_t alloc_size, leadsize, trailsize;
2670 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002671 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002672 arena_chunk_map_misc_t *miscelm;
2673 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002674
Jason Evans50883de2015-07-23 17:13:18 -07002675 assert(usize == PAGE_CEILING(usize));
Jason Evans93443682010-10-20 17:39:18 -07002676
Jason Evans90827a32016-05-03 15:00:42 -07002677 arena = arena_choose(tsd, arena);
Jason Evans88fef7c2015-02-12 14:06:37 -08002678 if (unlikely(arena == NULL))
2679 return (NULL);
2680
Jason Evans93443682010-10-20 17:39:18 -07002681 alignment = PAGE_CEILING(alignment);
Jason Evans245ae602016-04-06 11:54:44 -07002682 alloc_size = usize + large_pad + alignment;
Jason Evanse476f8a2010-01-16 09:53:50 -08002683
Jason Evansb2c0d632016-04-13 23:36:15 -07002684 malloc_mutex_lock(tsd, &arena->lock);
2685 run = arena_run_alloc_large(tsd, arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07002686 if (run == NULL) {
Jason Evansb2c0d632016-04-13 23:36:15 -07002687 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002688 return (NULL);
2689 }
Jason Evans5ff709c2012-04-11 18:13:45 -07002690 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002691 miscelm = arena_run_to_miscelm(run);
2692 rpages = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002693
Jason Evans0c5dd032014-09-29 01:31:39 -07002694 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2695 (uintptr_t)rpages;
Jason Evans50883de2015-07-23 17:13:18 -07002696 assert(alloc_size >= leadsize + usize);
2697 trailsize = alloc_size - leadsize - usize - large_pad;
Jason Evans5ff709c2012-04-11 18:13:45 -07002698 if (leadsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07002699 arena_chunk_map_misc_t *head_miscelm = miscelm;
2700 arena_run_t *head_run = run;
2701
Jason Evans61a6dfc2016-03-23 16:04:38 -07002702 miscelm = arena_miscelm_get_mutable(chunk,
Jason Evans0c5dd032014-09-29 01:31:39 -07002703 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2704 LG_PAGE));
2705 run = &miscelm->run;
2706
Jason Evansb2c0d632016-04-13 23:36:15 -07002707 arena_run_trim_head(tsd, arena, chunk, head_run, alloc_size,
Jason Evans0c5dd032014-09-29 01:31:39 -07002708 alloc_size - leadsize);
Jason Evans5ff709c2012-04-11 18:13:45 -07002709 }
2710 if (trailsize != 0) {
Jason Evansb2c0d632016-04-13 23:36:15 -07002711 arena_run_trim_tail(tsd, arena, chunk, run, usize + large_pad +
Jason Evans50883de2015-07-23 17:13:18 -07002712 trailsize, usize + large_pad, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002713 }
Jason Evans8fadb1a2015-08-04 10:49:46 -07002714 if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2715 size_t run_ind =
2716 arena_miscelm_to_pageind(arena_run_to_miscelm(run));
Jason Evansde249c82015-08-09 16:47:27 -07002717 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2718 bool decommitted = (arena_mapbits_decommitted_get(chunk,
2719 run_ind) != 0);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002720
Jason Evansde249c82015-08-09 16:47:27 -07002721 assert(decommitted); /* Cause of OOM. */
Jason Evansb2c0d632016-04-13 23:36:15 -07002722 arena_run_dalloc(tsd, arena, run, dirty, false, decommitted);
2723 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002724 return (NULL);
2725 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002726 ret = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002727
Jason Evans7372b152012-02-10 20:22:09 -08002728 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002729 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002730
Jason Evans7372b152012-02-10 20:22:09 -08002731 arena->stats.nmalloc_large++;
2732 arena->stats.nrequests_large++;
Jason Evans50883de2015-07-23 17:13:18 -07002733 arena->stats.allocated_large += usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002734 arena->stats.lstats[index].nmalloc++;
2735 arena->stats.lstats[index].nrequests++;
2736 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002737 }
Jason Evansb2c0d632016-04-13 23:36:15 -07002738 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002739
Jason Evans551ebc42014-10-03 10:16:09 -07002740 if (config_fill && !zero) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002741 if (unlikely(opt_junk_alloc))
Chris Petersona82070e2016-03-27 23:28:39 -07002742 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002743 else if (unlikely(opt_zero))
Jason Evans50883de2015-07-23 17:13:18 -07002744 memset(ret, 0, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002745 }
Jason Evans243f7a02016-02-19 20:09:31 -08002746 arena_decay_tick(tsd, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002747 return (ret);
2748}
2749
Jason Evans88fef7c2015-02-12 14:06:37 -08002750void *
2751arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
2752 bool zero, tcache_t *tcache)
2753{
2754 void *ret;
2755
Jason Evans8a03cf02015-05-04 09:58:36 -07002756 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
Jason Evans51541752015-05-19 17:42:31 -07002757 && (usize & PAGE_MASK) == 0))) {
2758 /* Small; alignment doesn't require special run placement. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002759 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
2760 tcache, true);
Jason Evans676df882015-09-11 20:50:20 -07002761 } else if (usize <= large_maxclass && alignment <= PAGE) {
Jason Evans51541752015-05-19 17:42:31 -07002762 /*
2763 * Large; alignment doesn't require special run placement.
2764 * However, the cached pointer may be at a random offset from
2765 * the base of the run, so do some bit manipulation to retrieve
2766 * the base.
2767 */
Qi Wangf4a0f322015-10-27 15:12:10 -07002768 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
2769 tcache, true);
Jason Evans51541752015-05-19 17:42:31 -07002770 if (config_cache_oblivious)
2771 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2772 } else {
Jason Evans676df882015-09-11 20:50:20 -07002773 if (likely(usize <= large_maxclass)) {
Jason Evans88fef7c2015-02-12 14:06:37 -08002774 ret = arena_palloc_large(tsd, arena, usize, alignment,
2775 zero);
2776 } else if (likely(alignment <= chunksize))
Jason Evans66cd9532016-04-22 14:34:14 -07002777 ret = huge_malloc(tsd, arena, usize, zero);
Jason Evans88fef7c2015-02-12 14:06:37 -08002778 else {
Jason Evans66cd9532016-04-22 14:34:14 -07002779 ret = huge_palloc(tsd, arena, usize, alignment, zero);
Jason Evans88fef7c2015-02-12 14:06:37 -08002780 }
2781 }
2782 return (ret);
2783}
2784
Jason Evans0b270a92010-03-31 16:45:04 -07002785void
Jason Evansb2c0d632016-04-13 23:36:15 -07002786arena_prof_promoted(tsd_t *tsd, const void *ptr, size_t size)
Jason Evans0b270a92010-03-31 16:45:04 -07002787{
2788 arena_chunk_t *chunk;
Jason Evans155bfa72014-10-05 17:54:10 -07002789 size_t pageind;
Jason Evansd01fd192015-08-19 15:21:32 -07002790 szind_t binind;
Jason Evans0b270a92010-03-31 16:45:04 -07002791
Jason Evans78f73522012-04-18 13:38:40 -07002792 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07002793 assert(ptr != NULL);
2794 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evansb2c0d632016-04-13 23:36:15 -07002795 assert(isalloc(tsd, ptr, false) == LARGE_MINCLASS);
2796 assert(isalloc(tsd, ptr, true) == LARGE_MINCLASS);
Jason Evansb1726102012-02-28 16:50:47 -08002797 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07002798
2799 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07002800 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans155bfa72014-10-05 17:54:10 -07002801 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08002802 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07002803 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07002804
Jason Evansb2c0d632016-04-13 23:36:15 -07002805 assert(isalloc(tsd, ptr, false) == LARGE_MINCLASS);
2806 assert(isalloc(tsd, ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07002807}
Jason Evans6109fe02010-02-10 10:37:56 -08002808
Jason Evanse476f8a2010-01-16 09:53:50 -08002809static void
Jason Evans088e6a02010-10-18 00:04:44 -07002810arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08002811 arena_bin_t *bin)
2812{
Jason Evanse476f8a2010-01-16 09:53:50 -08002813
Jason Evans19b3d612010-03-18 20:36:40 -07002814 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002815 if (run == bin->runcur)
2816 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002817 else {
Jason Evansd01fd192015-08-19 15:21:32 -07002818 szind_t binind = arena_bin_index(extent_node_arena_get(
Jason Evansee41ad42015-02-15 18:04:46 -08002819 &chunk->node), bin);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002820 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2821
Jason Evansc6a2c392016-03-26 17:30:37 -07002822 /*
2823 * The following block's conditional is necessary because if the
2824 * run only contains one region, then it never gets inserted
2825 * into the non-full runs tree.
2826 */
Jason Evans49f7e8f2011-03-15 13:59:15 -07002827 if (bin_info->nregs != 1) {
Jason Evansc6a2c392016-03-26 17:30:37 -07002828 arena_chunk_map_misc_t *miscelm =
2829 arena_run_to_miscelm(run);
2830
2831 arena_run_heap_remove(&bin->runs, miscelm);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002832 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002833 }
Jason Evans088e6a02010-10-18 00:04:44 -07002834}
2835
2836static void
Jason Evansb2c0d632016-04-13 23:36:15 -07002837arena_dalloc_bin_run(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
2838 arena_run_t *run, arena_bin_t *bin)
Jason Evans088e6a02010-10-18 00:04:44 -07002839{
Jason Evans088e6a02010-10-18 00:04:44 -07002840
2841 assert(run != bin->runcur);
Jason Evans86815df2010-03-13 20:32:56 -08002842
Jason Evansb2c0d632016-04-13 23:36:15 -07002843 malloc_mutex_unlock(tsd, &bin->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002844 /******************************/
Jason Evansb2c0d632016-04-13 23:36:15 -07002845 malloc_mutex_lock(tsd, &arena->lock);
2846 arena_run_dalloc(tsd, arena, run, true, false, false);
2847 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002848 /****************************/
Jason Evansb2c0d632016-04-13 23:36:15 -07002849 malloc_mutex_lock(tsd, &bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08002850 if (config_stats)
2851 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08002852}
2853
Jason Evans940a2e02010-10-17 17:51:37 -07002854static void
2855arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2856 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002857{
Jason Evanse476f8a2010-01-16 09:53:50 -08002858
Jason Evans8de6a022010-10-17 20:57:30 -07002859 /*
Jason Evanse7a10582012-02-13 17:36:52 -08002860 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
2861 * non-full run. It is okay to NULL runcur out rather than proactively
2862 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07002863 */
Jason Evanse7a10582012-02-13 17:36:52 -08002864 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07002865 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08002866 if (bin->runcur->nfree > 0)
2867 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07002868 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08002869 if (config_stats)
2870 bin->stats.reruns++;
2871 } else
2872 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07002873}
2874
Jason Evansfc0b3b72014-10-09 17:54:06 -07002875static void
Jason Evansb2c0d632016-04-13 23:36:15 -07002876arena_dalloc_bin_locked_impl(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
2877 void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
Jason Evans940a2e02010-10-17 17:51:37 -07002878{
Jason Evans0c5dd032014-09-29 01:31:39 -07002879 size_t pageind, rpages_ind;
Jason Evans940a2e02010-10-17 17:51:37 -07002880 arena_run_t *run;
2881 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02002882 arena_bin_info_t *bin_info;
Jason Evansd01fd192015-08-19 15:21:32 -07002883 szind_t binind;
Jason Evans940a2e02010-10-17 17:51:37 -07002884
Jason Evansae4c7b42012-04-02 07:04:34 -07002885 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07002886 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
Jason Evans61a6dfc2016-03-23 16:04:38 -07002887 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002888 binind = run->binind;
2889 bin = &arena->bins[binind];
Mike Hommey8b499712012-04-24 23:22:02 +02002890 bin_info = &arena_bin_info[binind];
Jason Evans940a2e02010-10-17 17:51:37 -07002891
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002892 if (!junked && config_fill && unlikely(opt_junk_free))
Jason Evans122449b2012-04-06 00:35:09 -07002893 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07002894
2895 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002896 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07002897 arena_dissociate_bin_run(chunk, run, bin);
Jason Evansb2c0d632016-04-13 23:36:15 -07002898 arena_dalloc_bin_run(tsd, arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07002899 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07002900 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002901
Jason Evans7372b152012-02-10 20:22:09 -08002902 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002903 bin->stats.ndalloc++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002904 bin->stats.curregs--;
Jason Evans7372b152012-02-10 20:22:09 -08002905 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002906}
2907
Jason Evanse476f8a2010-01-16 09:53:50 -08002908void
Jason Evansb2c0d632016-04-13 23:36:15 -07002909arena_dalloc_bin_junked_locked(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
2910 void *ptr, arena_chunk_map_bits_t *bitselm)
Jason Evansfc0b3b72014-10-09 17:54:06 -07002911{
2912
Jason Evansb2c0d632016-04-13 23:36:15 -07002913 arena_dalloc_bin_locked_impl(tsd, arena, chunk, ptr, bitselm, true);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002914}
2915
2916void
Jason Evansb2c0d632016-04-13 23:36:15 -07002917arena_dalloc_bin(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002918 size_t pageind, arena_chunk_map_bits_t *bitselm)
Jason Evans203484e2012-05-02 00:30:36 -07002919{
2920 arena_run_t *run;
2921 arena_bin_t *bin;
Jason Evans0c5dd032014-09-29 01:31:39 -07002922 size_t rpages_ind;
Jason Evans203484e2012-05-02 00:30:36 -07002923
Jason Evans0c5dd032014-09-29 01:31:39 -07002924 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
Jason Evans61a6dfc2016-03-23 16:04:38 -07002925 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002926 bin = &arena->bins[run->binind];
Jason Evansb2c0d632016-04-13 23:36:15 -07002927 malloc_mutex_lock(tsd, &bin->lock);
2928 arena_dalloc_bin_locked_impl(tsd, arena, chunk, ptr, bitselm, false);
2929 malloc_mutex_unlock(tsd, &bin->lock);
Jason Evans203484e2012-05-02 00:30:36 -07002930}
2931
2932void
Jason Evans243f7a02016-02-19 20:09:31 -08002933arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans203484e2012-05-02 00:30:36 -07002934 size_t pageind)
2935{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002936 arena_chunk_map_bits_t *bitselm;
Jason Evans203484e2012-05-02 00:30:36 -07002937
2938 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07002939 /* arena_ptr_small_binind_get() does extra sanity checking. */
2940 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2941 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07002942 }
Jason Evans61a6dfc2016-03-23 16:04:38 -07002943 bitselm = arena_bitselm_get_mutable(chunk, pageind);
Jason Evansb2c0d632016-04-13 23:36:15 -07002944 arena_dalloc_bin(tsd, arena, chunk, ptr, pageind, bitselm);
Jason Evans243f7a02016-02-19 20:09:31 -08002945 arena_decay_tick(tsd, arena);
Jason Evans203484e2012-05-02 00:30:36 -07002946}
Jason Evanse476f8a2010-01-16 09:53:50 -08002947
Jason Evans6b694c42014-01-07 16:47:56 -08002948#ifdef JEMALLOC_JET
2949#undef arena_dalloc_junk_large
Jason Evansab0cfe02016-04-18 15:11:20 -07002950#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
Jason Evans6b694c42014-01-07 16:47:56 -08002951#endif
Jason Evansfc0b3b72014-10-09 17:54:06 -07002952void
Jason Evans6b694c42014-01-07 16:47:56 -08002953arena_dalloc_junk_large(void *ptr, size_t usize)
2954{
2955
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002956 if (config_fill && unlikely(opt_junk_free))
Chris Petersona82070e2016-03-27 23:28:39 -07002957 memset(ptr, JEMALLOC_FREE_JUNK, usize);
Jason Evans6b694c42014-01-07 16:47:56 -08002958}
2959#ifdef JEMALLOC_JET
2960#undef arena_dalloc_junk_large
2961#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2962arena_dalloc_junk_large_t *arena_dalloc_junk_large =
Jason Evansab0cfe02016-04-18 15:11:20 -07002963 JEMALLOC_N(n_arena_dalloc_junk_large);
Jason Evans6b694c42014-01-07 16:47:56 -08002964#endif
2965
Jason Evanse56b24e2015-09-20 09:58:10 -07002966static void
Jason Evansb2c0d632016-04-13 23:36:15 -07002967arena_dalloc_large_locked_impl(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
Jason Evansfc0b3b72014-10-09 17:54:06 -07002968 void *ptr, bool junked)
Jason Evanse476f8a2010-01-16 09:53:50 -08002969{
Jason Evans0c5dd032014-09-29 01:31:39 -07002970 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans61a6dfc2016-03-23 16:04:38 -07002971 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
2972 pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07002973 arena_run_t *run = &miscelm->run;
Jason Evans13668262010-01-31 03:57:29 -08002974
Jason Evans7372b152012-02-10 20:22:09 -08002975 if (config_fill || config_stats) {
Jason Evans8a03cf02015-05-04 09:58:36 -07002976 size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
2977 large_pad;
Jason Evanse476f8a2010-01-16 09:53:50 -08002978
Jason Evansfc0b3b72014-10-09 17:54:06 -07002979 if (!junked)
2980 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002981 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002982 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002983
Jason Evans7372b152012-02-10 20:22:09 -08002984 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08002985 arena->stats.allocated_large -= usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002986 arena->stats.lstats[index].ndalloc++;
2987 arena->stats.lstats[index].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08002988 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002989 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002990
Jason Evansb2c0d632016-04-13 23:36:15 -07002991 arena_run_dalloc(tsd, arena, run, true, false, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002992}
2993
Jason Evans203484e2012-05-02 00:30:36 -07002994void
Jason Evansb2c0d632016-04-13 23:36:15 -07002995arena_dalloc_large_junked_locked(tsd_t *tsd, arena_t *arena,
2996 arena_chunk_t *chunk, void *ptr)
Jason Evansfc0b3b72014-10-09 17:54:06 -07002997{
2998
Jason Evansb2c0d632016-04-13 23:36:15 -07002999 arena_dalloc_large_locked_impl(tsd, arena, chunk, ptr, true);
Jason Evansfc0b3b72014-10-09 17:54:06 -07003000}
3001
3002void
Jason Evans243f7a02016-02-19 20:09:31 -08003003arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr)
Jason Evans203484e2012-05-02 00:30:36 -07003004{
3005
Jason Evansb2c0d632016-04-13 23:36:15 -07003006 malloc_mutex_lock(tsd, &arena->lock);
3007 arena_dalloc_large_locked_impl(tsd, arena, chunk, ptr, false);
3008 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08003009 arena_decay_tick(tsd, arena);
Jason Evans203484e2012-05-02 00:30:36 -07003010}
3011
Jason Evanse476f8a2010-01-16 09:53:50 -08003012static void
Jason Evansb2c0d632016-04-13 23:36:15 -07003013arena_ralloc_large_shrink(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
3014 void *ptr, size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08003015{
Jason Evans0c5dd032014-09-29 01:31:39 -07003016 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans61a6dfc2016-03-23 16:04:38 -07003017 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
3018 pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07003019 arena_run_t *run = &miscelm->run;
Jason Evanse476f8a2010-01-16 09:53:50 -08003020
3021 assert(size < oldsize);
3022
3023 /*
3024 * Shrink the run, and make trailing pages available for other
3025 * allocations.
3026 */
Jason Evansb2c0d632016-04-13 23:36:15 -07003027 malloc_mutex_lock(tsd, &arena->lock);
3028 arena_run_trim_tail(tsd, arena, chunk, run, oldsize + large_pad, size +
Jason Evans8a03cf02015-05-04 09:58:36 -07003029 large_pad, true);
Jason Evans7372b152012-02-10 20:22:09 -08003030 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07003031 szind_t oldindex = size2index(oldsize) - NBINS;
3032 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07003033
Jason Evans7372b152012-02-10 20:22:09 -08003034 arena->stats.ndalloc_large++;
3035 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07003036 arena->stats.lstats[oldindex].ndalloc++;
3037 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08003038
Jason Evans7372b152012-02-10 20:22:09 -08003039 arena->stats.nmalloc_large++;
3040 arena->stats.nrequests_large++;
3041 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07003042 arena->stats.lstats[index].nmalloc++;
3043 arena->stats.lstats[index].nrequests++;
3044 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08003045 }
Jason Evansb2c0d632016-04-13 23:36:15 -07003046 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003047}
3048
3049static bool
Jason Evansb2c0d632016-04-13 23:36:15 -07003050arena_ralloc_large_grow(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
3051 void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003052{
Jason Evansae4c7b42012-04-02 07:04:34 -07003053 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans5716d972015-08-06 23:34:12 -07003054 size_t npages = (oldsize + large_pad) >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003055 size_t followsize;
Jason Evanse476f8a2010-01-16 09:53:50 -08003056
Jason Evans8a03cf02015-05-04 09:58:36 -07003057 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
3058 large_pad);
Jason Evanse476f8a2010-01-16 09:53:50 -08003059
3060 /* Try to extend the run. */
Jason Evansb2c0d632016-04-13 23:36:15 -07003061 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans560a4e12015-09-11 16:18:53 -07003062 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
3063 pageind+npages) != 0)
3064 goto label_fail;
3065 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
3066 if (oldsize + followsize >= usize_min) {
Jason Evanse476f8a2010-01-16 09:53:50 -08003067 /*
3068 * The next run is available and sufficiently large. Split the
3069 * following run, then merge the first part with the existing
3070 * allocation.
3071 */
Guilherme Goncalves9c6a8d32014-12-17 14:46:35 -02003072 arena_run_t *run;
Jason Evans560a4e12015-09-11 16:18:53 -07003073 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
Jason Evans155bfa72014-10-05 17:54:10 -07003074
Jason Evans560a4e12015-09-11 16:18:53 -07003075 usize = usize_max;
Jason Evans155bfa72014-10-05 17:54:10 -07003076 while (oldsize + followsize < usize)
3077 usize = index2size(size2index(usize)-1);
3078 assert(usize >= usize_min);
Jason Evans560a4e12015-09-11 16:18:53 -07003079 assert(usize >= oldsize);
Jason Evans5716d972015-08-06 23:34:12 -07003080 splitsize = usize - oldsize;
Jason Evans560a4e12015-09-11 16:18:53 -07003081 if (splitsize == 0)
3082 goto label_fail;
Jason Evans155bfa72014-10-05 17:54:10 -07003083
Jason Evans61a6dfc2016-03-23 16:04:38 -07003084 run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
Jason Evans560a4e12015-09-11 16:18:53 -07003085 if (arena_run_split_large(arena, run, splitsize, zero))
3086 goto label_fail;
Jason Evanse476f8a2010-01-16 09:53:50 -08003087
Jason Evansd260f442015-09-24 16:38:45 -07003088 if (config_cache_oblivious && zero) {
3089 /*
3090 * Zero the trailing bytes of the original allocation's
3091 * last page, since they are in an indeterminate state.
Jason Evansa784e412015-09-24 22:21:55 -07003092 * There will always be trailing bytes, because ptr's
3093 * offset from the beginning of the run is a multiple of
3094 * CACHELINE in [0 .. PAGE).
Jason Evansd260f442015-09-24 16:38:45 -07003095 */
Jason Evansa784e412015-09-24 22:21:55 -07003096 void *zbase = (void *)((uintptr_t)ptr + oldsize);
3097 void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
3098 PAGE));
3099 size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
3100 assert(nzero > 0);
3101 memset(zbase, 0, nzero);
Jason Evansd260f442015-09-24 16:38:45 -07003102 }
3103
Jason Evans088e6a02010-10-18 00:04:44 -07003104 size = oldsize + splitsize;
Jason Evans5716d972015-08-06 23:34:12 -07003105 npages = (size + large_pad) >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07003106
3107 /*
3108 * Mark the extended run as dirty if either portion of the run
3109 * was dirty before allocation. This is rather pedantic,
3110 * because there's not actually any sequence of events that
3111 * could cause the resulting run to be passed to
3112 * arena_run_dalloc() with the dirty argument set to false
3113 * (which is when dirty flag consistency would really matter).
3114 */
Jason Evans203484e2012-05-02 00:30:36 -07003115 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
3116 arena_mapbits_dirty_get(chunk, pageind+npages-1);
Jason Evans1f27abc2015-08-11 12:42:33 -07003117 flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
Jason Evans5716d972015-08-06 23:34:12 -07003118 arena_mapbits_large_set(chunk, pageind, size + large_pad,
Jason Evans1f27abc2015-08-11 12:42:33 -07003119 flag_dirty | (flag_unzeroed_mask &
3120 arena_mapbits_unzeroed_get(chunk, pageind)));
3121 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
3122 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
3123 pageind+npages-1)));
Jason Evanse476f8a2010-01-16 09:53:50 -08003124
Jason Evans7372b152012-02-10 20:22:09 -08003125 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07003126 szind_t oldindex = size2index(oldsize) - NBINS;
3127 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07003128
Jason Evans7372b152012-02-10 20:22:09 -08003129 arena->stats.ndalloc_large++;
3130 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07003131 arena->stats.lstats[oldindex].ndalloc++;
3132 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08003133
Jason Evans7372b152012-02-10 20:22:09 -08003134 arena->stats.nmalloc_large++;
3135 arena->stats.nrequests_large++;
3136 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07003137 arena->stats.lstats[index].nmalloc++;
3138 arena->stats.lstats[index].nrequests++;
3139 arena->stats.lstats[index].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07003140 }
Jason Evansb2c0d632016-04-13 23:36:15 -07003141 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003142 return (false);
3143 }
Jason Evans560a4e12015-09-11 16:18:53 -07003144label_fail:
Jason Evansb2c0d632016-04-13 23:36:15 -07003145 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003146 return (true);
3147}
3148
Jason Evans6b694c42014-01-07 16:47:56 -08003149#ifdef JEMALLOC_JET
3150#undef arena_ralloc_junk_large
Jason Evansab0cfe02016-04-18 15:11:20 -07003151#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
Jason Evans6b694c42014-01-07 16:47:56 -08003152#endif
3153static void
3154arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
3155{
3156
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02003157 if (config_fill && unlikely(opt_junk_free)) {
Chris Petersona82070e2016-03-27 23:28:39 -07003158 memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
Jason Evans6b694c42014-01-07 16:47:56 -08003159 old_usize - usize);
3160 }
3161}
3162#ifdef JEMALLOC_JET
3163#undef arena_ralloc_junk_large
3164#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
3165arena_ralloc_junk_large_t *arena_ralloc_junk_large =
Jason Evansab0cfe02016-04-18 15:11:20 -07003166 JEMALLOC_N(n_arena_ralloc_junk_large);
Jason Evans6b694c42014-01-07 16:47:56 -08003167#endif
3168
Jason Evanse476f8a2010-01-16 09:53:50 -08003169/*
3170 * Try to resize a large allocation, in order to avoid copying. This will
3171 * always fail if growing an object, and the following run is already in use.
3172 */
3173static bool
Jason Evansb2c0d632016-04-13 23:36:15 -07003174arena_ralloc_large(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
Jason Evans560a4e12015-09-11 16:18:53 -07003175 size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003176{
Jason Evans560a4e12015-09-11 16:18:53 -07003177 arena_chunk_t *chunk;
3178 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08003179
Jason Evans560a4e12015-09-11 16:18:53 -07003180 if (oldsize == usize_max) {
3181 /* Current size class is compatible and maximal. */
Jason Evanse476f8a2010-01-16 09:53:50 -08003182 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003183 }
Jason Evans560a4e12015-09-11 16:18:53 -07003184
3185 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3186 arena = extent_node_arena_get(&chunk->node);
3187
3188 if (oldsize < usize_max) {
Jason Evansb2c0d632016-04-13 23:36:15 -07003189 bool ret = arena_ralloc_large_grow(tsd, arena, chunk, ptr,
3190 oldsize, usize_min, usize_max, zero);
Jason Evans560a4e12015-09-11 16:18:53 -07003191 if (config_fill && !ret && !zero) {
3192 if (unlikely(opt_junk_alloc)) {
Chris Petersona82070e2016-03-27 23:28:39 -07003193 memset((void *)((uintptr_t)ptr + oldsize),
3194 JEMALLOC_ALLOC_JUNK,
Jason Evansb2c0d632016-04-13 23:36:15 -07003195 isalloc(tsd, ptr, config_prof) - oldsize);
Jason Evans560a4e12015-09-11 16:18:53 -07003196 } else if (unlikely(opt_zero)) {
3197 memset((void *)((uintptr_t)ptr + oldsize), 0,
Jason Evansb2c0d632016-04-13 23:36:15 -07003198 isalloc(tsd, ptr, config_prof) - oldsize);
Jason Evans560a4e12015-09-11 16:18:53 -07003199 }
3200 }
3201 return (ret);
3202 }
3203
3204 assert(oldsize > usize_max);
3205 /* Fill before shrinking in order avoid a race. */
3206 arena_ralloc_junk_large(ptr, oldsize, usize_max);
Jason Evansb2c0d632016-04-13 23:36:15 -07003207 arena_ralloc_large_shrink(tsd, arena, chunk, ptr, oldsize, usize_max);
Jason Evans560a4e12015-09-11 16:18:53 -07003208 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003209}
3210
Jason Evansb2c31662014-01-12 15:05:44 -08003211bool
Jason Evans243f7a02016-02-19 20:09:31 -08003212arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
3213 size_t extra, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003214{
Jason Evans560a4e12015-09-11 16:18:53 -07003215 size_t usize_min, usize_max;
Jason Evanse476f8a2010-01-16 09:53:50 -08003216
Jason Evans0c516a02016-02-25 15:29:49 -08003217 /* Calls with non-zero extra had to clamp extra. */
3218 assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
3219
Jason Evans0c516a02016-02-25 15:29:49 -08003220 if (unlikely(size > HUGE_MAXCLASS))
3221 return (true);
3222
Jason Evans560a4e12015-09-11 16:18:53 -07003223 usize_min = s2u(size);
Jason Evans560a4e12015-09-11 16:18:53 -07003224 usize_max = s2u(size + extra);
Jason Evans676df882015-09-11 20:50:20 -07003225 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
Jason Evans243f7a02016-02-19 20:09:31 -08003226 arena_chunk_t *chunk;
3227
Jason Evans88fef7c2015-02-12 14:06:37 -08003228 /*
3229 * Avoid moving the allocation if the size class can be left the
3230 * same.
3231 */
Jason Evans560a4e12015-09-11 16:18:53 -07003232 if (oldsize <= SMALL_MAXCLASS) {
3233 assert(arena_bin_info[size2index(oldsize)].reg_size ==
3234 oldsize);
Jason Evans4985dc62016-02-19 19:24:58 -08003235 if ((usize_max > SMALL_MAXCLASS ||
3236 size2index(usize_max) != size2index(oldsize)) &&
3237 (size > oldsize || usize_max < oldsize))
3238 return (true);
Jason Evans560a4e12015-09-11 16:18:53 -07003239 } else {
Jason Evans4985dc62016-02-19 19:24:58 -08003240 if (usize_max <= SMALL_MAXCLASS)
3241 return (true);
Jason Evansb2c0d632016-04-13 23:36:15 -07003242 if (arena_ralloc_large(tsd, ptr, oldsize, usize_min,
Jason Evans4985dc62016-02-19 19:24:58 -08003243 usize_max, zero))
3244 return (true);
Jason Evanse476f8a2010-01-16 09:53:50 -08003245 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003246
Jason Evans243f7a02016-02-19 20:09:31 -08003247 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3248 arena_decay_tick(tsd, extent_node_arena_get(&chunk->node));
Jason Evans4985dc62016-02-19 19:24:58 -08003249 return (false);
Jason Evans560a4e12015-09-11 16:18:53 -07003250 } else {
Jason Evans243f7a02016-02-19 20:09:31 -08003251 return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min,
3252 usize_max, zero));
Jason Evans560a4e12015-09-11 16:18:53 -07003253 }
3254}
3255
3256static void *
3257arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
3258 size_t alignment, bool zero, tcache_t *tcache)
3259{
3260
3261 if (alignment == 0)
Qi Wangf4a0f322015-10-27 15:12:10 -07003262 return (arena_malloc(tsd, arena, usize, size2index(usize), zero,
3263 tcache, true));
Jason Evans560a4e12015-09-11 16:18:53 -07003264 usize = sa2u(usize, alignment);
Jason Evans0c516a02016-02-25 15:29:49 -08003265 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
Jason Evans560a4e12015-09-11 16:18:53 -07003266 return (NULL);
3267 return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
Jason Evans8e3c3c62010-09-17 15:46:18 -07003268}
Jason Evanse476f8a2010-01-16 09:53:50 -08003269
Jason Evans8e3c3c62010-09-17 15:46:18 -07003270void *
Jason Evans5460aa62014-09-22 21:09:23 -07003271arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
Jason Evans560a4e12015-09-11 16:18:53 -07003272 size_t alignment, bool zero, tcache_t *tcache)
Jason Evans8e3c3c62010-09-17 15:46:18 -07003273{
3274 void *ret;
Jason Evans560a4e12015-09-11 16:18:53 -07003275 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003276
Jason Evans560a4e12015-09-11 16:18:53 -07003277 usize = s2u(size);
Jason Evans0c516a02016-02-25 15:29:49 -08003278 if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
Jason Evans560a4e12015-09-11 16:18:53 -07003279 return (NULL);
3280
Jason Evans676df882015-09-11 20:50:20 -07003281 if (likely(usize <= large_maxclass)) {
Jason Evans88fef7c2015-02-12 14:06:37 -08003282 size_t copysize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003283
Jason Evans88fef7c2015-02-12 14:06:37 -08003284 /* Try to avoid moving the allocation. */
Jason Evans243f7a02016-02-19 20:09:31 -08003285 if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero))
Jason Evans88fef7c2015-02-12 14:06:37 -08003286 return (ptr);
Jason Evans8e3c3c62010-09-17 15:46:18 -07003287
Jason Evans88fef7c2015-02-12 14:06:37 -08003288 /*
3289 * size and oldsize are different enough that we need to move
3290 * the object. In that case, fall back to allocating new space
3291 * and copying.
3292 */
Jason Evans560a4e12015-09-11 16:18:53 -07003293 ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
3294 zero, tcache);
3295 if (ret == NULL)
3296 return (NULL);
Jason Evans88fef7c2015-02-12 14:06:37 -08003297
3298 /*
3299 * Junk/zero-filling were already done by
3300 * ipalloc()/arena_malloc().
3301 */
3302
Jason Evans560a4e12015-09-11 16:18:53 -07003303 copysize = (usize < oldsize) ? usize : oldsize;
Jason Evans88fef7c2015-02-12 14:06:37 -08003304 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
3305 memcpy(ret, ptr, copysize);
3306 isqalloc(tsd, ptr, oldsize, tcache);
3307 } else {
Jason Evans560a4e12015-09-11 16:18:53 -07003308 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
3309 zero, tcache);
Jason Evans8e3c3c62010-09-17 15:46:18 -07003310 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003311 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08003312}
3313
Jason Evans609ae592012-10-11 13:53:15 -07003314dss_prec_t
Jason Evansb2c0d632016-04-13 23:36:15 -07003315arena_dss_prec_get(tsd_t *tsd, arena_t *arena)
Jason Evans609ae592012-10-11 13:53:15 -07003316{
3317 dss_prec_t ret;
3318
Jason Evansb2c0d632016-04-13 23:36:15 -07003319 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003320 ret = arena->dss_prec;
Jason Evansb2c0d632016-04-13 23:36:15 -07003321 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003322 return (ret);
3323}
3324
Jason Evans4d434ad2014-04-15 12:09:48 -07003325bool
Jason Evansb2c0d632016-04-13 23:36:15 -07003326arena_dss_prec_set(tsd_t *tsd, arena_t *arena, dss_prec_t dss_prec)
Jason Evans609ae592012-10-11 13:53:15 -07003327{
3328
Jason Evans551ebc42014-10-03 10:16:09 -07003329 if (!have_dss)
Jason Evans4d434ad2014-04-15 12:09:48 -07003330 return (dss_prec != dss_prec_disabled);
Jason Evansb2c0d632016-04-13 23:36:15 -07003331 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003332 arena->dss_prec = dss_prec;
Jason Evansb2c0d632016-04-13 23:36:15 -07003333 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans4d434ad2014-04-15 12:09:48 -07003334 return (false);
Jason Evans609ae592012-10-11 13:53:15 -07003335}
3336
Jason Evans8d6a3e82015-03-18 18:55:33 -07003337ssize_t
3338arena_lg_dirty_mult_default_get(void)
3339{
3340
3341 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
3342}
3343
3344bool
3345arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
3346{
3347
Jason Evans243f7a02016-02-19 20:09:31 -08003348 if (opt_purge != purge_mode_ratio)
3349 return (true);
Jason Evans8d6a3e82015-03-18 18:55:33 -07003350 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
3351 return (true);
3352 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
3353 return (false);
3354}
3355
Jason Evans243f7a02016-02-19 20:09:31 -08003356ssize_t
3357arena_decay_time_default_get(void)
3358{
3359
3360 return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
3361}
3362
3363bool
3364arena_decay_time_default_set(ssize_t decay_time)
3365{
3366
3367 if (opt_purge != purge_mode_decay)
3368 return (true);
3369 if (!arena_decay_time_valid(decay_time))
3370 return (true);
3371 atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
3372 return (false);
3373}
3374
Jason Evans3c07f802016-02-27 20:40:13 -08003375static void
3376arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
3377 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3378 size_t *nactive, size_t *ndirty)
Jason Evans609ae592012-10-11 13:53:15 -07003379{
Jason Evans609ae592012-10-11 13:53:15 -07003380
Jason Evans66cd9532016-04-22 14:34:14 -07003381 *nthreads += arena_nthreads_get(arena, false);
Jason Evans609ae592012-10-11 13:53:15 -07003382 *dss = dss_prec_names[arena->dss_prec];
Jason Evans562d2662015-03-24 16:36:12 -07003383 *lg_dirty_mult = arena->lg_dirty_mult;
Jason Evans243f7a02016-02-19 20:09:31 -08003384 *decay_time = arena->decay_time;
Jason Evans609ae592012-10-11 13:53:15 -07003385 *nactive += arena->nactive;
3386 *ndirty += arena->ndirty;
Jason Evans3c07f802016-02-27 20:40:13 -08003387}
3388
3389void
Jason Evansb2c0d632016-04-13 23:36:15 -07003390arena_basic_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads,
3391 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3392 size_t *nactive, size_t *ndirty)
Jason Evans3c07f802016-02-27 20:40:13 -08003393{
3394
Jason Evansb2c0d632016-04-13 23:36:15 -07003395 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans3c07f802016-02-27 20:40:13 -08003396 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3397 decay_time, nactive, ndirty);
Jason Evansb2c0d632016-04-13 23:36:15 -07003398 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans3c07f802016-02-27 20:40:13 -08003399}
3400
3401void
Jason Evansb2c0d632016-04-13 23:36:15 -07003402arena_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads,
3403 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3404 size_t *nactive, size_t *ndirty, arena_stats_t *astats,
3405 malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
3406 malloc_huge_stats_t *hstats)
Jason Evans3c07f802016-02-27 20:40:13 -08003407{
3408 unsigned i;
3409
3410 cassert(config_stats);
3411
Jason Evansb2c0d632016-04-13 23:36:15 -07003412 malloc_mutex_lock(tsd, &arena->lock);
Jason Evans3c07f802016-02-27 20:40:13 -08003413 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3414 decay_time, nactive, ndirty);
Jason Evans609ae592012-10-11 13:53:15 -07003415
3416 astats->mapped += arena->stats.mapped;
3417 astats->npurge += arena->stats.npurge;
3418 astats->nmadvise += arena->stats.nmadvise;
3419 astats->purged += arena->stats.purged;
Jason Evans4581b972014-11-27 17:22:36 -02003420 astats->metadata_mapped += arena->stats.metadata_mapped;
3421 astats->metadata_allocated += arena_metadata_allocated_get(arena);
Jason Evans609ae592012-10-11 13:53:15 -07003422 astats->allocated_large += arena->stats.allocated_large;
3423 astats->nmalloc_large += arena->stats.nmalloc_large;
3424 astats->ndalloc_large += arena->stats.ndalloc_large;
3425 astats->nrequests_large += arena->stats.nrequests_large;
Jason Evanse2deab72014-05-15 22:22:27 -07003426 astats->allocated_huge += arena->stats.allocated_huge;
3427 astats->nmalloc_huge += arena->stats.nmalloc_huge;
3428 astats->ndalloc_huge += arena->stats.ndalloc_huge;
Jason Evans609ae592012-10-11 13:53:15 -07003429
3430 for (i = 0; i < nlclasses; i++) {
3431 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
3432 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
3433 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
3434 lstats[i].curruns += arena->stats.lstats[i].curruns;
3435 }
Jason Evans3c4d92e2014-10-12 22:53:59 -07003436
3437 for (i = 0; i < nhclasses; i++) {
3438 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
3439 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
3440 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
3441 }
Jason Evansb2c0d632016-04-13 23:36:15 -07003442 malloc_mutex_unlock(tsd, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003443
3444 for (i = 0; i < NBINS; i++) {
3445 arena_bin_t *bin = &arena->bins[i];
3446
Jason Evansb2c0d632016-04-13 23:36:15 -07003447 malloc_mutex_lock(tsd, &bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003448 bstats[i].nmalloc += bin->stats.nmalloc;
3449 bstats[i].ndalloc += bin->stats.ndalloc;
3450 bstats[i].nrequests += bin->stats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07003451 bstats[i].curregs += bin->stats.curregs;
Jason Evans609ae592012-10-11 13:53:15 -07003452 if (config_tcache) {
3453 bstats[i].nfills += bin->stats.nfills;
3454 bstats[i].nflushes += bin->stats.nflushes;
3455 }
3456 bstats[i].nruns += bin->stats.nruns;
3457 bstats[i].reruns += bin->stats.reruns;
3458 bstats[i].curruns += bin->stats.curruns;
Jason Evansb2c0d632016-04-13 23:36:15 -07003459 malloc_mutex_unlock(tsd, &bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003460 }
3461}
3462
Jason Evans767d8502016-02-24 23:58:10 -08003463unsigned
Jason Evans66cd9532016-04-22 14:34:14 -07003464arena_nthreads_get(arena_t *arena, bool internal)
Jason Evans767d8502016-02-24 23:58:10 -08003465{
3466
Jason Evans66cd9532016-04-22 14:34:14 -07003467 return (atomic_read_u(&arena->nthreads[internal]));
Jason Evans767d8502016-02-24 23:58:10 -08003468}
3469
3470void
Jason Evans66cd9532016-04-22 14:34:14 -07003471arena_nthreads_inc(arena_t *arena, bool internal)
Jason Evans767d8502016-02-24 23:58:10 -08003472{
3473
Jason Evans66cd9532016-04-22 14:34:14 -07003474 atomic_add_u(&arena->nthreads[internal], 1);
Jason Evans767d8502016-02-24 23:58:10 -08003475}
3476
3477void
Jason Evans66cd9532016-04-22 14:34:14 -07003478arena_nthreads_dec(arena_t *arena, bool internal)
Jason Evans767d8502016-02-24 23:58:10 -08003479{
3480
Jason Evans66cd9532016-04-22 14:34:14 -07003481 atomic_sub_u(&arena->nthreads[internal], 1);
Jason Evans767d8502016-02-24 23:58:10 -08003482}
3483
Jason Evans8bb31982014-10-07 23:14:57 -07003484arena_t *
Jason Evansb2c0d632016-04-13 23:36:15 -07003485arena_new(tsd_t *tsd, unsigned ind)
Jason Evanse476f8a2010-01-16 09:53:50 -08003486{
Jason Evans8bb31982014-10-07 23:14:57 -07003487 arena_t *arena;
Dave Watson3417a302016-02-23 12:06:21 -08003488 size_t arena_size;
Jason Evanse476f8a2010-01-16 09:53:50 -08003489 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08003490
Dave Watson3417a302016-02-23 12:06:21 -08003491 /* Compute arena size to incorporate sufficient runs_avail elements. */
Jason Evansc6a2c392016-03-26 17:30:37 -07003492 arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_heap_t) *
Dave Watson38127292016-02-24 20:10:02 -08003493 runs_avail_nclasses);
Jason Evans8bb31982014-10-07 23:14:57 -07003494 /*
Jason Evans3c4d92e2014-10-12 22:53:59 -07003495 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
3496 * because there is no way to clean up if base_alloc() OOMs.
Jason Evans8bb31982014-10-07 23:14:57 -07003497 */
3498 if (config_stats) {
Jason Evansb2c0d632016-04-13 23:36:15 -07003499 arena = (arena_t *)base_alloc(tsd, CACHELINE_CEILING(arena_size)
3500 + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
Jason Evans3c4d92e2014-10-12 22:53:59 -07003501 nhclasses) * sizeof(malloc_huge_stats_t));
Jason Evans8bb31982014-10-07 23:14:57 -07003502 } else
Jason Evansb2c0d632016-04-13 23:36:15 -07003503 arena = (arena_t *)base_alloc(tsd, arena_size);
Jason Evans8bb31982014-10-07 23:14:57 -07003504 if (arena == NULL)
3505 return (NULL);
3506
Jason Evans6109fe02010-02-10 10:37:56 -08003507 arena->ind = ind;
Jason Evans66cd9532016-04-22 14:34:14 -07003508 arena->nthreads[0] = arena->nthreads[1] = 0;
Jason Evansb2c0d632016-04-13 23:36:15 -07003509 if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003510 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003511
Jason Evans7372b152012-02-10 20:22:09 -08003512 if (config_stats) {
3513 memset(&arena->stats, 0, sizeof(arena_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003514 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
Dave Watson3417a302016-02-23 12:06:21 -08003515 + CACHELINE_CEILING(arena_size));
Jason Evans7372b152012-02-10 20:22:09 -08003516 memset(arena->stats.lstats, 0, nlclasses *
3517 sizeof(malloc_large_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003518 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
Dave Watson3417a302016-02-23 12:06:21 -08003519 + CACHELINE_CEILING(arena_size) +
Jason Evans3c4d92e2014-10-12 22:53:59 -07003520 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
3521 memset(arena->stats.hstats, 0, nhclasses *
3522 sizeof(malloc_huge_stats_t));
Jason Evans7372b152012-02-10 20:22:09 -08003523 if (config_tcache)
3524 ql_new(&arena->tcache_ql);
3525 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003526
Jason Evans7372b152012-02-10 20:22:09 -08003527 if (config_prof)
3528 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08003529
Jason Evans8a03cf02015-05-04 09:58:36 -07003530 if (config_cache_oblivious) {
3531 /*
3532 * A nondeterministic seed based on the address of arena reduces
3533 * the likelihood of lockstep non-uniform cache index
3534 * utilization among identical concurrent processes, but at the
3535 * cost of test repeatability. For debug builds, instead use a
3536 * deterministic seed.
3537 */
3538 arena->offset_state = config_debug ? ind :
3539 (uint64_t)(uintptr_t)arena;
3540 }
3541
Jason Evansb2c0d632016-04-13 23:36:15 -07003542 arena->dss_prec = chunk_dss_prec_get(tsd);
Jason Evans609ae592012-10-11 13:53:15 -07003543
Jason Evans19ff2ce2016-04-22 14:37:17 -07003544 ql_new(&arena->achunks);
3545
Jason Evanse476f8a2010-01-16 09:53:50 -08003546 arena->spare = NULL;
3547
Jason Evans8d6a3e82015-03-18 18:55:33 -07003548 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
Jason Evans0a9f9a42015-06-22 18:50:32 -07003549 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08003550 arena->nactive = 0;
3551 arena->ndirty = 0;
3552
Dave Watson3417a302016-02-23 12:06:21 -08003553 for(i = 0; i < runs_avail_nclasses; i++)
Jason Evansc6a2c392016-03-26 17:30:37 -07003554 arena_run_heap_new(&arena->runs_avail[i]);
Jason Evansee41ad42015-02-15 18:04:46 -08003555 qr_new(&arena->runs_dirty, rd_link);
Jason Evans738e0892015-02-18 01:15:50 -08003556 qr_new(&arena->chunks_cache, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08003557
Jason Evans243f7a02016-02-19 20:09:31 -08003558 if (opt_purge == purge_mode_decay)
3559 arena_decay_init(arena, arena_decay_time_default_get());
3560
Jason Evansee41ad42015-02-15 18:04:46 -08003561 ql_new(&arena->huge);
Jason Evansb2c0d632016-04-13 23:36:15 -07003562 if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
3563 WITNESS_RANK_ARENA_HUGE))
Jason Evansee41ad42015-02-15 18:04:46 -08003564 return (NULL);
3565
Jason Evansb49a3342015-07-28 11:28:19 -04003566 extent_tree_szad_new(&arena->chunks_szad_cached);
3567 extent_tree_ad_new(&arena->chunks_ad_cached);
3568 extent_tree_szad_new(&arena->chunks_szad_retained);
3569 extent_tree_ad_new(&arena->chunks_ad_retained);
Jason Evansb2c0d632016-04-13 23:36:15 -07003570 if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
3571 WITNESS_RANK_ARENA_CHUNKS))
Jason Evansee41ad42015-02-15 18:04:46 -08003572 return (NULL);
3573 ql_new(&arena->node_cache);
Jason Evansb2c0d632016-04-13 23:36:15 -07003574 if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
3575 WITNESS_RANK_ARENA_NODE_CACHE))
Jason Evansee41ad42015-02-15 18:04:46 -08003576 return (NULL);
3577
Jason Evansb49a3342015-07-28 11:28:19 -04003578 arena->chunk_hooks = chunk_hooks_default;
Jason Evanse476f8a2010-01-16 09:53:50 -08003579
3580 /* Initialize bins. */
Jason Evansb1726102012-02-28 16:50:47 -08003581 for (i = 0; i < NBINS; i++) {
Jason Evansc9a4bf92016-04-22 14:36:48 -07003582 arena_bin_t *bin = &arena->bins[i];
Jason Evansb2c0d632016-04-13 23:36:15 -07003583 if (malloc_mutex_init(&bin->lock, "arena_bin",
3584 WITNESS_RANK_ARENA_BIN))
Jason Evans8bb31982014-10-07 23:14:57 -07003585 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003586 bin->runcur = NULL;
Jason Evansc6a2c392016-03-26 17:30:37 -07003587 arena_run_heap_new(&bin->runs);
Jason Evans7372b152012-02-10 20:22:09 -08003588 if (config_stats)
3589 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
Jason Evanse476f8a2010-01-16 09:53:50 -08003590 }
3591
Jason Evans8bb31982014-10-07 23:14:57 -07003592 return (arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08003593}
3594
Jason Evans49f7e8f2011-03-15 13:59:15 -07003595/*
3596 * Calculate bin_info->run_size such that it meets the following constraints:
3597 *
Jason Evans155bfa72014-10-05 17:54:10 -07003598 * *) bin_info->run_size <= arena_maxrun
Jason Evans47e57f92011-03-22 09:00:56 -07003599 * *) bin_info->nregs <= RUN_MAXREGS
Jason Evans49f7e8f2011-03-15 13:59:15 -07003600 *
Jason Evans0c5dd032014-09-29 01:31:39 -07003601 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
3602 * these settings are all interdependent.
Jason Evans49f7e8f2011-03-15 13:59:15 -07003603 */
Jason Evans0c5dd032014-09-29 01:31:39 -07003604static void
3605bin_info_run_size_calc(arena_bin_info_t *bin_info)
Jason Evans49f7e8f2011-03-15 13:59:15 -07003606{
Jason Evans122449b2012-04-06 00:35:09 -07003607 size_t pad_size;
Jason Evans0c5dd032014-09-29 01:31:39 -07003608 size_t try_run_size, perfect_run_size, actual_run_size;
3609 uint32_t try_nregs, perfect_nregs, actual_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003610
3611 /*
Jason Evans122449b2012-04-06 00:35:09 -07003612 * Determine redzone size based on minimum alignment and minimum
3613 * redzone size. Add padding to the end of the run if it is needed to
3614 * align the regions. The padding allows each redzone to be half the
3615 * minimum alignment; without the padding, each redzone would have to
3616 * be twice as large in order to maintain alignment.
3617 */
Jason Evans9c640bf2014-09-11 16:20:44 -07003618 if (config_fill && unlikely(opt_redzone)) {
Jason Evans9f4ee602016-02-24 10:32:45 -08003619 size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
Jason Evans122449b2012-04-06 00:35:09 -07003620 if (align_min <= REDZONE_MINSIZE) {
3621 bin_info->redzone_size = REDZONE_MINSIZE;
3622 pad_size = 0;
3623 } else {
3624 bin_info->redzone_size = align_min >> 1;
3625 pad_size = bin_info->redzone_size;
3626 }
3627 } else {
3628 bin_info->redzone_size = 0;
3629 pad_size = 0;
3630 }
3631 bin_info->reg_interval = bin_info->reg_size +
3632 (bin_info->redzone_size << 1);
3633
3634 /*
Jason Evans0c5dd032014-09-29 01:31:39 -07003635 * Compute run size under ideal conditions (no redzones, no limit on run
3636 * size).
Jason Evans49f7e8f2011-03-15 13:59:15 -07003637 */
Jason Evans0c5dd032014-09-29 01:31:39 -07003638 try_run_size = PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003639 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003640 do {
Jason Evans0c5dd032014-09-29 01:31:39 -07003641 perfect_run_size = try_run_size;
3642 perfect_nregs = try_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003643
Jason Evansae4c7b42012-04-02 07:04:34 -07003644 try_run_size += PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003645 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
Jason Evans0c5dd032014-09-29 01:31:39 -07003646 } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
3647 assert(perfect_nregs <= RUN_MAXREGS);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003648
Jason Evans0c5dd032014-09-29 01:31:39 -07003649 actual_run_size = perfect_run_size;
Jason Evans9e1810c2016-02-24 12:42:23 -08003650 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3651 bin_info->reg_interval);
Jason Evans0c5dd032014-09-29 01:31:39 -07003652
3653 /*
3654 * Redzones can require enough padding that not even a single region can
3655 * fit within the number of pages that would normally be dedicated to a
3656 * run for this size class. Increase the run size until at least one
3657 * region fits.
3658 */
3659 while (actual_nregs == 0) {
3660 assert(config_fill && unlikely(opt_redzone));
3661
3662 actual_run_size += PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003663 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3664 bin_info->reg_interval);
Jason Evans0c5dd032014-09-29 01:31:39 -07003665 }
3666
3667 /*
3668 * Make sure that the run will fit within an arena chunk.
3669 */
Jason Evans155bfa72014-10-05 17:54:10 -07003670 while (actual_run_size > arena_maxrun) {
Jason Evans0c5dd032014-09-29 01:31:39 -07003671 actual_run_size -= PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003672 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3673 bin_info->reg_interval);
Jason Evans0c5dd032014-09-29 01:31:39 -07003674 }
3675 assert(actual_nregs > 0);
Jason Evans5707d6f2015-03-06 17:14:05 -08003676 assert(actual_run_size == s2u(actual_run_size));
Jason Evans49f7e8f2011-03-15 13:59:15 -07003677
3678 /* Copy final settings. */
Jason Evans0c5dd032014-09-29 01:31:39 -07003679 bin_info->run_size = actual_run_size;
3680 bin_info->nregs = actual_nregs;
Jason Evans9e1810c2016-02-24 12:42:23 -08003681 bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
3682 bin_info->reg_interval) - pad_size + bin_info->redzone_size);
Jason Evans122449b2012-04-06 00:35:09 -07003683
Jason Evans8a03cf02015-05-04 09:58:36 -07003684 if (actual_run_size > small_maxrun)
3685 small_maxrun = actual_run_size;
3686
Jason Evans122449b2012-04-06 00:35:09 -07003687 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
3688 * bin_info->reg_interval) + pad_size == bin_info->run_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003689}
3690
Jason Evansb1726102012-02-28 16:50:47 -08003691static void
Jason Evans49f7e8f2011-03-15 13:59:15 -07003692bin_info_init(void)
3693{
3694 arena_bin_info_t *bin_info;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003695
Jason Evans8a03cf02015-05-04 09:58:36 -07003696#define BIN_INFO_INIT_bin_yes(index, size) \
Jason Evansd04047c2014-05-28 16:11:55 -07003697 bin_info = &arena_bin_info[index]; \
Jason Evansb1726102012-02-28 16:50:47 -08003698 bin_info->reg_size = size; \
Jason Evans0c5dd032014-09-29 01:31:39 -07003699 bin_info_run_size_calc(bin_info); \
Jason Evansb1726102012-02-28 16:50:47 -08003700 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
Jason Evansd04047c2014-05-28 16:11:55 -07003701#define BIN_INFO_INIT_bin_no(index, size)
3702#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
3703 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
Jason Evansb1726102012-02-28 16:50:47 -08003704 SIZE_CLASSES
Jason Evansd04047c2014-05-28 16:11:55 -07003705#undef BIN_INFO_INIT_bin_yes
3706#undef BIN_INFO_INIT_bin_no
3707#undef SC
Jason Evans49f7e8f2011-03-15 13:59:15 -07003708}
3709
Jason Evans8a03cf02015-05-04 09:58:36 -07003710static bool
3711small_run_size_init(void)
3712{
3713
3714 assert(small_maxrun != 0);
3715
Jason Evansb2c0d632016-04-13 23:36:15 -07003716 small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) * (small_maxrun >>
Jason Evans8a03cf02015-05-04 09:58:36 -07003717 LG_PAGE));
3718 if (small_run_tab == NULL)
3719 return (true);
3720
3721#define TAB_INIT_bin_yes(index, size) { \
3722 arena_bin_info_t *bin_info = &arena_bin_info[index]; \
3723 small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
3724 }
3725#define TAB_INIT_bin_no(index, size)
3726#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
3727 TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
3728 SIZE_CLASSES
3729#undef TAB_INIT_bin_yes
3730#undef TAB_INIT_bin_no
3731#undef SC
3732
3733 return (false);
3734}
3735
Jason Evans0da8ce12016-02-22 16:20:56 -08003736static bool
3737run_quantize_init(void)
3738{
3739 unsigned i;
3740
3741 run_quantize_max = chunksize + large_pad;
3742
Jason Evansb2c0d632016-04-13 23:36:15 -07003743 run_quantize_floor_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
Jason Evans0da8ce12016-02-22 16:20:56 -08003744 (run_quantize_max >> LG_PAGE));
3745 if (run_quantize_floor_tab == NULL)
3746 return (true);
3747
Jason Evansb2c0d632016-04-13 23:36:15 -07003748 run_quantize_ceil_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
Jason Evans0da8ce12016-02-22 16:20:56 -08003749 (run_quantize_max >> LG_PAGE));
3750 if (run_quantize_ceil_tab == NULL)
3751 return (true);
3752
3753 for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) {
3754 size_t run_size = i << LG_PAGE;
3755
3756 run_quantize_floor_tab[i-1] =
3757 run_quantize_floor_compute(run_size);
3758 run_quantize_ceil_tab[i-1] =
3759 run_quantize_ceil_compute(run_size);
3760 }
3761
3762 return (false);
3763}
3764
Jason Evans8a03cf02015-05-04 09:58:36 -07003765bool
Jason Evansa0bf2422010-01-29 14:30:41 -08003766arena_boot(void)
Jason Evanse476f8a2010-01-16 09:53:50 -08003767{
Jason Evans7393f442010-10-01 17:35:43 -07003768 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08003769
Jason Evans8d6a3e82015-03-18 18:55:33 -07003770 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
Jason Evans243f7a02016-02-19 20:09:31 -08003771 arena_decay_time_default_set(opt_decay_time);
Jason Evans8d6a3e82015-03-18 18:55:33 -07003772
Jason Evanse476f8a2010-01-16 09:53:50 -08003773 /*
3774 * Compute the header size such that it is large enough to contain the
Jason Evans7393f442010-10-01 17:35:43 -07003775 * page map. The page map is biased to omit entries for the header
3776 * itself, so some iteration is necessary to compute the map bias.
3777 *
3778 * 1) Compute safe header_size and map_bias values that include enough
3779 * space for an unbiased page map.
3780 * 2) Refine map_bias based on (1) to omit the header pages in the page
3781 * map. The resulting map_bias may be one too small.
3782 * 3) Refine map_bias based on (2). The result will be >= the result
3783 * from (2), and will always be correct.
Jason Evanse476f8a2010-01-16 09:53:50 -08003784 */
Jason Evans7393f442010-10-01 17:35:43 -07003785 map_bias = 0;
3786 for (i = 0; i < 3; i++) {
Dmitry-Mea306a602015-09-04 13:15:28 +03003787 size_t header_size = offsetof(arena_chunk_t, map_bits) +
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003788 ((sizeof(arena_chunk_map_bits_t) +
3789 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
Jason Evans0c5dd032014-09-29 01:31:39 -07003790 map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
Jason Evans7393f442010-10-01 17:35:43 -07003791 }
3792 assert(map_bias > 0);
3793
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003794 map_misc_offset = offsetof(arena_chunk_t, map_bits) +
3795 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
3796
Jason Evans155bfa72014-10-05 17:54:10 -07003797 arena_maxrun = chunksize - (map_bias << LG_PAGE);
Jason Evansfc0b3b72014-10-09 17:54:06 -07003798 assert(arena_maxrun > 0);
Jason Evans676df882015-09-11 20:50:20 -07003799 large_maxclass = index2size(size2index(chunksize)-1);
3800 if (large_maxclass > arena_maxrun) {
Jason Evans155bfa72014-10-05 17:54:10 -07003801 /*
3802 * For small chunk sizes it's possible for there to be fewer
3803 * non-header pages available than are necessary to serve the
3804 * size classes just below chunksize.
3805 */
Jason Evans676df882015-09-11 20:50:20 -07003806 large_maxclass = arena_maxrun;
Jason Evans155bfa72014-10-05 17:54:10 -07003807 }
Jason Evans676df882015-09-11 20:50:20 -07003808 assert(large_maxclass > 0);
3809 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
Jason Evans3c4d92e2014-10-12 22:53:59 -07003810 nhclasses = NSIZES - nlclasses - NBINS;
Jason Evansa0bf2422010-01-29 14:30:41 -08003811
Jason Evansb1726102012-02-28 16:50:47 -08003812 bin_info_init();
Jason Evans0da8ce12016-02-22 16:20:56 -08003813 if (small_run_size_init())
3814 return (true);
3815 if (run_quantize_init())
3816 return (true);
3817
Dave Watson3417a302016-02-23 12:06:21 -08003818 runs_avail_bias = size2index(PAGE);
3819 runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias;
3820
Jason Evans0da8ce12016-02-22 16:20:56 -08003821 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003822}
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003823
3824void
Jason Evans174c0c32016-04-25 23:14:40 -07003825arena_prefork0(tsd_t *tsd, arena_t *arena)
3826{
3827
3828 malloc_mutex_prefork(tsd, &arena->lock);
3829}
3830
3831void
3832arena_prefork1(tsd_t *tsd, arena_t *arena)
3833{
3834
3835 malloc_mutex_prefork(tsd, &arena->chunks_mtx);
3836}
3837
3838void
3839arena_prefork2(tsd_t *tsd, arena_t *arena)
3840{
3841
3842 malloc_mutex_prefork(tsd, &arena->node_cache_mtx);
3843}
3844
3845void
3846arena_prefork3(tsd_t *tsd, arena_t *arena)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003847{
3848 unsigned i;
3849
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003850 for (i = 0; i < NBINS; i++)
Jason Evansb2c0d632016-04-13 23:36:15 -07003851 malloc_mutex_prefork(tsd, &arena->bins[i].lock);
Jason Evans174c0c32016-04-25 23:14:40 -07003852 malloc_mutex_prefork(tsd, &arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003853}
3854
3855void
Jason Evansb2c0d632016-04-13 23:36:15 -07003856arena_postfork_parent(tsd_t *tsd, arena_t *arena)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003857{
3858 unsigned i;
3859
Jason Evans174c0c32016-04-25 23:14:40 -07003860 malloc_mutex_postfork_parent(tsd, &arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003861 for (i = 0; i < NBINS; i++)
Jason Evansb2c0d632016-04-13 23:36:15 -07003862 malloc_mutex_postfork_parent(tsd, &arena->bins[i].lock);
3863 malloc_mutex_postfork_parent(tsd, &arena->node_cache_mtx);
3864 malloc_mutex_postfork_parent(tsd, &arena->chunks_mtx);
Jason Evansb2c0d632016-04-13 23:36:15 -07003865 malloc_mutex_postfork_parent(tsd, &arena->lock);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003866}
3867
3868void
Jason Evansb2c0d632016-04-13 23:36:15 -07003869arena_postfork_child(tsd_t *tsd, arena_t *arena)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003870{
3871 unsigned i;
3872
Jason Evans174c0c32016-04-25 23:14:40 -07003873 malloc_mutex_postfork_child(tsd, &arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003874 for (i = 0; i < NBINS; i++)
Jason Evansb2c0d632016-04-13 23:36:15 -07003875 malloc_mutex_postfork_child(tsd, &arena->bins[i].lock);
3876 malloc_mutex_postfork_child(tsd, &arena->node_cache_mtx);
3877 malloc_mutex_postfork_child(tsd, &arena->chunks_mtx);
Jason Evansb2c0d632016-04-13 23:36:15 -07003878 malloc_mutex_postfork_child(tsd, &arena->lock);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003879}