#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t	opt_purge = PURGE_DEFAULT;
const char	*purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t	lg_dirty_mult_default;
ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t	decay_time_default;

arena_bin_info_t	arena_bin_info[NBINS];

size_t		map_bias;
size_t		map_misc_offset;
size_t		arena_maxrun; /* Max run size for arenas. */
size_t		large_maxclass; /* Max large size class. */
size_t		run_quantize_max; /* Max run_quantize_*() input. */
static size_t	small_maxrun; /* Max run size for small size classes. */
static bool	*small_run_tab; /* Valid small run page multiples. */
static size_t	*run_quantize_floor_tab; /* run_quantize_floor() memoization. */
static size_t	*run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
unsigned	nlclasses; /* Number of large size classes. */
unsigned	nhclasses; /* Number of huge size classes. */
static szind_t	runs_avail_bias; /* Size index for first runs_avail tree. */
static szind_t	runs_avail_nclasses; /* Number of runs_avail trees. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	arena_purge_to_limit(arena_t *arena, size_t ndirty_limit);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned, bool decommitted);
static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

static size_t
run_quantize_floor_compute(size_t size)
{
	size_t qsize;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/* Don't change sizes that are valid small run sizes. */
	if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
		return (size);

	/*
	 * Round down to the nearest run size that can actually be requested
	 * during normal large allocation.  Add large_pad so that cache index
	 * randomization can offset the allocation from the page boundary.
	 */
	qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
	if (qsize <= SMALL_MAXCLASS + large_pad)
		return (run_quantize_floor_compute(size - large_pad));
	assert(qsize <= size);
	return (qsize);
}
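
/*
 * Added commentary (not part of the original source): the function above
 * computes the largest quantized size no greater than its input, where
 * quantized sizes are those a small or large run can actually have.  A hedged
 * sketch of the invariants it appears intended to satisfy:
 *
 *	size_t q = run_quantize_floor_compute(size);
 *	assert(q <= size);
 *	assert(q == run_quantize_floor_compute(q));
 *
 * i.e. the result never exceeds the input and is idempotent, so memoization
 * via run_quantize_floor_tab is stable.
 */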

static size_t
run_quantize_ceil_compute_hard(size_t size)
{
	size_t large_run_size_next;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/*
	 * Return the next quantized size greater than the input size.
	 * Quantized sizes comprise the union of run sizes that back small
	 * region runs, and run sizes that back large regions with no explicit
	 * alignment constraints.
	 */

	if (size > SMALL_MAXCLASS) {
		large_run_size_next = PAGE_CEILING(index2size(size2index(size -
		    large_pad) + 1) + large_pad);
	} else
		large_run_size_next = SIZE_T_MAX;
	if (size >= small_maxrun)
		return (large_run_size_next);

	while (true) {
		size += PAGE;
		assert(size <= small_maxrun);
		if (small_run_tab[size >> LG_PAGE]) {
			if (large_run_size_next < size)
				return (large_run_size_next);
			return (size);
		}
	}
}

static size_t
run_quantize_ceil_compute(size_t size)
{
	size_t qsize = run_quantize_floor_compute(size);

	if (qsize < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in.  This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		qsize = run_quantize_ceil_compute_hard(qsize);
	}
	return (qsize);
}
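
/*
 * Added commentary (not part of the original source): taken together, the
 * floor/ceil pair brackets any in-range page-aligned input:
 *
 *	run_quantize_floor_compute(size) <= size
 *	size <= run_quantize_ceil_compute(size)
 *
 * so run selection can conservatively key its search on the ceil value and
 * never pick an under-sized run.
 */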

#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor_impl)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_floor_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(run_quantize_floor_impl);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil_impl)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_ceil_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(run_quantize_ceil_impl);
#endif
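
/*
 * Added commentary: the #undef/#define pairs around the two functions above
 * are jemalloc's JEMALLOC_JET testing hooks.  In JET builds the static
 * definitions are renamed to *_impl and the public names become function
 * pointers initialized to those implementations, so a unit test can
 * interpose its own version, e.g. (hypothetical test code):
 *
 *	run_quantize_floor = my_instrumented_floor;
 *
 * In non-JET builds the renames are inert and no pointers are emitted.
 */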

static ph_heap_t *
arena_runs_avail_get(arena_t *arena, szind_t ind)
{

	assert(ind >= runs_avail_bias);
	assert(ind - runs_avail_bias < runs_avail_nclasses);

	return (&arena->runs_avail[ind - runs_avail_bias]);
}

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	ph_insert(arena_runs_avail_get(arena, ind),
	    &arena_miscelm_get_mutable(chunk, pageind)->ph_link);
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	ph_remove(arena_runs_avail_get(arena, ind),
	    &arena_miscelm_get_mutable(chunk, pageind)->ph_link);
}
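
/*
 * Added commentary: runs_avail is an array of pairing heaps, one per
 * quantized run size class, rather than a single size/address-keyed tree.
 * Both helpers above therefore share the same lookup step:
 *
 *	szind_t ind = size2index(run_quantize_floor(run_size));
 *	ph_heap_t *heap = arena_runs_avail_get(arena, ind);
 *
 * after which insertion/removal operates on the miscelm's embedded ph_link.
 */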

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}

JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}
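
/*
 * Illustrative example (not from the original source): regions are carved
 * from a run's backing pages at fixed intervals.  For a hypothetical bin with
 * reg0_offset == 0 and reg_interval == 32, the third free region found by
 * bitmap_sfu() (regind == 2) would be returned as
 *
 *	ret = (void *)((uintptr_t)rpages + 0 + 32 * 2);
 *
 * The bitmap is the authoritative allocation record; run->nfree is just a
 * cached count kept in sync with it.
 */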

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}
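
/*
 * Worked example (added commentary): splitting 3 pages off an 8-page free run
 * removes the full 8-page extent from runs_avail (and from the dirty run ring
 * if dirty), then re-inserts the trailing 5 pages as a fresh unallocated run.
 * Only the first and last map entries of the trailer get their unallocated
 * size set, since interior entries of a free run are never consulted.
 */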

static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}
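
/*
 * Added commentary (interpretation, not from the original source): the
 * flag_unzeroed_mask logic above preserves per-page CHUNK_MAP_UNZEROED bits
 * only for clean, committed runs:
 *
 *	mask = ((flag_dirty | flag_decommitted) == 0) ? CHUNK_MAP_UNZEROED : 0;
 *
 * For dirty runs the dirty flag already implies touched contents, and
 * decommitted runs read back as zero once recommitted, so in both cases the
 * per-page bit carries no information and is masked off.
 */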

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(chunk, &chunk->node));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(&arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
	    chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
			    chunksize, *zero, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
		    chunksize, *zero, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(&arena->lock);
	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, true);
	if (chunk != NULL) {
		if (arena_chunk_register(arena, chunk, *zero)) {
			chunk_dalloc_cache(arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
		*commit = true;
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
		    zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
	 * chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(arena);
		if (chunk == NULL)
			return (NULL);
	}

	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		bool committed;

		arena->spare = chunk;
		if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
			arena_run_dirty_remove(arena, spare, map_bias,
			    chunk_npages-map_bias);
		}

		chunk_deregister(spare, &spare->node);

		committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
		    0);
		if (!committed) {
			/*
			 * Decommit the header.  Mark the chunk as decommitted
			 * even if header decommit fails, since treating a
			 * partially committed chunk as committed has a high
			 * potential for causing later access of decommitted
			 * memory.
			 */
			chunk_hooks = chunk_hooks_get(arena);
			chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}

		chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
		    chunksize, committed);

		if (config_stats) {
			arena->stats.mapped -= chunksize;
			arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
		}
	} else
		arena->spare = chunk;
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}
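
/*
 * Added commentary: each *_update_undo above is the exact inverse of its
 * forward counterpart.  The huge allocation paths below update stats
 * optimistically while holding arena->lock, drop the lock to invoke chunk
 * hooks, and on failure retake the lock and roll the stats back; keeping each
 * inverse next to its forward operation makes that rollback easy to audit.
 */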

extent_node_t *
arena_node_alloc(arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(&arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(&arena->node_cache_mtx);
		return (base_alloc(sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(&arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(&arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(&arena->node_cache_mtx);
}

static void *
arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t usize, size_t alignment, bool *zero, size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
	    zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(&arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(&arena->lock);
	}

	return (ret);
}

void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
    bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);

	malloc_mutex_lock(&arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
	    zero, true);
	malloc_mutex_unlock(&arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
		    alignment, zero, csize);
	}

	return (ret);
}

void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(&arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(&arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
	}
	malloc_mutex_unlock(&arena->lock);
}

static bool
arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
    size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
	    zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(&arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena_nactive_sub(arena, udiff >> LG_PAGE);
		malloc_mutex_unlock(&arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(arena, chunk_hooks, nchunk, cdiff, *zero,
		    true);
		err = true;
	}
	return (err);
}

bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

	malloc_mutex_lock(&arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);

	err = (chunk_alloc_cache(arena, &chunk_hooks, nchunk, cdiff, chunksize,
	    zero, true) == NULL);
	malloc_mutex_unlock(&arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
		    chunk, oldsize, usize, zero, nchunk, udiff,
		    cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(arena, &chunk_hooks, nchunk, cdiff, *zero,
		    true);
		err = true;
	}

	return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
 * same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	szind_t ind, i;

	ind = size2index(run_quantize_ceil(size));
	for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
		ph_node_t *node = ph_first(arena_runs_avail_get(arena, i));
		if (node != NULL) {
			arena_chunk_map_misc_t *miscelm =
			    arena_ph_to_miscelm(node);
			return (&miscelm->run);
		}
	}

	return (NULL);
}
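
/*
 * Illustrative example (not from the original source): if a 3-page run is
 * requested, the 3-page heap is empty, and the 4-page heap is non-empty, the
 * loop above stops at the 4-page heap and returns that heap's first
 * (presumably lowest-address) run.  The caller then splits it and re-inserts
 * the unused remainder, so selection costs one ph_first() per size class
 * probed rather than a full tree search.
 */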
1078
Jason Evanse476f8a2010-01-16 09:53:50 -08001079static arena_run_t *
Jason Evansaa5113b2014-01-14 16:23:03 -08001080arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001081{
Jason Evansaa282662015-07-15 16:02:21 -07001082 arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001083 if (run != NULL) {
1084 if (arena_run_split_large(arena, run, size, zero))
1085 run = NULL;
1086 }
Jason Evans97c04a92015-03-06 19:57:36 -08001087 return (run);
Jason Evans5b0c9962012-05-10 15:47:24 -07001088}
1089
1090static arena_run_t *
Jason Evansaa5113b2014-01-14 16:23:03 -08001091arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
Jason Evans5b0c9962012-05-10 15:47:24 -07001092{
1093 arena_chunk_t *chunk;
1094 arena_run_t *run;
1095
Jason Evansfc0b3b72014-10-09 17:54:06 -07001096 assert(size <= arena_maxrun);
Jason Evans8a03cf02015-05-04 09:58:36 -07001097 assert(size == PAGE_CEILING(size));
Jason Evans5b0c9962012-05-10 15:47:24 -07001098
1099 /* Search the arena's chunks for the lowest best fit. */
Jason Evansaa5113b2014-01-14 16:23:03 -08001100 run = arena_run_alloc_large_helper(arena, size, zero);
Jason Evans5b0c9962012-05-10 15:47:24 -07001101 if (run != NULL)
1102 return (run);
1103
Jason Evanse476f8a2010-01-16 09:53:50 -08001104 /*
1105 * No usable runs. Create a new chunk from which to allocate the run.
1106 */
1107 chunk = arena_chunk_alloc(arena);
Jason Evanse00572b2010-03-14 19:43:56 -07001108 if (chunk != NULL) {
Jason Evans61a6dfc2016-03-23 16:04:38 -07001109 run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001110 if (arena_run_split_large(arena, run, size, zero))
1111 run = NULL;
Jason Evanse00572b2010-03-14 19:43:56 -07001112 return (run);
1113 }
1114
1115 /*
1116 * arena_chunk_alloc() failed, but another thread may have made
1117 * sufficient memory available while this one dropped arena->lock in
1118 * arena_chunk_alloc(), so search one more time.
1119 */
Jason Evansaa5113b2014-01-14 16:23:03 -08001120 return (arena_run_alloc_large_helper(arena, size, zero));
1121}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(arena_t *arena)
{
	ssize_t lg_dirty_mult;

	malloc_mutex_lock(&arena->lock);
	lg_dirty_mult = arena->lg_dirty_mult;
	malloc_mutex_unlock(&arena->lock);

	return (lg_dirty_mult);
}

bool
arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
{

	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);

	malloc_mutex_lock(&arena->lock);
	arena->lg_dirty_mult = lg_dirty_mult;
	arena_maybe_purge(arena);
	malloc_mutex_unlock(&arena->lock);

	return (false);
}

static void
arena_decay_deadline_init(arena_t *arena)
{

	assert(opt_purge == purge_mode_decay);

	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
	nstime_add(&arena->decay_deadline, &arena->decay_interval);
	if (arena->decay_time > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
		    nstime_ns(&arena->decay_interval)));
		nstime_add(&arena->decay_deadline, &jitter);
	}
}
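
/*
 * A minimal standalone sketch of the jittered-deadline idea above: the next
 * deadline is one interval past the epoch, plus a uniformly random offset in
 * [0, interval), so arenas do not all purge in lockstep.  Everything here is
 * illustrative; the LCG is a stand-in for prng_range(), not jemalloc's prng.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint64_t
example_jittered_deadline_ns(uint64_t epoch_ns, uint64_t interval_ns,
    uint64_t *state)
{
	assert(interval_ns > 0);
	/* LCG step; the modulo reduction is slightly biased, fine for jitter. */
	*state = *state * 6364136223846793005ULL + 1442695040888963407ULL;
	return (epoch_ns + interval_ns + *state % interval_ns);
}
#endif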

static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{

	assert(opt_purge == purge_mode_decay);

	return (nstime_compare(&arena->decay_deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
	static const uint64_t h_steps[] = {
#define	STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
	};
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	assert(opt_purge == purge_mode_decay);

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
		sum += arena->decay_backlog[i] * h_steps[i];
	npages_limit_backlog = (sum >> SMOOTHSTEP_BFP);

	return (npages_limit_backlog);
}
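
/*
 * Sketch of the fixed-point weighted sum above, with all state passed in so it
 * stands alone: each backlog slot holds the pages that became dirty during one
 * past interval, and h[i] is a Q(bfp) fixed-point smoothstep weight that
 * decays with age.  Names (backlog, h, nsteps, bfp) are illustrative only.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static size_t
example_backlog_limit(const size_t *backlog, const uint64_t *h,
    unsigned nsteps, unsigned bfp)
{
	uint64_t sum = 0;
	unsigned i;

	for (i = 0; i < nsteps; i++)
		sum += (uint64_t)backlog[i] * h[i];
	/* Shifting by bfp rounds down to a whole number of pages. */
	return ((size_t)(sum >> bfp));
}
#endif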

static void
arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
{
	uint64_t nadvance;
	nstime_t delta;
	size_t ndirty_delta;

	assert(opt_purge == purge_mode_decay);
	assert(arena_decay_deadline_reached(arena, time));

	nstime_copy(&delta, time);
	nstime_subtract(&delta, &arena->decay_epoch);
	nadvance = nstime_divide(&delta, &arena->decay_interval);
	assert(nadvance > 0);

	/* Add nadvance decay intervals to epoch. */
	nstime_copy(&delta, &arena->decay_interval);
	nstime_imultiply(&delta, nadvance);
	nstime_add(&arena->decay_epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(arena);

	/* Update the backlog. */
	if (nadvance >= SMOOTHSTEP_NSTEPS) {
		memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		memmove(arena->decay_backlog, &arena->decay_backlog[nadvance],
		    (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
		if (nadvance > 1) {
			memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
			    nadvance], 0, (nadvance-1) * sizeof(size_t));
		}
	}
	ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
	    arena->decay_ndirty : 0;
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
	arena->decay_backlog_npages_limit =
	    arena_decay_backlog_npages_limit(arena);
}
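
/*
 * Sketch of the backlog shift above, written with plain loops instead of
 * memmove()/memset() (illustrative names, equivalent outcome): advancing by n
 * intervals discards the n oldest slots, slides the rest toward index 0,
 * zeroes the newly exposed slots, then records the pages dirtied since the
 * last epoch in the newest slot.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void
example_backlog_advance(size_t *backlog, unsigned nsteps, uint64_t n,
    size_t new_dirty)
{
	unsigned i;

	if (n >= nsteps) {
		for (i = 0; i < nsteps; i++)
			backlog[i] = 0;
	} else {
		for (i = 0; i + (unsigned)n < nsteps; i++)
			backlog[i] = backlog[i + (unsigned)n];
		for (; i < nsteps; i++)
			backlog[i] = 0;
	}
	backlog[nsteps-1] = new_dirty;
}
#endif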

static size_t
arena_decay_npages_limit(arena_t *arena)
{
	size_t npages_limit;

	assert(opt_purge == purge_mode_decay);

	npages_limit = arena->decay_backlog_npages_limit;

	/* Add in any dirty pages created during the current epoch. */
	if (arena->ndirty > arena->decay_ndirty)
		npages_limit += arena->ndirty - arena->decay_ndirty;

	return (npages_limit);
}

static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{

	arena->decay_time = decay_time;
	if (decay_time > 0) {
		nstime_init2(&arena->decay_interval, decay_time, 0);
		nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&arena->decay_epoch, 0);
	nstime_update(&arena->decay_epoch);
	arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
	arena_decay_deadline_init(arena);
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog_npages_limit = 0;
	memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_time_valid(ssize_t decay_time)
{

	if (decay_time < -1)
		return (false);
	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
		return (true);
	return (false);
}

ssize_t
arena_decay_time_get(arena_t *arena)
{
	ssize_t decay_time;

	malloc_mutex_lock(&arena->lock);
	decay_time = arena->decay_time;
	malloc_mutex_unlock(&arena->lock);

	return (decay_time);
}

bool
arena_decay_time_set(arena_t *arena, ssize_t decay_time)
{

	if (!arena_decay_time_valid(decay_time))
		return (true);

	malloc_mutex_lock(&arena->lock);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_time changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_init(arena, decay_time);
	arena_maybe_purge(arena);
	malloc_mutex_unlock(&arena->lock);

	return (false);
}

static void
arena_maybe_purge_ratio(arena_t *arena)
{

	assert(opt_purge == purge_mode_ratio);

	/* Don't purge if the option is disabled. */
	if (arena->lg_dirty_mult < 0)
		return;

	/*
	 * Iterate, since preventing recursive purging could otherwise leave too
	 * many dirty pages.
	 */
	while (true) {
		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
		if (threshold < chunk_npages)
			threshold = chunk_npages;
		/*
		 * Don't purge unless the number of purgeable pages exceeds the
		 * threshold.
		 */
		if (arena->ndirty <= threshold)
			return;
		arena_purge_to_limit(arena, threshold);
	}
}
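
/*
 * Standalone sketch of the ratio threshold above: with lg_dirty_mult == k,
 * purging begins once ndirty exceeds max(nactive / 2^k, chunk_npages), i.e.
 * the permitted dirty:active ratio is 1:2^k with a floor of one chunk's worth
 * of pages.  Names and the SIZE_MAX "disabled" convention are illustrative.
 */
#if 0
#include <stdint.h>

static size_t
example_ratio_threshold(size_t nactive, int lg_dirty_mult, size_t chunk_npages)
{
	size_t threshold;

	if (lg_dirty_mult < 0)
		return (SIZE_MAX);	/* Ratio purging disabled. */
	threshold = nactive >> lg_dirty_mult;
	return (threshold < chunk_npages ? chunk_npages : threshold);
}
#endif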

static void
arena_maybe_purge_decay(arena_t *arena)
{
	nstime_t time;
	size_t ndirty_limit;

	assert(opt_purge == purge_mode_decay);

	/* Purge all or nothing if the option is disabled. */
	if (arena->decay_time <= 0) {
		if (arena->decay_time == 0)
			arena_purge_to_limit(arena, 0);
		return;
	}

	nstime_copy(&time, &arena->decay_epoch);
	if (unlikely(nstime_update(&time))) {
		/* Time went backwards.  Force an epoch advance. */
		nstime_copy(&time, &arena->decay_deadline);
	}

	if (arena_decay_deadline_reached(arena, &time))
		arena_decay_epoch_advance(arena, &time);

	ndirty_limit = arena_decay_npages_limit(arena);

	/*
	 * Don't try to purge unless the number of purgeable pages exceeds the
	 * current limit.
	 */
	if (arena->ndirty <= ndirty_limit)
		return;
	arena_purge_to_limit(arena, ndirty_limit);
}

void
arena_maybe_purge(arena_t *arena)
{

	/* Don't recursively purge. */
	if (arena->purging)
		return;

	if (opt_purge == purge_mode_ratio)
		arena_maybe_purge_ratio(arena);
	else
		arena_maybe_purge_decay(arena);
}

static size_t
arena_dirty_count(arena_t *arena)
{
	size_t ndirty = 0;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
			    rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			assert(arena_mapbits_allocated_get(chunk, pageind) ==
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		ndirty += npages;
	}

	return (ndirty);
}

static size_t
arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;
	size_t nstashed = 0;

	/* Stash runs/chunks according to ndirty_limit. */
	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
		size_t npages;
		rdelm_next = qr_next(rdelm, rd_link);

		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next;
			bool zero;
			UNUSED void *chunk;

			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			chunkselm_next = qr_next(chunkselm, cc_link);
			/*
			 * Allocate.  chunkselm remains valid due to the
			 * dalloc_node=false argument to chunk_alloc_cache().
			 */
			zero = false;
			chunk = chunk_alloc_cache(arena, chunk_hooks,
			    extent_node_addr_get(chunkselm),
			    extent_node_size_get(chunkselm), chunksize, &zero,
			    false);
			assert(chunk == extent_node_addr_get(chunkselm));
			assert(zero == extent_node_zeroed_get(chunkselm));
			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
			    purge_chunks_sentinel);
			assert(npages == (extent_node_size_get(chunkselm) >>
			    LG_PAGE));
			chunkselm = chunkselm_next;
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			arena_run_t *run = &miscelm->run;
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			/*
			 * If purging the spare chunk's run, make it available
			 * prior to allocation.
			 */
			if (chunk == arena->spare)
				arena_chunk_alloc(arena);

			/* Temporarily allocate the free dirty run. */
			arena_run_split_large(arena, run, run_size, false);
			/* Stash. */
			if (false)
				qr_new(rdelm, rd_link); /* Redundant. */
			else {
				assert(qr_next(rdelm, rd_link) == rdelm);
				assert(qr_prev(rdelm, rd_link) == rdelm);
			}
			qr_meld(purge_runs_sentinel, rdelm, rd_link);
		}

		nstashed += npages;
		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
		    ndirty_limit)
			break;
	}

	return (nstashed);
}

static size_t
arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	size_t npurged, nmadvise;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	if (config_stats)
		nmadvise = 0;
	npurged = 0;

	malloc_mutex_unlock(&arena->lock);
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			/*
			 * Don't actually purge the chunk here because 1)
			 * chunkselm is embedded in the chunk and must remain
			 * valid, and 2) we deallocate the chunk in
			 * arena_unstash_purged(), where it is destroyed,
			 * decommitted, or purged, depending on chunk
			 * deallocation policy.
			 */
			size_t size = extent_node_size_get(chunkselm);
			npages = size >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			size_t pageind, run_size, flag_unzeroed, flags, i;
			bool decommitted;
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			pageind = arena_miscelm_to_pageind(miscelm);
			run_size = arena_mapbits_large_size_get(chunk, pageind);
			npages = run_size >> LG_PAGE;

			assert(pageind + npages <= chunk_npages);
			assert(!arena_mapbits_decommitted_get(chunk, pageind));
			assert(!arena_mapbits_decommitted_get(chunk,
			    pageind+npages-1));
			decommitted = !chunk_hooks->decommit(chunk, chunksize,
			    pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
			if (decommitted) {
				flag_unzeroed = 0;
				flags = CHUNK_MAP_DECOMMITTED;
			} else {
				flag_unzeroed = chunk_purge_wrapper(arena,
				    chunk_hooks, chunk, chunksize, pageind <<
				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
				flags = flag_unzeroed;
			}
			arena_mapbits_large_set(chunk, pageind+npages-1, 0,
			    flags);
			arena_mapbits_large_set(chunk, pageind, run_size,
			    flags);

			/*
			 * Set the unzeroed flag for internal pages, now that
			 * chunk_purge_wrapper() has returned whether the pages
			 * were zeroed as a side effect of purging.  This chunk
			 * map modification is safe even though the arena mutex
			 * isn't currently owned by this thread, because the run
			 * is marked as allocated, thus protecting it from being
			 * modified by any other thread.  As long as these
			 * writes don't perturb the first and last elements'
			 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
			 */
			for (i = 1; i < npages-1; i++) {
				arena_mapbits_internal_set(chunk, pageind+i,
				    flag_unzeroed);
			}
		}

		npurged += npages;
		if (config_stats)
			nmadvise++;
	}
	malloc_mutex_lock(&arena->lock);

	if (config_stats) {
		arena->stats.nmadvise += nmadvise;
		arena->stats.purged += npurged;
	}

	return (npurged);
}

static void
arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;

	/* Deallocate chunks/runs. */
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
		rdelm_next = qr_next(rdelm, rd_link);
		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next = qr_next(chunkselm,
			    cc_link);
			void *addr = extent_node_addr_get(chunkselm);
			size_t size = extent_node_size_get(chunkselm);
			bool zeroed = extent_node_zeroed_get(chunkselm);
			bool committed = extent_node_committed_get(chunkselm);
			extent_node_dirty_remove(chunkselm);
			arena_node_dalloc(arena, chunkselm);
			chunkselm = chunkselm_next;
			chunk_dalloc_wrapper(arena, chunk_hooks, addr, size,
			    zeroed, committed);
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			bool decommitted = (arena_mapbits_decommitted_get(chunk,
			    pageind) != 0);
			arena_run_t *run = &miscelm->run;
			qr_remove(rdelm, rd_link);
			arena_run_dalloc(arena, run, false, true, decommitted);
		}
	}
}

/*
 * NB: ndirty_limit is interpreted differently depending on opt_purge:
 * - purge_mode_ratio: Purge as few dirty runs/chunks as possible to reach the
 *   desired state:
 *       (arena->ndirty <= ndirty_limit)
 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
 *   violating the invariant:
 *       (arena->ndirty >= ndirty_limit)
 */
static void
arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
{
	chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
	size_t npurge, npurged;
	arena_runs_dirty_link_t purge_runs_sentinel;
	extent_node_t purge_chunks_sentinel;

	arena->purging = true;

	/*
	 * Calls to arena_dirty_count() are disabled even for debug builds
	 * because overhead grows nonlinearly as memory usage increases.
	 */
	if (false && config_debug) {
		size_t ndirty = arena_dirty_count(arena);
		assert(ndirty == arena->ndirty);
	}
	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);

	qr_new(&purge_runs_sentinel, rd_link);
	extent_node_dirty_linkage_init(&purge_chunks_sentinel);

	npurge = arena_stash_dirty(arena, &chunk_hooks, ndirty_limit,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	if (npurge == 0)
		goto label_return;
	npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
	    &purge_chunks_sentinel);
	assert(npurged == npurge);
	arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
	    &purge_chunks_sentinel);

	if (config_stats)
		arena->stats.npurge++;

label_return:
	arena->purging = false;
}
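
/*
 * The stash/purge/unstash split above exists so the expensive purge syscalls
 * in arena_purge_stashed() can run with arena->lock dropped.  A minimal sketch
 * of the same pattern follows; every name here is hypothetical (extern stubs
 * stand in for the stash/purge/unstash phases), not jemalloc API.
 */
#if 0
#include <pthread.h>
#include <stddef.h>

struct example_arena {
	pthread_mutex_t lock;
	/* ... dirty-page bookkeeping ... */
};

extern size_t example_stash(struct example_arena *, void *stash);
extern void example_purge(void *stash);	/* Slow syscalls, no lock held. */
extern void example_unstash(struct example_arena *, void *stash);

static void
example_purge_pipeline(struct example_arena *a, void *stash)
{
	pthread_mutex_lock(&a->lock);
	if (example_stash(a, stash) > 0) {
		pthread_mutex_unlock(&a->lock);
		example_purge(stash);
		pthread_mutex_lock(&a->lock);
		example_unstash(a, stash);
	}
	pthread_mutex_unlock(&a->lock);
}
#endif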

void
arena_purge(arena_t *arena, bool all)
{

	malloc_mutex_lock(&arena->lock);
	if (all)
		arena_purge_to_limit(arena, 0);
	else
		arena_maybe_purge(arena);
	malloc_mutex_unlock(&arena->lock);
}

static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
    size_t flag_decommitted)
{
	size_t size = *p_size;
	size_t run_ind = *p_run_ind;
	size_t run_pages = *p_run_pages;

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
	    flag_decommitted) {
		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages);
		size_t nrun_pages = nrun_size >> LG_PAGE;

		/*
		 * Remove successor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == nrun_size);
		assert(arena_mapbits_dirty_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
		assert(arena_mapbits_decommitted_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_decommitted);
		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);

		/*
		 * If the successor is dirty, remove it from the set of dirty
		 * pages.
		 */
		if (flag_dirty != 0) {
			arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
			    nrun_pages);
		}

		size += nrun_size;
		run_pages += nrun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Try to coalesce backward. */
	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
	    run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
	    flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
	    flag_decommitted) {
		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind-1);
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		/*
		 * Remove predecessor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
		    prun_size);
		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
		assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
		    flag_decommitted);
		arena_avail_remove(arena, chunk, run_ind, prun_pages);

		/*
		 * If the predecessor is dirty, remove it from the set of dirty
		 * pages.
		 */
		if (flag_dirty != 0) {
			arena_run_dirty_remove(arena, chunk, run_ind,
			    prun_pages);
		}

		size += prun_size;
		run_pages += prun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	*p_size = size;
	*p_run_ind = run_ind;
	*p_run_pages = run_pages;
}
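
/*
 * Sketch of the coalescing arithmetic above using bare page indices, without
 * the chunk map plumbing: a free span [ind, ind+pages) absorbs an adjacent
 * free successor and/or predecessor whose flags match.  Struct and function
 * names are hypothetical.
 */
#if 0
#include <stddef.h>

struct example_span { size_t ind, pages; };

static void
example_coalesce(struct example_span *s, const struct example_span *next_free,
    const struct example_span *prev_free)
{
	/* Forward: successor must begin exactly at s->ind + s->pages. */
	if (next_free != NULL && next_free->ind == s->ind + s->pages)
		s->pages += next_free->pages;
	/* Backward: predecessor must end exactly at s->ind. */
	if (prev_free != NULL && prev_free->ind + prev_free->pages == s->ind) {
		s->ind = prev_free->ind;
		s->pages += prev_free->pages;
	}
}
#endif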

static size_t
arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t run_ind)
{
	size_t size;

	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);

	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
		size = arena_mapbits_large_size_get(chunk, run_ind);
		assert(size == PAGE || arena_mapbits_large_size_get(chunk,
		    run_ind+(size>>LG_PAGE)-1) == 0);
	} else {
		arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
		size = bin_info->run_size;
	}

	return (size);
}

static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
    bool decommitted)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);
	size = arena_run_size_get(arena, chunk, run, run_ind);
	run_pages = (size >> LG_PAGE);
	arena_nactive_sub(arena, run_pages);

	/*
	 * The run is dirty if the caller claims to have dirtied it, as well as
	 * if it was already dirty before being allocated and the caller
	 * doesn't claim to have cleaned it.
	 */
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
	    != 0)
		dirty = true;
	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
	flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;

	/* Mark pages as unallocated in the chunk map. */
	if (dirty || decommitted) {
		size_t flags = flag_dirty | flag_decommitted;
		arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    flags);
	} else {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind));
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
	}

	arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
	    flag_dirty, flag_decommitted);

	/* Insert into runs_avail, now that coalescing is complete. */
	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
	arena_avail_insert(arena, chunk, run_ind, run_pages);

	if (dirty)
		arena_run_dirty_insert(arena, chunk, run_ind, run_pages);

	/* Deallocate chunk if it is now completely unused. */
	if (size == arena_maxrun) {
		assert(run_ind == map_bias);
		assert(run_pages == (arena_maxrun >> LG_PAGE));
		arena_chunk_dalloc(arena, chunk);
	}

	/*
	 * It is okay to do dirty page processing here even if the chunk was
	 * deallocated above, since in that case it is the spare.  Waiting
	 * until after possible chunk deallocation to do dirty processing
	 * allows for an old spare to be fully deallocated, thus decreasing the
	 * chances of spuriously crossing the dirty page purging threshold.
	 */
	if (dirty)
		arena_maybe_purge(arena);
}

static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
	size_t pageind = arena_miscelm_to_pageind(miscelm);
	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * leading run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages-1)));
	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));

	if (config_debug) {
		UNUSED size_t tail_npages = newsize >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages)));

	arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
}

static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
	size_t pageind = arena_miscelm_to_pageind(miscelm);
	size_t head_npages = newsize >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_chunk_map_misc_t *tail_miscelm;
	arena_run_t *tail_run;

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * trailing run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages-1)));
	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));

	if (config_debug) {
		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages)));

	tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
	tail_run = &tail_miscelm->run;
	arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
	    0));
}
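
/*
 * Sketch of the head/tail trim arithmetic shared by the two functions above
 * (hypothetical names): a trim splits a run of oldsize bytes at a page
 * boundary, keeps one side, and hands the other back via arena_run_dalloc();
 * head_npages counts the pages that sit in front of the split point.
 */
#if 0
#include <stddef.h>

static void
example_trim(size_t oldsize, size_t newsize, unsigned lg_page, int trim_head,
    size_t *keep_pages, size_t *freed_pages)
{
	size_t head_npages = (trim_head ? oldsize - newsize : newsize) >>
	    lg_page;
	size_t tail_npages = (oldsize >> lg_page) - head_npages;

	if (trim_head) {
		*keep_pages = tail_npages;	/* Tail survives. */
		*freed_pages = head_npages;
	} else {
		*keep_pages = head_npages;	/* Head survives. */
		*freed_pages = tail_npages;
	}
}
#endif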

static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
	ph_node_t *node;
	arena_chunk_map_misc_t *miscelm;

	node = ph_first(&bin->runs);
	if (node == NULL)
		return (NULL);
	miscelm = arena_ph_to_miscelm(node);
	return (&miscelm->run);
}

static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);

	ph_insert(&bin->runs, &miscelm->ph_link);
}

static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);

	ph_remove(&bin->runs, &miscelm->ph_link);
}

static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
	arena_run_t *run = arena_bin_runs_first(bin);
	if (run != NULL) {
		arena_bin_runs_remove(bin, run);
		if (config_stats)
			bin->stats.reruns++;
	}
	return (run);
}

static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
	arena_run_t *run;
	szind_t binind;
	arena_bin_info_t *bin_info;

	/* Look for a usable run. */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);
	/* No existing runs have any space available. */

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];

	/* Allocate a new run. */
	malloc_mutex_unlock(&bin->lock);
	/******************************/
	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc_small(arena, bin_info->run_size, binind);
	if (run != NULL) {
		/* Initialize run internals. */
		run->binind = binind;
		run->nfree = bin_info->nregs;
		bitmap_init(run->bitmap, &bin_info->bitmap_info);
	}
	malloc_mutex_unlock(&arena->lock);
	/********************************/
	malloc_mutex_lock(&bin->lock);
	if (run != NULL) {
		if (config_stats) {
			bin->stats.nruns++;
			bin->stats.curruns++;
		}
		return (run);
	}

	/*
	 * arena_run_alloc_small() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);

	return (NULL);
}
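
/*
 * The lock juggling above is a general drop-and-retry pattern: release the
 * fine-grained bin lock before taking the coarser arena lock, then re-check
 * under the original lock because another thread may have satisfied the
 * request in the interim.  A minimal self-contained sketch; the helper names
 * are hypothetical stubs, not jemalloc API.
 */
#if 0
#include <pthread.h>

extern void *try_get_cached(void);	/* Requires the fine lock. */
extern void *allocate_fresh(void);	/* Requires the coarse lock. */

static void *
example_get(pthread_mutex_t *fine, pthread_mutex_t *coarse)
{
	void *p;

	/* Caller holds *fine. */
	if ((p = try_get_cached()) != NULL)
		return (p);
	pthread_mutex_unlock(fine);
	pthread_mutex_lock(coarse);
	p = allocate_fresh();
	pthread_mutex_unlock(coarse);
	pthread_mutex_lock(fine);
	if (p != NULL)
		return (p);
	/* Retry: another thread may have refilled the cache meanwhile. */
	return (try_get_cached());
}
#endif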

/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
	szind_t binind;
	arena_bin_info_t *bin_info;
	arena_run_t *run;

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];
	bin->runcur = NULL;
	run = arena_bin_nonfull_run_get(arena, bin);
	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
		/*
		 * Another thread updated runcur while this one ran without the
		 * bin lock in arena_bin_nonfull_run_get().
		 */
		void *ret;
		assert(bin->runcur->nfree > 0);
		ret = arena_run_reg_alloc(bin->runcur, bin_info);
		if (run != NULL) {
			arena_chunk_t *chunk;

			/*
			 * arena_run_alloc_small() may have allocated run, or
			 * it may have pulled run from the bin's runs heap.
			 * Therefore it is unsafe to make any assumptions about
			 * how run has previously been used, and
			 * arena_bin_lower_run() must be called, as if a region
			 * were just deallocated from the run.
			 */
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
			if (run->nfree == bin_info->nregs)
				arena_dalloc_bin_run(arena, chunk, run, bin);
			else
				arena_bin_lower_run(arena, chunk, run, bin);
		}
		return (ret);
	}

	if (run == NULL)
		return (NULL);

	bin->runcur = run;

	assert(bin->runcur->nfree > 0);

	return (arena_run_reg_alloc(bin->runcur, bin_info));
}

void
arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
    szind_t binind, uint64_t prof_accumbytes)
{
	unsigned i, nfill;
	arena_bin_t *bin;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(arena, prof_accumbytes))
		prof_idump();
	bin = &arena->bins[binind];
	malloc_mutex_lock(&bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tbin->lg_fill_div); i < nfill; i++) {
		arena_run_t *run;
		void *ptr;
		if ((run = bin->runcur) != NULL && run->nfree > 0)
			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
		else
			ptr = arena_bin_malloc_hard(arena, bin);
		if (ptr == NULL) {
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
			 */
			if (i > 0) {
				memmove(tbin->avail - i, tbin->avail - nfill,
				    i * sizeof(void *));
			}
			break;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    true);
		}
		/* Insert such that low regions get used first. */
		*(tbin->avail - nfill + i) = ptr;
	}
	if (config_stats) {
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(&bin->lock);
	tbin->ncached = i;
	arena_decay_tick(tsd, arena);
}
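
/*
 * Sketch of the fill sizing and stack addressing above (illustrative names):
 * each refill grabs ncached_max >> lg_fill_div objects, so raising
 * lg_fill_div halves the batch, and tbin->avail points one past the cached
 * objects, which is why slot j of a batch of nfill lives at avail[-nfill + j].
 */
#if 0
static unsigned
example_fill_count(unsigned ncached_max, unsigned lg_fill_div)
{
	return (ncached_max >> lg_fill_div);
}

static void
example_stack_store(void **avail, unsigned nfill, unsigned j, void *ptr)
{
	avail[-(int)nfill + (int)j] = ptr;	/* == *(avail - nfill + j) */
}
#endif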

void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{

	if (zero) {
		size_t redzone_size = bin_info->redzone_size;
		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
		    redzone_size);
		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
		    redzone_size);
	} else {
		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
		    bin_info->reg_interval);
	}
}
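
/*
 * Redzone layout implied by the junk-fill code above: each reg_size-byte
 * region is bracketed by redzone_size bytes on both sides, within a stride of
 * reg_interval bytes.  0xa5 marks allocated junk; arena_dalloc_junk_small()
 * later rewrites with 0x5a.  A minimal checker mirroring the zero==true case
 * follows; it is illustrative only and not part of jemalloc's API.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static int
example_redzones_intact(const uint8_t *ptr, size_t reg_size,
    size_t redzone_size)
{
	size_t i;

	for (i = 1; i <= redzone_size; i++) {
		if (ptr[-(ptrdiff_t)i] != 0xa5)
			return (0);	/* Corruption before the region. */
	}
	for (i = 0; i < redzone_size; i++) {
		if (ptr[reg_size + i] != 0xa5)
			return (0);	/* Corruption after the region. */
	}
	return (1);
}
#endif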

#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
    size_t offset, uint8_t byte)
{

	malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
	    "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
	    after ? "after" : "before", ptr, usize, byte);
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
    JEMALLOC_N(arena_redzone_corruption_impl);
#endif
2283
2284static void
2285arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07002286{
Jason Evans122449b2012-04-06 00:35:09 -07002287 bool error = false;
2288
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002289 if (opt_junk_alloc) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002290 size_t size = bin_info->reg_size;
2291 size_t redzone_size = bin_info->redzone_size;
2292 size_t i;
2293
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002294 for (i = 1; i <= redzone_size; i++) {
2295 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
2296 if (*byte != 0xa5) {
2297 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002298 arena_redzone_corruption(ptr, size, false, i,
2299 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002300 if (reset)
2301 *byte = 0xa5;
2302 }
2303 }
2304 for (i = 0; i < redzone_size; i++) {
2305 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
2306 if (*byte != 0xa5) {
2307 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002308 arena_redzone_corruption(ptr, size, true, i,
2309 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002310 if (reset)
2311 *byte = 0xa5;
2312 }
Jason Evans122449b2012-04-06 00:35:09 -07002313 }
2314 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002315
Jason Evans122449b2012-04-06 00:35:09 -07002316 if (opt_abort && error)
2317 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08002318}
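/*
 * Illustrative sketch, not part of upstream arena.c: the region layout
 * that arena_alloc_junk_small() fills and arena_redzones_validate()
 * checks.  Each region occupies reg_interval = reg_size +
 * 2 * redzone_size bytes:
 *
 *	| redzone | reg_size bytes | redzone |
 *	          ^ ptr
 *
 * Allocation writes 0xa5 into the redzones (or the whole interval when
 * junk-filling a non-zeroed region); deallocation validates both
 * redzones and then writes 0x5a.  A standalone check mirroring the two
 * loops above (toy_* names are hypothetical):
 */
#if 0	/* illustration only; compile separately */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
toy_redzones_intact(const void *ptr, size_t reg_size, size_t redzone_size)
{
	const uint8_t *base = (const uint8_t *)ptr;
	size_t i;

	for (i = 1; i <= redzone_size; i++) {
		if (base[-(ptrdiff_t)i] != 0xa5)
			return (false);	/* leading redzone clobbered */
	}
	for (i = 0; i < redzone_size; i++) {
		if (base[reg_size + i] != 0xa5)
			return (false);	/* trailing redzone clobbered */
	}
	return (true);
}
#endif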
Jason Evans122449b2012-04-06 00:35:09 -07002319
Jason Evans6b694c42014-01-07 16:47:56 -08002320#ifdef JEMALLOC_JET
2321#undef arena_dalloc_junk_small
2322#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
2323#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08002324void
2325arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2326{
2327 size_t redzone_size = bin_info->redzone_size;
2328
2329 arena_redzones_validate(ptr, bin_info, false);
Jason Evans122449b2012-04-06 00:35:09 -07002330 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
2331 bin_info->reg_interval);
2332}
Jason Evans6b694c42014-01-07 16:47:56 -08002333#ifdef JEMALLOC_JET
2334#undef arena_dalloc_junk_small
2335#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2336arena_dalloc_junk_small_t *arena_dalloc_junk_small =
2337 JEMALLOC_N(arena_dalloc_junk_small_impl);
2338#endif
Jason Evans122449b2012-04-06 00:35:09 -07002339
Jason Evans0d6c5d82013-12-17 15:14:36 -08002340void
2341arena_quarantine_junk_small(void *ptr, size_t usize)
2342{
Jason Evansd01fd192015-08-19 15:21:32 -07002343 szind_t binind;
Jason Evans0d6c5d82013-12-17 15:14:36 -08002344 arena_bin_info_t *bin_info;
2345 cassert(config_fill);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002346 assert(opt_junk_free);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002347 assert(opt_quarantine);
2348 assert(usize <= SMALL_MAXCLASS);
2349
Jason Evans155bfa72014-10-05 17:54:10 -07002350 binind = size2index(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002351 bin_info = &arena_bin_info[binind];
2352 arena_redzones_validate(ptr, bin_info, true);
2353}
2354
Jason Evans578cd162016-02-19 18:40:03 -08002355static void *
Jason Evans0c516a02016-02-25 15:29:49 -08002356arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002357{
2358 void *ret;
2359 arena_bin_t *bin;
Jason Evans0c516a02016-02-25 15:29:49 -08002360 size_t usize;
Jason Evanse476f8a2010-01-16 09:53:50 -08002361 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002362
Jason Evansb1726102012-02-28 16:50:47 -08002363 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08002364 bin = &arena->bins[binind];
Jason Evans0c516a02016-02-25 15:29:49 -08002365 usize = index2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08002366
Jason Evans86815df2010-03-13 20:32:56 -08002367 malloc_mutex_lock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002368 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002369 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002370 else
2371 ret = arena_bin_malloc_hard(arena, bin);
2372
2373 if (ret == NULL) {
Jason Evans86815df2010-03-13 20:32:56 -08002374 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002375 return (NULL);
2376 }
2377
Jason Evans7372b152012-02-10 20:22:09 -08002378 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002379 bin->stats.nmalloc++;
2380 bin->stats.nrequests++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002381 bin->stats.curregs++;
Jason Evans7372b152012-02-10 20:22:09 -08002382 }
Jason Evans86815df2010-03-13 20:32:56 -08002383 malloc_mutex_unlock(&bin->lock);
Jason Evans0c516a02016-02-25 15:29:49 -08002384 if (config_prof && !isthreaded && arena_prof_accum(arena, usize))
Jason Evans88c222c2013-02-06 11:59:30 -08002385 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08002386
Jason Evans551ebc42014-10-03 10:16:09 -07002387 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002388 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002389 if (unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002390 arena_alloc_junk_small(ret,
2391 &arena_bin_info[binind], false);
Jason Evans9c640bf2014-09-11 16:20:44 -07002392 } else if (unlikely(opt_zero))
Jason Evans0c516a02016-02-25 15:29:49 -08002393 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002394 }
Jason Evans0c516a02016-02-25 15:29:49 -08002395 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
Jason Evans122449b2012-04-06 00:35:09 -07002396 } else {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002397 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002398 arena_alloc_junk_small(ret, &arena_bin_info[binind],
2399 true);
2400 }
Jason Evans0c516a02016-02-25 15:29:49 -08002401 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2402 memset(ret, 0, usize);
Jason Evans122449b2012-04-06 00:35:09 -07002403 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002404
Jason Evans243f7a02016-02-19 20:09:31 -08002405 arena_decay_tick(tsd, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002406 return (ret);
2407}
2408
2409void *
Jason Evans0c516a02016-02-25 15:29:49 -08002410arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002411{
2412 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07002413 size_t usize;
Jason Evans8a03cf02015-05-04 09:58:36 -07002414 uintptr_t random_offset;
Jason Evans0c5dd032014-09-29 01:31:39 -07002415 arena_run_t *run;
2416 arena_chunk_map_misc_t *miscelm;
Dmitri Smirnov33184bf2016-02-29 14:30:19 -08002417 UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002418
2419 /* Large allocation. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002420 usize = index2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08002421 malloc_mutex_lock(&arena->lock);
Jason Evans8a03cf02015-05-04 09:58:36 -07002422 if (config_cache_oblivious) {
Jason Evansbce61d62015-07-07 09:32:05 -07002423 uint64_t r;
2424
Jason Evans8a03cf02015-05-04 09:58:36 -07002425 /*
2426 * Compute a uniformly distributed offset within the first page
 2427 * that is a multiple of the cacheline size, e.g. [0 .. 64) * 64
2428 * for 4 KiB pages and 64-byte cachelines.
2429 */
Jason Evans34676d32016-02-09 16:28:40 -08002430 r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
Jason Evans8a03cf02015-05-04 09:58:36 -07002431 random_offset = ((uintptr_t)r) << LG_CACHELINE;
2432 } else
2433 random_offset = 0;
2434 run = arena_run_alloc_large(arena, usize + large_pad, zero);
Jason Evans0c5dd032014-09-29 01:31:39 -07002435 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002436 malloc_mutex_unlock(&arena->lock);
2437 return (NULL);
2438 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002439 miscelm = arena_run_to_miscelm(run);
Jason Evans8a03cf02015-05-04 09:58:36 -07002440 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2441 random_offset);
Jason Evans7372b152012-02-10 20:22:09 -08002442 if (config_stats) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002443 szind_t index = binind - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002444
Jason Evans7372b152012-02-10 20:22:09 -08002445 arena->stats.nmalloc_large++;
2446 arena->stats.nrequests_large++;
Jason Evans155bfa72014-10-05 17:54:10 -07002447 arena->stats.allocated_large += usize;
2448 arena->stats.lstats[index].nmalloc++;
2449 arena->stats.lstats[index].nrequests++;
2450 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002451 }
Jason Evans7372b152012-02-10 20:22:09 -08002452 if (config_prof)
Jason Evans155bfa72014-10-05 17:54:10 -07002453 idump = arena_prof_accum_locked(arena, usize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002454 malloc_mutex_unlock(&arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08002455 if (config_prof && idump)
2456 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08002457
Jason Evans551ebc42014-10-03 10:16:09 -07002458 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002459 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002460 if (unlikely(opt_junk_alloc))
Jason Evans155bfa72014-10-05 17:54:10 -07002461 memset(ret, 0xa5, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002462 else if (unlikely(opt_zero))
Jason Evans155bfa72014-10-05 17:54:10 -07002463 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002464 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002465 }
2466
Jason Evans243f7a02016-02-19 20:09:31 -08002467 arena_decay_tick(tsd, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002468 return (ret);
2469}
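/*
 * Illustrative sketch, not part of upstream arena.c: the cache-oblivious
 * offset arithmetic above.  With LG_PAGE == 12 and LG_CACHELINE == 6,
 * prng_lg_range() yields r uniform in [0, 2^6) == [0, 64), so the offset
 * r << LG_CACHELINE is one of the 64 cacheline-aligned values
 * 0, 64, ..., 4032 within the first page of the run; large_pad (an extra
 * page when cache-oblivious allocation is enabled) makes room for the
 * largest offset.  Standalone model (TOY_* names are hypothetical):
 */
#if 0	/* illustration only; compile separately */
#include <assert.h>
#include <stdint.h>

#define	TOY_LG_PAGE		12
#define	TOY_LG_CACHELINE	6

static uintptr_t
toy_random_offset(uint64_t r)
{

	/* r is assumed uniform in [0, 1 << (TOY_LG_PAGE - TOY_LG_CACHELINE)). */
	assert(r < ((uint64_t)1 << (TOY_LG_PAGE - TOY_LG_CACHELINE)));
	return ((uintptr_t)r << TOY_LG_CACHELINE);
}
#endif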
2470
Jason Evans578cd162016-02-19 18:40:03 -08002471void *
2472arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
2473 bool zero, tcache_t *tcache)
2474{
2475
2476 arena = arena_choose(tsd, arena);
2477 if (unlikely(arena == NULL))
2478 return (NULL);
2479
2480 if (likely(size <= SMALL_MAXCLASS))
Jason Evans0c516a02016-02-25 15:29:49 -08002481 return (arena_malloc_small(tsd, arena, ind, zero));
Jason Evans578cd162016-02-19 18:40:03 -08002482 if (likely(size <= large_maxclass))
Jason Evans0c516a02016-02-25 15:29:49 -08002483 return (arena_malloc_large(tsd, arena, ind, zero));
2484 return (huge_malloc(tsd, arena, index2size(ind), zero, tcache));
Jason Evans578cd162016-02-19 18:40:03 -08002485}
2486
Jason Evanse476f8a2010-01-16 09:53:50 -08002487/* Only handles large allocations that require more than page alignment. */
Jason Evans88fef7c2015-02-12 14:06:37 -08002488static void *
Jason Evans50883de2015-07-23 17:13:18 -07002489arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002490 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002491{
2492 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07002493 size_t alloc_size, leadsize, trailsize;
2494 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002495 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002496 arena_chunk_map_misc_t *miscelm;
2497 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002498
Jason Evans50883de2015-07-23 17:13:18 -07002499 assert(usize == PAGE_CEILING(usize));
Jason Evans93443682010-10-20 17:39:18 -07002500
Jason Evans88fef7c2015-02-12 14:06:37 -08002501 arena = arena_choose(tsd, arena);
2502 if (unlikely(arena == NULL))
2503 return (NULL);
2504
Jason Evans93443682010-10-20 17:39:18 -07002505 alignment = PAGE_CEILING(alignment);
Jason Evans50883de2015-07-23 17:13:18 -07002506 alloc_size = usize + large_pad + alignment - PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08002507
2508 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08002509 run = arena_run_alloc_large(arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07002510 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002511 malloc_mutex_unlock(&arena->lock);
2512 return (NULL);
2513 }
Jason Evans5ff709c2012-04-11 18:13:45 -07002514 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002515 miscelm = arena_run_to_miscelm(run);
2516 rpages = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002517
Jason Evans0c5dd032014-09-29 01:31:39 -07002518 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2519 (uintptr_t)rpages;
Jason Evans50883de2015-07-23 17:13:18 -07002520 assert(alloc_size >= leadsize + usize);
2521 trailsize = alloc_size - leadsize - usize - large_pad;
Jason Evans5ff709c2012-04-11 18:13:45 -07002522 if (leadsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07002523 arena_chunk_map_misc_t *head_miscelm = miscelm;
2524 arena_run_t *head_run = run;
2525
Jason Evans61a6dfc2016-03-23 16:04:38 -07002526 miscelm = arena_miscelm_get_mutable(chunk,
Jason Evans0c5dd032014-09-29 01:31:39 -07002527 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2528 LG_PAGE));
2529 run = &miscelm->run;
2530
2531 arena_run_trim_head(arena, chunk, head_run, alloc_size,
2532 alloc_size - leadsize);
Jason Evans5ff709c2012-04-11 18:13:45 -07002533 }
2534 if (trailsize != 0) {
Jason Evans50883de2015-07-23 17:13:18 -07002535 arena_run_trim_tail(arena, chunk, run, usize + large_pad +
2536 trailsize, usize + large_pad, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002537 }
Jason Evans8fadb1a2015-08-04 10:49:46 -07002538 if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2539 size_t run_ind =
2540 arena_miscelm_to_pageind(arena_run_to_miscelm(run));
Jason Evansde249c82015-08-09 16:47:27 -07002541 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2542 bool decommitted = (arena_mapbits_decommitted_get(chunk,
2543 run_ind) != 0);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002544
Jason Evansde249c82015-08-09 16:47:27 -07002545 assert(decommitted); /* Cause of OOM. */
2546 arena_run_dalloc(arena, run, dirty, false, decommitted);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002547 malloc_mutex_unlock(&arena->lock);
2548 return (NULL);
2549 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002550 ret = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002551
Jason Evans7372b152012-02-10 20:22:09 -08002552 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002553 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002554
Jason Evans7372b152012-02-10 20:22:09 -08002555 arena->stats.nmalloc_large++;
2556 arena->stats.nrequests_large++;
Jason Evans50883de2015-07-23 17:13:18 -07002557 arena->stats.allocated_large += usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002558 arena->stats.lstats[index].nmalloc++;
2559 arena->stats.lstats[index].nrequests++;
2560 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002561 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002562 malloc_mutex_unlock(&arena->lock);
2563
Jason Evans551ebc42014-10-03 10:16:09 -07002564 if (config_fill && !zero) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002565 if (unlikely(opt_junk_alloc))
Jason Evans50883de2015-07-23 17:13:18 -07002566 memset(ret, 0xa5, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002567 else if (unlikely(opt_zero))
Jason Evans50883de2015-07-23 17:13:18 -07002568 memset(ret, 0, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002569 }
Jason Evans243f7a02016-02-19 20:09:31 -08002570 arena_decay_tick(tsd, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002571 return (ret);
2572}
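/*
 * Worked example, not part of upstream arena.c: the head/tail trimming
 * above with 4 KiB pages, usize == 8 pages, large_pad == 1 page, and
 * alignment == 4 pages.  Then alloc_size == 8 + 1 + 4 - 1 == 12 pages;
 * if the run starts one page past a 4-page boundary, leadsize == 3 pages
 * and trailsize == 12 - 3 - 8 - 1 == 0 pages, so only the head is
 * trimmed.  Standalone arithmetic check (TOY_* names are hypothetical):
 */
#if 0	/* illustration only; compile separately */
#include <assert.h>
#include <stdint.h>

#define	TOY_PAGE	((uintptr_t)4096)
#define	TOY_ALIGN_CEIL(a, align) (((a) + (align) - 1) & ~((align) - 1))

static void
toy_palloc_trim_example(void)
{
	uintptr_t usize = 8 * TOY_PAGE, pad = TOY_PAGE, align = 4 * TOY_PAGE;
	uintptr_t alloc_size = usize + pad + align - TOY_PAGE;
	uintptr_t rpages = 1 * TOY_PAGE;	/* hypothetical run address */
	uintptr_t leadsize = TOY_ALIGN_CEIL(rpages, align) - rpages;
	uintptr_t trailsize = alloc_size - leadsize - usize - pad;

	assert(leadsize == 3 * TOY_PAGE);
	assert(trailsize == 0);
}
#endif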
2573
Jason Evans88fef7c2015-02-12 14:06:37 -08002574void *
2575arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
2576 bool zero, tcache_t *tcache)
2577{
2578 void *ret;
2579
Jason Evans8a03cf02015-05-04 09:58:36 -07002580 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
Jason Evans51541752015-05-19 17:42:31 -07002581 && (usize & PAGE_MASK) == 0))) {
2582 /* Small; alignment doesn't require special run placement. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002583 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
2584 tcache, true);
Jason Evans676df882015-09-11 20:50:20 -07002585 } else if (usize <= large_maxclass && alignment <= PAGE) {
Jason Evans51541752015-05-19 17:42:31 -07002586 /*
2587 * Large; alignment doesn't require special run placement.
2588 * However, the cached pointer may be at a random offset from
2589 * the base of the run, so do some bit manipulation to retrieve
2590 * the base.
2591 */
Qi Wangf4a0f322015-10-27 15:12:10 -07002592 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
2593 tcache, true);
Jason Evans51541752015-05-19 17:42:31 -07002594 if (config_cache_oblivious)
2595 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2596 } else {
Jason Evans676df882015-09-11 20:50:20 -07002597 if (likely(usize <= large_maxclass)) {
Jason Evans88fef7c2015-02-12 14:06:37 -08002598 ret = arena_palloc_large(tsd, arena, usize, alignment,
2599 zero);
2600 } else if (likely(alignment <= chunksize))
2601 ret = huge_malloc(tsd, arena, usize, zero, tcache);
2602 else {
2603 ret = huge_palloc(tsd, arena, usize, alignment, zero,
2604 tcache);
2605 }
2606 }
2607 return (ret);
2608}
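/*
 * Summary, not part of upstream arena.c: the dispatch performed by
 * arena_palloc() above, per (usize, alignment) pair:
 *
 *   usize <= SMALL_MAXCLASS, alignment < PAGE (or == PAGE with a
 *	page-multiple usize)		-> small path; run placement
 *					   already satisfies alignment
 *   usize <= large_maxclass,
 *	alignment <= PAGE		-> plain large allocation; the
 *					   random offset is masked off to
 *					   recover the run base
 *   usize <= large_maxclass,
 *	alignment > PAGE		-> arena_palloc_large()
 *   usize > large_maxclass,
 *	alignment <= chunksize		-> huge_malloc()
 *   otherwise				-> huge_palloc()
 */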
2609
Jason Evans0b270a92010-03-31 16:45:04 -07002610void
2611arena_prof_promoted(const void *ptr, size_t size)
2612{
2613 arena_chunk_t *chunk;
Jason Evans155bfa72014-10-05 17:54:10 -07002614 size_t pageind;
Jason Evansd01fd192015-08-19 15:21:32 -07002615 szind_t binind;
Jason Evans0b270a92010-03-31 16:45:04 -07002616
Jason Evans78f73522012-04-18 13:38:40 -07002617 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07002618 assert(ptr != NULL);
2619 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evans155bfa72014-10-05 17:54:10 -07002620 assert(isalloc(ptr, false) == LARGE_MINCLASS);
2621 assert(isalloc(ptr, true) == LARGE_MINCLASS);
Jason Evansb1726102012-02-28 16:50:47 -08002622 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07002623
2624 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07002625 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans155bfa72014-10-05 17:54:10 -07002626 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08002627 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07002628 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07002629
Jason Evans155bfa72014-10-05 17:54:10 -07002630 assert(isalloc(ptr, false) == LARGE_MINCLASS);
Jason Evans122449b2012-04-06 00:35:09 -07002631 assert(isalloc(ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07002632}
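/*
 * Note, not part of upstream arena.c: after arena_prof_promoted(ptr,
 * size), a sampled small allocation is physically backed by a
 * LARGE_MINCLASS run while the chunk map's binind records the logical
 * small size class, so the two isalloc() views differ exactly as the
 * asserts above require:
 *
 *	isalloc(ptr, false) == LARGE_MINCLASS	(physical size)
 *	isalloc(ptr, true)  == size		(prof-demoted logical size)
 */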
Jason Evans6109fe02010-02-10 10:37:56 -08002633
Jason Evanse476f8a2010-01-16 09:53:50 -08002634static void
Jason Evans088e6a02010-10-18 00:04:44 -07002635arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08002636 arena_bin_t *bin)
2637{
Jason Evanse476f8a2010-01-16 09:53:50 -08002638
Jason Evans19b3d612010-03-18 20:36:40 -07002639 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002640 if (run == bin->runcur)
2641 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002642 else {
Jason Evansd01fd192015-08-19 15:21:32 -07002643 szind_t binind = arena_bin_index(extent_node_arena_get(
Jason Evansee41ad42015-02-15 18:04:46 -08002644 &chunk->node), bin);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002645 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2646
2647 if (bin_info->nregs != 1) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07002648 /*
2649 * This block's conditional is necessary because if the
2650 * run only contains one region, then it never gets
2651 * inserted into the non-full runs tree.
2652 */
Jason Evanse7a10582012-02-13 17:36:52 -08002653 arena_bin_runs_remove(bin, run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002654 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002655 }
Jason Evans088e6a02010-10-18 00:04:44 -07002656}
2657
2658static void
2659arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2660 arena_bin_t *bin)
2661{
Jason Evans088e6a02010-10-18 00:04:44 -07002662
2663 assert(run != bin->runcur);
Jason Evans86815df2010-03-13 20:32:56 -08002664
Jason Evanse00572b2010-03-14 19:43:56 -07002665 malloc_mutex_unlock(&bin->lock);
2666 /******************************/
Jason Evans86815df2010-03-13 20:32:56 -08002667 malloc_mutex_lock(&arena->lock);
Mike Hommeyf97298b2015-09-03 20:32:57 +09002668 arena_run_dalloc(arena, run, true, false, false);
Jason Evans86815df2010-03-13 20:32:56 -08002669 malloc_mutex_unlock(&arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002670 /****************************/
2671 malloc_mutex_lock(&bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08002672 if (config_stats)
2673 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08002674}
2675
Jason Evans940a2e02010-10-17 17:51:37 -07002676static void
2677arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2678 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002679{
Jason Evanse476f8a2010-01-16 09:53:50 -08002680
Jason Evans8de6a022010-10-17 20:57:30 -07002681 /*
Jason Evanse7a10582012-02-13 17:36:52 -08002682 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
2683 * non-full run. It is okay to NULL runcur out rather than proactively
2684 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07002685 */
Jason Evanse7a10582012-02-13 17:36:52 -08002686 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07002687 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08002688 if (bin->runcur->nfree > 0)
2689 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07002690 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08002691 if (config_stats)
2692 bin->stats.reruns++;
2693 } else
2694 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07002695}
2696
Jason Evansfc0b3b72014-10-09 17:54:06 -07002697static void
2698arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2699 arena_chunk_map_bits_t *bitselm, bool junked)
Jason Evans940a2e02010-10-17 17:51:37 -07002700{
Jason Evans0c5dd032014-09-29 01:31:39 -07002701 size_t pageind, rpages_ind;
Jason Evans940a2e02010-10-17 17:51:37 -07002702 arena_run_t *run;
2703 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02002704 arena_bin_info_t *bin_info;
Jason Evansd01fd192015-08-19 15:21:32 -07002705 szind_t binind;
Jason Evans940a2e02010-10-17 17:51:37 -07002706
Jason Evansae4c7b42012-04-02 07:04:34 -07002707 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07002708 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
Jason Evans61a6dfc2016-03-23 16:04:38 -07002709 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002710 binind = run->binind;
2711 bin = &arena->bins[binind];
Mike Hommey8b499712012-04-24 23:22:02 +02002712 bin_info = &arena_bin_info[binind];
Jason Evans940a2e02010-10-17 17:51:37 -07002713
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002714 if (!junked && config_fill && unlikely(opt_junk_free))
Jason Evans122449b2012-04-06 00:35:09 -07002715 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07002716
2717 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002718 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07002719 arena_dissociate_bin_run(chunk, run, bin);
Jason Evans8de6a022010-10-17 20:57:30 -07002720 arena_dalloc_bin_run(arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07002721 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07002722 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002723
Jason Evans7372b152012-02-10 20:22:09 -08002724 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002725 bin->stats.ndalloc++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002726 bin->stats.curregs--;
Jason Evans7372b152012-02-10 20:22:09 -08002727 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002728}
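/*
 * Summary, not part of upstream arena.c: the run transitions driven by
 * the deallocation above, where nregs is the run's region count:
 *
 *   nfree == nregs		-> the run is empty; dissociate it from
 *				   the bin and return it to the arena.
 *   nfree == 1, run != runcur	-> the run just left the full state;
 *				   insert it into the bin's non-full runs,
 *				   or make it runcur if it is lower in
 *				   memory.
 *   otherwise			-> the run stays where it is.
 */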
2729
Jason Evanse476f8a2010-01-16 09:53:50 -08002730void
Jason Evansfc0b3b72014-10-09 17:54:06 -07002731arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2732 arena_chunk_map_bits_t *bitselm)
2733{
2734
2735 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
2736}
2737
2738void
Jason Evans203484e2012-05-02 00:30:36 -07002739arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002740 size_t pageind, arena_chunk_map_bits_t *bitselm)
Jason Evans203484e2012-05-02 00:30:36 -07002741{
2742 arena_run_t *run;
2743 arena_bin_t *bin;
Jason Evans0c5dd032014-09-29 01:31:39 -07002744 size_t rpages_ind;
Jason Evans203484e2012-05-02 00:30:36 -07002745
Jason Evans0c5dd032014-09-29 01:31:39 -07002746 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
Jason Evans61a6dfc2016-03-23 16:04:38 -07002747 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002748 bin = &arena->bins[run->binind];
Jason Evans203484e2012-05-02 00:30:36 -07002749 malloc_mutex_lock(&bin->lock);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002750 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
Jason Evans203484e2012-05-02 00:30:36 -07002751 malloc_mutex_unlock(&bin->lock);
2752}
2753
2754void
Jason Evans243f7a02016-02-19 20:09:31 -08002755arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans203484e2012-05-02 00:30:36 -07002756 size_t pageind)
2757{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002758 arena_chunk_map_bits_t *bitselm;
Jason Evans203484e2012-05-02 00:30:36 -07002759
2760 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07002761 /* arena_ptr_small_binind_get() does extra sanity checking. */
2762 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2763 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07002764 }
Jason Evans61a6dfc2016-03-23 16:04:38 -07002765 bitselm = arena_bitselm_get_mutable(chunk, pageind);
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002766 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
Jason Evans243f7a02016-02-19 20:09:31 -08002767 arena_decay_tick(tsd, arena);
Jason Evans203484e2012-05-02 00:30:36 -07002768}
Jason Evanse476f8a2010-01-16 09:53:50 -08002769
Jason Evans6b694c42014-01-07 16:47:56 -08002770#ifdef JEMALLOC_JET
2771#undef arena_dalloc_junk_large
2772#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
2773#endif
Jason Evansfc0b3b72014-10-09 17:54:06 -07002774void
Jason Evans6b694c42014-01-07 16:47:56 -08002775arena_dalloc_junk_large(void *ptr, size_t usize)
2776{
2777
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002778 if (config_fill && unlikely(opt_junk_free))
Jason Evans6b694c42014-01-07 16:47:56 -08002779 memset(ptr, 0x5a, usize);
2780}
2781#ifdef JEMALLOC_JET
2782#undef arena_dalloc_junk_large
2783#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2784arena_dalloc_junk_large_t *arena_dalloc_junk_large =
2785 JEMALLOC_N(arena_dalloc_junk_large_impl);
2786#endif
2787
Jason Evanse56b24e2015-09-20 09:58:10 -07002788static void
Jason Evansfc0b3b72014-10-09 17:54:06 -07002789arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
2790 void *ptr, bool junked)
Jason Evanse476f8a2010-01-16 09:53:50 -08002791{
Jason Evans0c5dd032014-09-29 01:31:39 -07002792 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans61a6dfc2016-03-23 16:04:38 -07002793 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
2794 pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07002795 arena_run_t *run = &miscelm->run;
Jason Evans13668262010-01-31 03:57:29 -08002796
Jason Evans7372b152012-02-10 20:22:09 -08002797 if (config_fill || config_stats) {
Jason Evans8a03cf02015-05-04 09:58:36 -07002798 size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
2799 large_pad;
Jason Evanse476f8a2010-01-16 09:53:50 -08002800
Jason Evansfc0b3b72014-10-09 17:54:06 -07002801 if (!junked)
2802 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002803 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002804 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002805
Jason Evans7372b152012-02-10 20:22:09 -08002806 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08002807 arena->stats.allocated_large -= usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002808 arena->stats.lstats[index].ndalloc++;
2809 arena->stats.lstats[index].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08002810 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002811 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002812
Mike Hommeyf97298b2015-09-03 20:32:57 +09002813 arena_run_dalloc(arena, run, true, false, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002814}
2815
Jason Evans203484e2012-05-02 00:30:36 -07002816void
Jason Evansfc0b3b72014-10-09 17:54:06 -07002817arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
2818 void *ptr)
2819{
2820
2821 arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
2822}
2823
2824void
Jason Evans243f7a02016-02-19 20:09:31 -08002825arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr)
Jason Evans203484e2012-05-02 00:30:36 -07002826{
2827
2828 malloc_mutex_lock(&arena->lock);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002829 arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
Jason Evans203484e2012-05-02 00:30:36 -07002830 malloc_mutex_unlock(&arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08002831 arena_decay_tick(tsd, arena);
Jason Evans203484e2012-05-02 00:30:36 -07002832}
2833
Jason Evanse476f8a2010-01-16 09:53:50 -08002834static void
2835arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07002836 size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08002837{
Jason Evans0c5dd032014-09-29 01:31:39 -07002838 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans61a6dfc2016-03-23 16:04:38 -07002839 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
2840 pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07002841 arena_run_t *run = &miscelm->run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002842
2843 assert(size < oldsize);
2844
2845 /*
2846 * Shrink the run, and make trailing pages available for other
2847 * allocations.
2848 */
2849 malloc_mutex_lock(&arena->lock);
Jason Evans8a03cf02015-05-04 09:58:36 -07002850 arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
2851 large_pad, true);
Jason Evans7372b152012-02-10 20:22:09 -08002852 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002853 szind_t oldindex = size2index(oldsize) - NBINS;
2854 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002855
Jason Evans7372b152012-02-10 20:22:09 -08002856 arena->stats.ndalloc_large++;
2857 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002858 arena->stats.lstats[oldindex].ndalloc++;
2859 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08002860
Jason Evans7372b152012-02-10 20:22:09 -08002861 arena->stats.nmalloc_large++;
2862 arena->stats.nrequests_large++;
2863 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07002864 arena->stats.lstats[index].nmalloc++;
2865 arena->stats.lstats[index].nrequests++;
2866 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002867 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002868 malloc_mutex_unlock(&arena->lock);
2869}
2870
2871static bool
2872arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans560a4e12015-09-11 16:18:53 -07002873 size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002874{
Jason Evansae4c7b42012-04-02 07:04:34 -07002875 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans5716d972015-08-06 23:34:12 -07002876 size_t npages = (oldsize + large_pad) >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002877 size_t followsize;
Jason Evanse476f8a2010-01-16 09:53:50 -08002878
Jason Evans8a03cf02015-05-04 09:58:36 -07002879 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
2880 large_pad);
Jason Evanse476f8a2010-01-16 09:53:50 -08002881
2882 /* Try to extend the run. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002883 malloc_mutex_lock(&arena->lock);
Jason Evans560a4e12015-09-11 16:18:53 -07002884 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
2885 pageind+npages) != 0)
2886 goto label_fail;
2887 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
2888 if (oldsize + followsize >= usize_min) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002889 /*
2890 * The next run is available and sufficiently large. Split the
2891 * following run, then merge the first part with the existing
2892 * allocation.
2893 */
Guilherme Goncalves9c6a8d32014-12-17 14:46:35 -02002894 arena_run_t *run;
Jason Evans560a4e12015-09-11 16:18:53 -07002895 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
Jason Evans155bfa72014-10-05 17:54:10 -07002896
Jason Evans560a4e12015-09-11 16:18:53 -07002897 usize = usize_max;
Jason Evans155bfa72014-10-05 17:54:10 -07002898 while (oldsize + followsize < usize)
2899 usize = index2size(size2index(usize)-1);
2900 assert(usize >= usize_min);
Jason Evans560a4e12015-09-11 16:18:53 -07002901 assert(usize >= oldsize);
Jason Evans5716d972015-08-06 23:34:12 -07002902 splitsize = usize - oldsize;
Jason Evans560a4e12015-09-11 16:18:53 -07002903 if (splitsize == 0)
2904 goto label_fail;
Jason Evans155bfa72014-10-05 17:54:10 -07002905
Jason Evans61a6dfc2016-03-23 16:04:38 -07002906 run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
Jason Evans560a4e12015-09-11 16:18:53 -07002907 if (arena_run_split_large(arena, run, splitsize, zero))
2908 goto label_fail;
Jason Evanse476f8a2010-01-16 09:53:50 -08002909
Jason Evansd260f442015-09-24 16:38:45 -07002910 if (config_cache_oblivious && zero) {
2911 /*
2912 * Zero the trailing bytes of the original allocation's
2913 * last page, since they are in an indeterminate state.
Jason Evansa784e412015-09-24 22:21:55 -07002914 * There will always be trailing bytes, because ptr's
2915 * offset from the beginning of the run is a multiple of
2916 * CACHELINE in [0 .. PAGE).
Jason Evansd260f442015-09-24 16:38:45 -07002917 */
Jason Evansa784e412015-09-24 22:21:55 -07002918 void *zbase = (void *)((uintptr_t)ptr + oldsize);
2919 void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
2920 PAGE));
2921 size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
2922 assert(nzero > 0);
2923 memset(zbase, 0, nzero);
Jason Evansd260f442015-09-24 16:38:45 -07002924 }
2925
Jason Evans088e6a02010-10-18 00:04:44 -07002926 size = oldsize + splitsize;
Jason Evans5716d972015-08-06 23:34:12 -07002927 npages = (size + large_pad) >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07002928
2929 /*
2930 * Mark the extended run as dirty if either portion of the run
2931 * was dirty before allocation. This is rather pedantic,
2932 * because there's not actually any sequence of events that
2933 * could cause the resulting run to be passed to
2934 * arena_run_dalloc() with the dirty argument set to false
2935 * (which is when dirty flag consistency would really matter).
2936 */
Jason Evans203484e2012-05-02 00:30:36 -07002937 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
2938 arena_mapbits_dirty_get(chunk, pageind+npages-1);
Jason Evans1f27abc2015-08-11 12:42:33 -07002939 flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
Jason Evans5716d972015-08-06 23:34:12 -07002940 arena_mapbits_large_set(chunk, pageind, size + large_pad,
Jason Evans1f27abc2015-08-11 12:42:33 -07002941 flag_dirty | (flag_unzeroed_mask &
2942 arena_mapbits_unzeroed_get(chunk, pageind)));
2943 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
2944 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2945 pageind+npages-1)));
Jason Evanse476f8a2010-01-16 09:53:50 -08002946
Jason Evans7372b152012-02-10 20:22:09 -08002947 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002948 szind_t oldindex = size2index(oldsize) - NBINS;
2949 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002950
Jason Evans7372b152012-02-10 20:22:09 -08002951 arena->stats.ndalloc_large++;
2952 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002953 arena->stats.lstats[oldindex].ndalloc++;
2954 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08002955
Jason Evans7372b152012-02-10 20:22:09 -08002956 arena->stats.nmalloc_large++;
2957 arena->stats.nrequests_large++;
2958 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07002959 arena->stats.lstats[index].nmalloc++;
2960 arena->stats.lstats[index].nrequests++;
2961 arena->stats.lstats[index].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07002962 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002963 malloc_mutex_unlock(&arena->lock);
2964 return (false);
2965 }
Jason Evans560a4e12015-09-11 16:18:53 -07002966label_fail:
Jason Evanse476f8a2010-01-16 09:53:50 -08002967 malloc_mutex_unlock(&arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002968 return (true);
2969}
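/*
 * Worked example, not part of upstream arena.c: the size-class walk in
 * arena_ralloc_large_grow() above.  Suppose oldsize == 16 KiB, usize_max
 * == 40 KiB, and followsize == 12 KiB.  With the stock size classes
 * (..., 28 KiB, 32 KiB, 40 KiB, ...), the loop steps usize down
 * 40 -> 32 -> 28 KiB until oldsize + followsize >= usize, then splits
 * splitsize == usize - oldsize == 12 KiB off the following free run and
 * merges it into the allocation, growing it in place.
 */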
2970
Jason Evans6b694c42014-01-07 16:47:56 -08002971#ifdef JEMALLOC_JET
2972#undef arena_ralloc_junk_large
2973#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
2974#endif
2975static void
2976arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
2977{
2978
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002979 if (config_fill && unlikely(opt_junk_free)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002980 memset((void *)((uintptr_t)ptr + usize), 0x5a,
2981 old_usize - usize);
2982 }
2983}
2984#ifdef JEMALLOC_JET
2985#undef arena_ralloc_junk_large
2986#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
2987arena_ralloc_junk_large_t *arena_ralloc_junk_large =
2988 JEMALLOC_N(arena_ralloc_junk_large_impl);
2989#endif
2990
Jason Evanse476f8a2010-01-16 09:53:50 -08002991/*
2992 * Try to resize a large allocation, in order to avoid copying. This will
 2993 * always fail when growing an object if the following run is already in use.
2994 */
2995static bool
Jason Evans560a4e12015-09-11 16:18:53 -07002996arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
2997 size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002998{
Jason Evans560a4e12015-09-11 16:18:53 -07002999 arena_chunk_t *chunk;
3000 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08003001
Jason Evans560a4e12015-09-11 16:18:53 -07003002 if (oldsize == usize_max) {
3003 /* Current size class is compatible and maximal. */
Jason Evanse476f8a2010-01-16 09:53:50 -08003004 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003005 }
Jason Evans560a4e12015-09-11 16:18:53 -07003006
3007 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3008 arena = extent_node_arena_get(&chunk->node);
3009
3010 if (oldsize < usize_max) {
3011 bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
3012 usize_min, usize_max, zero);
3013 if (config_fill && !ret && !zero) {
3014 if (unlikely(opt_junk_alloc)) {
3015 memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
3016 isalloc(ptr, config_prof) - oldsize);
3017 } else if (unlikely(opt_zero)) {
3018 memset((void *)((uintptr_t)ptr + oldsize), 0,
3019 isalloc(ptr, config_prof) - oldsize);
3020 }
3021 }
3022 return (ret);
3023 }
3024
3025 assert(oldsize > usize_max);
 3026	/* Fill before shrinking in order to avoid a race. */
3027 arena_ralloc_junk_large(ptr, oldsize, usize_max);
3028 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
3029 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003030}
3031
Jason Evansb2c31662014-01-12 15:05:44 -08003032bool
Jason Evans243f7a02016-02-19 20:09:31 -08003033arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
3034 size_t extra, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003035{
Jason Evans560a4e12015-09-11 16:18:53 -07003036 size_t usize_min, usize_max;
Jason Evanse476f8a2010-01-16 09:53:50 -08003037
Jason Evans0c516a02016-02-25 15:29:49 -08003038	/* Callers passing non-zero extra must have clamped it already. */
3039 assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
3040
Jason Evans0c516a02016-02-25 15:29:49 -08003041 if (unlikely(size > HUGE_MAXCLASS))
3042 return (true);
3043
Jason Evans560a4e12015-09-11 16:18:53 -07003044 usize_min = s2u(size);
Jason Evans560a4e12015-09-11 16:18:53 -07003045 usize_max = s2u(size + extra);
Jason Evans676df882015-09-11 20:50:20 -07003046 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
Jason Evans243f7a02016-02-19 20:09:31 -08003047 arena_chunk_t *chunk;
3048
Jason Evans88fef7c2015-02-12 14:06:37 -08003049 /*
3050 * Avoid moving the allocation if the size class can be left the
3051 * same.
3052 */
Jason Evans560a4e12015-09-11 16:18:53 -07003053 if (oldsize <= SMALL_MAXCLASS) {
3054 assert(arena_bin_info[size2index(oldsize)].reg_size ==
3055 oldsize);
Jason Evans4985dc62016-02-19 19:24:58 -08003056 if ((usize_max > SMALL_MAXCLASS ||
3057 size2index(usize_max) != size2index(oldsize)) &&
3058 (size > oldsize || usize_max < oldsize))
3059 return (true);
Jason Evans560a4e12015-09-11 16:18:53 -07003060 } else {
Jason Evans4985dc62016-02-19 19:24:58 -08003061 if (usize_max <= SMALL_MAXCLASS)
3062 return (true);
3063 if (arena_ralloc_large(ptr, oldsize, usize_min,
3064 usize_max, zero))
3065 return (true);
Jason Evanse476f8a2010-01-16 09:53:50 -08003066 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003067
Jason Evans243f7a02016-02-19 20:09:31 -08003068 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3069 arena_decay_tick(tsd, extent_node_arena_get(&chunk->node));
Jason Evans4985dc62016-02-19 19:24:58 -08003070 return (false);
Jason Evans560a4e12015-09-11 16:18:53 -07003071 } else {
Jason Evans243f7a02016-02-19 20:09:31 -08003072 return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min,
3073 usize_max, zero));
Jason Evans560a4e12015-09-11 16:18:53 -07003074 }
3075}
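/*
 * Usage sketch, not part of upstream arena.c: arena_ralloc_no_move()
 * returns false on success (the object was resized in place or already
 * fits) and true on failure, in which case the caller must
 * allocate-copy-free, as arena_ralloc() below does:
 */
#if 0	/* illustration only */
	if (!arena_ralloc_no_move(tsd, ptr, oldsize, size, 0, zero)) {
		/* ptr is still valid and now covers the request. */
	} else {
		/* Move the object, e.g. via arena_ralloc(). */
	}
#endif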
3076
3077static void *
3078arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
3079 size_t alignment, bool zero, tcache_t *tcache)
3080{
3081
3082 if (alignment == 0)
Qi Wangf4a0f322015-10-27 15:12:10 -07003083 return (arena_malloc(tsd, arena, usize, size2index(usize), zero,
3084 tcache, true));
Jason Evans560a4e12015-09-11 16:18:53 -07003085 usize = sa2u(usize, alignment);
Jason Evans0c516a02016-02-25 15:29:49 -08003086 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
Jason Evans560a4e12015-09-11 16:18:53 -07003087 return (NULL);
3088 return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
Jason Evans8e3c3c62010-09-17 15:46:18 -07003089}
Jason Evanse476f8a2010-01-16 09:53:50 -08003090
Jason Evans8e3c3c62010-09-17 15:46:18 -07003091void *
Jason Evans5460aa62014-09-22 21:09:23 -07003092arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
Jason Evans560a4e12015-09-11 16:18:53 -07003093 size_t alignment, bool zero, tcache_t *tcache)
Jason Evans8e3c3c62010-09-17 15:46:18 -07003094{
3095 void *ret;
Jason Evans560a4e12015-09-11 16:18:53 -07003096 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003097
Jason Evans560a4e12015-09-11 16:18:53 -07003098 usize = s2u(size);
Jason Evans0c516a02016-02-25 15:29:49 -08003099 if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
Jason Evans560a4e12015-09-11 16:18:53 -07003100 return (NULL);
3101
Jason Evans676df882015-09-11 20:50:20 -07003102 if (likely(usize <= large_maxclass)) {
Jason Evans88fef7c2015-02-12 14:06:37 -08003103 size_t copysize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003104
Jason Evans88fef7c2015-02-12 14:06:37 -08003105 /* Try to avoid moving the allocation. */
Jason Evans243f7a02016-02-19 20:09:31 -08003106 if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero))
Jason Evans88fef7c2015-02-12 14:06:37 -08003107 return (ptr);
Jason Evans8e3c3c62010-09-17 15:46:18 -07003108
Jason Evans88fef7c2015-02-12 14:06:37 -08003109 /*
3110 * size and oldsize are different enough that we need to move
3111 * the object. In that case, fall back to allocating new space
3112 * and copying.
3113 */
Jason Evans560a4e12015-09-11 16:18:53 -07003114 ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
3115 zero, tcache);
3116 if (ret == NULL)
3117 return (NULL);
Jason Evans88fef7c2015-02-12 14:06:37 -08003118
3119 /*
 3120		 * Junk/zero-filling was already done by
3121 * ipalloc()/arena_malloc().
3122 */
3123
Jason Evans560a4e12015-09-11 16:18:53 -07003124 copysize = (usize < oldsize) ? usize : oldsize;
Jason Evans88fef7c2015-02-12 14:06:37 -08003125 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
3126 memcpy(ret, ptr, copysize);
3127 isqalloc(tsd, ptr, oldsize, tcache);
3128 } else {
Jason Evans560a4e12015-09-11 16:18:53 -07003129 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
3130 zero, tcache);
Jason Evans8e3c3c62010-09-17 15:46:18 -07003131 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003132 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08003133}
3134
Jason Evans609ae592012-10-11 13:53:15 -07003135dss_prec_t
3136arena_dss_prec_get(arena_t *arena)
3137{
3138 dss_prec_t ret;
3139
3140 malloc_mutex_lock(&arena->lock);
3141 ret = arena->dss_prec;
3142 malloc_mutex_unlock(&arena->lock);
3143 return (ret);
3144}
3145
Jason Evans4d434ad2014-04-15 12:09:48 -07003146bool
Jason Evans609ae592012-10-11 13:53:15 -07003147arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
3148{
3149
Jason Evans551ebc42014-10-03 10:16:09 -07003150 if (!have_dss)
Jason Evans4d434ad2014-04-15 12:09:48 -07003151 return (dss_prec != dss_prec_disabled);
Jason Evans609ae592012-10-11 13:53:15 -07003152 malloc_mutex_lock(&arena->lock);
3153 arena->dss_prec = dss_prec;
3154 malloc_mutex_unlock(&arena->lock);
Jason Evans4d434ad2014-04-15 12:09:48 -07003155 return (false);
Jason Evans609ae592012-10-11 13:53:15 -07003156}
3157
Jason Evans8d6a3e82015-03-18 18:55:33 -07003158ssize_t
3159arena_lg_dirty_mult_default_get(void)
3160{
3161
3162 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
3163}
3164
3165bool
3166arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
3167{
3168
Jason Evans243f7a02016-02-19 20:09:31 -08003169 if (opt_purge != purge_mode_ratio)
3170 return (true);
Jason Evans8d6a3e82015-03-18 18:55:33 -07003171 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
3172 return (true);
3173 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
3174 return (false);
3175}
3176
Jason Evans243f7a02016-02-19 20:09:31 -08003177ssize_t
3178arena_decay_time_default_get(void)
3179{
3180
3181 return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
3182}
3183
3184bool
3185arena_decay_time_default_set(ssize_t decay_time)
3186{
3187
3188 if (opt_purge != purge_mode_decay)
3189 return (true);
3190 if (!arena_decay_time_valid(decay_time))
3191 return (true);
3192 atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
3193 return (false);
3194}
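/*
 * Note, not part of upstream arena.c: the two defaults above are read
 * and written via atomic_read_z()/atomic_write_z() (through a size_t
 * cast) rather than under a lock, so an arena created concurrently with
 * a default change observes either the old or the new value, never a
 * torn one.
 */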
3195
Jason Evans3c07f802016-02-27 20:40:13 -08003196static void
3197arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
3198 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3199 size_t *nactive, size_t *ndirty)
Jason Evans609ae592012-10-11 13:53:15 -07003200{
Jason Evans609ae592012-10-11 13:53:15 -07003201
Jason Evans3c07f802016-02-27 20:40:13 -08003202 *nthreads += arena_nthreads_get(arena);
Jason Evans609ae592012-10-11 13:53:15 -07003203 *dss = dss_prec_names[arena->dss_prec];
Jason Evans562d2662015-03-24 16:36:12 -07003204 *lg_dirty_mult = arena->lg_dirty_mult;
Jason Evans243f7a02016-02-19 20:09:31 -08003205 *decay_time = arena->decay_time;
Jason Evans609ae592012-10-11 13:53:15 -07003206 *nactive += arena->nactive;
3207 *ndirty += arena->ndirty;
Jason Evans3c07f802016-02-27 20:40:13 -08003208}
3209
3210void
3211arena_basic_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
3212 ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
3213 size_t *ndirty)
3214{
3215
3216 malloc_mutex_lock(&arena->lock);
3217 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3218 decay_time, nactive, ndirty);
3219 malloc_mutex_unlock(&arena->lock);
3220}
3221
3222void
3223arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
3224 ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
3225 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
3226 malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
3227{
3228 unsigned i;
3229
3230 cassert(config_stats);
3231
3232 malloc_mutex_lock(&arena->lock);
3233 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3234 decay_time, nactive, ndirty);
Jason Evans609ae592012-10-11 13:53:15 -07003235
3236 astats->mapped += arena->stats.mapped;
3237 astats->npurge += arena->stats.npurge;
3238 astats->nmadvise += arena->stats.nmadvise;
3239 astats->purged += arena->stats.purged;
Jason Evans4581b972014-11-27 17:22:36 -02003240 astats->metadata_mapped += arena->stats.metadata_mapped;
3241 astats->metadata_allocated += arena_metadata_allocated_get(arena);
Jason Evans609ae592012-10-11 13:53:15 -07003242 astats->allocated_large += arena->stats.allocated_large;
3243 astats->nmalloc_large += arena->stats.nmalloc_large;
3244 astats->ndalloc_large += arena->stats.ndalloc_large;
3245 astats->nrequests_large += arena->stats.nrequests_large;
Jason Evanse2deab72014-05-15 22:22:27 -07003246 astats->allocated_huge += arena->stats.allocated_huge;
3247 astats->nmalloc_huge += arena->stats.nmalloc_huge;
3248 astats->ndalloc_huge += arena->stats.ndalloc_huge;
Jason Evans609ae592012-10-11 13:53:15 -07003249
3250 for (i = 0; i < nlclasses; i++) {
3251 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
3252 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
3253 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
3254 lstats[i].curruns += arena->stats.lstats[i].curruns;
3255 }
Jason Evans3c4d92e2014-10-12 22:53:59 -07003256
3257 for (i = 0; i < nhclasses; i++) {
3258 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
3259 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
3260 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
3261 }
Jason Evans609ae592012-10-11 13:53:15 -07003262 malloc_mutex_unlock(&arena->lock);
3263
3264 for (i = 0; i < NBINS; i++) {
3265 arena_bin_t *bin = &arena->bins[i];
3266
3267 malloc_mutex_lock(&bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003268 bstats[i].nmalloc += bin->stats.nmalloc;
3269 bstats[i].ndalloc += bin->stats.ndalloc;
3270 bstats[i].nrequests += bin->stats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07003271 bstats[i].curregs += bin->stats.curregs;
Jason Evans609ae592012-10-11 13:53:15 -07003272 if (config_tcache) {
3273 bstats[i].nfills += bin->stats.nfills;
3274 bstats[i].nflushes += bin->stats.nflushes;
3275 }
3276 bstats[i].nruns += bin->stats.nruns;
3277 bstats[i].reruns += bin->stats.reruns;
3278 bstats[i].curruns += bin->stats.curruns;
3279 malloc_mutex_unlock(&bin->lock);
3280 }
3281}
3282
Jason Evans767d8502016-02-24 23:58:10 -08003283unsigned
3284arena_nthreads_get(arena_t *arena)
3285{
3286
3287 return (atomic_read_u(&arena->nthreads));
3288}
3289
3290void
3291arena_nthreads_inc(arena_t *arena)
3292{
3293
3294 atomic_add_u(&arena->nthreads, 1);
3295}
3296
3297void
3298arena_nthreads_dec(arena_t *arena)
3299{
3300
3301 atomic_sub_u(&arena->nthreads, 1);
3302}
3303
Jason Evans8bb31982014-10-07 23:14:57 -07003304arena_t *
3305arena_new(unsigned ind)
Jason Evanse476f8a2010-01-16 09:53:50 -08003306{
Jason Evans8bb31982014-10-07 23:14:57 -07003307 arena_t *arena;
Dave Watson3417a302016-02-23 12:06:21 -08003308 size_t arena_size;
Jason Evanse476f8a2010-01-16 09:53:50 -08003309 unsigned i;
3310 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08003311
Dave Watson3417a302016-02-23 12:06:21 -08003312 /* Compute arena size to incorporate sufficient runs_avail elements. */
Dave Watson4a0dbb52016-02-29 11:54:42 -08003313 arena_size = offsetof(arena_t, runs_avail) + (sizeof(ph_heap_t) *
Dave Watson38127292016-02-24 20:10:02 -08003314 runs_avail_nclasses);
Jason Evans8bb31982014-10-07 23:14:57 -07003315 /*
Jason Evans3c4d92e2014-10-12 22:53:59 -07003316 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
3317 * because there is no way to clean up if base_alloc() OOMs.
Jason Evans8bb31982014-10-07 23:14:57 -07003318 */
3319 if (config_stats) {
Dave Watson3417a302016-02-23 12:06:21 -08003320 arena = (arena_t *)base_alloc(CACHELINE_CEILING(arena_size) +
3321 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
Jason Evans3c4d92e2014-10-12 22:53:59 -07003322 nhclasses) * sizeof(malloc_huge_stats_t));
Jason Evans8bb31982014-10-07 23:14:57 -07003323 } else
Dave Watson3417a302016-02-23 12:06:21 -08003324 arena = (arena_t *)base_alloc(arena_size);
Jason Evans8bb31982014-10-07 23:14:57 -07003325 if (arena == NULL)
3326 return (NULL);
3327
Jason Evans6109fe02010-02-10 10:37:56 -08003328 arena->ind = ind;
Jason Evans597632b2011-03-18 13:41:33 -07003329 arena->nthreads = 0;
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003330 if (malloc_mutex_init(&arena->lock))
3331 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003332
Jason Evans7372b152012-02-10 20:22:09 -08003333 if (config_stats) {
3334 memset(&arena->stats, 0, sizeof(arena_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003335 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
Dave Watson3417a302016-02-23 12:06:21 -08003336 + CACHELINE_CEILING(arena_size));
Jason Evans7372b152012-02-10 20:22:09 -08003337 memset(arena->stats.lstats, 0, nlclasses *
3338 sizeof(malloc_large_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003339 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
Dave Watson3417a302016-02-23 12:06:21 -08003340 + CACHELINE_CEILING(arena_size) +
Jason Evans3c4d92e2014-10-12 22:53:59 -07003341 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
3342 memset(arena->stats.hstats, 0, nhclasses *
3343 sizeof(malloc_huge_stats_t));
Jason Evans7372b152012-02-10 20:22:09 -08003344 if (config_tcache)
3345 ql_new(&arena->tcache_ql);
3346 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003347
Jason Evans7372b152012-02-10 20:22:09 -08003348 if (config_prof)
3349 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08003350
Jason Evans8a03cf02015-05-04 09:58:36 -07003351 if (config_cache_oblivious) {
3352 /*
3353 * A nondeterministic seed based on the address of arena reduces
3354 * the likelihood of lockstep non-uniform cache index
3355 * utilization among identical concurrent processes, but at the
3356 * cost of test repeatability. For debug builds, instead use a
3357 * deterministic seed.
3358 */
3359 arena->offset_state = config_debug ? ind :
3360 (uint64_t)(uintptr_t)arena;
3361 }
3362
Jason Evans609ae592012-10-11 13:53:15 -07003363 arena->dss_prec = chunk_dss_prec_get();
3364
Jason Evanse476f8a2010-01-16 09:53:50 -08003365 arena->spare = NULL;
3366
Jason Evans8d6a3e82015-03-18 18:55:33 -07003367 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
Jason Evans0a9f9a42015-06-22 18:50:32 -07003368 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08003369 arena->nactive = 0;
3370 arena->ndirty = 0;
3371
Dave Watson3417a302016-02-23 12:06:21 -08003372	for (i = 0; i < runs_avail_nclasses; i++)
Dave Watson4a0dbb52016-02-29 11:54:42 -08003373 ph_new(&arena->runs_avail[i]);
Jason Evansee41ad42015-02-15 18:04:46 -08003374 qr_new(&arena->runs_dirty, rd_link);
Jason Evans738e0892015-02-18 01:15:50 -08003375 qr_new(&arena->chunks_cache, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08003376
Jason Evans243f7a02016-02-19 20:09:31 -08003377 if (opt_purge == purge_mode_decay)
3378 arena_decay_init(arena, arena_decay_time_default_get());
3379
Jason Evansee41ad42015-02-15 18:04:46 -08003380 ql_new(&arena->huge);
3381 if (malloc_mutex_init(&arena->huge_mtx))
3382 return (NULL);
3383
Jason Evansb49a3342015-07-28 11:28:19 -04003384 extent_tree_szad_new(&arena->chunks_szad_cached);
3385 extent_tree_ad_new(&arena->chunks_ad_cached);
3386 extent_tree_szad_new(&arena->chunks_szad_retained);
3387 extent_tree_ad_new(&arena->chunks_ad_retained);
Jason Evansee41ad42015-02-15 18:04:46 -08003388 if (malloc_mutex_init(&arena->chunks_mtx))
3389 return (NULL);
3390 ql_new(&arena->node_cache);
3391 if (malloc_mutex_init(&arena->node_cache_mtx))
3392 return (NULL);
3393
Jason Evansb49a3342015-07-28 11:28:19 -04003394 arena->chunk_hooks = chunk_hooks_default;
Jason Evanse476f8a2010-01-16 09:53:50 -08003395
	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (NULL);
		bin->runcur = NULL;
		ph_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (arena);
}

/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size <= arena_maxrun
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
 * these settings are all interdependent.
 */
static void
bin_info_run_size_calc(arena_bin_info_t *bin_info)
{
	size_t pad_size;
	size_t try_run_size, perfect_run_size, actual_run_size;
	uint32_t try_nregs, perfect_nregs, actual_nregs;

	/*
	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed
	 * to align the regions.  The padding allows each redzone to be half
	 * the minimum alignment; without the padding, each redzone would
	 * have to be twice as large in order to maintain alignment.
	 */
	if (config_fill && unlikely(opt_redzone)) {
		size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			pad_size = 0;
		} else {
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		}
	} else {
		bin_info->redzone_size = 0;
		pad_size = 0;
	}
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);
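
	/*
	 * Worked example (added commentary): for reg_size = 8,
	 * ffs_zu(8) - 1 = 3, so align_min = 8 <= REDZONE_MINSIZE (16); each
	 * redzone is 16 bytes, pad_size = 0, and reg_interval = 8 + 2*16 =
	 * 40.  For reg_size = 192, align_min = 64, so each redzone is 32
	 * bytes, pad_size = 32, and reg_interval = 192 + 2*32 = 256, which
	 * preserves 64-byte region alignment.
	 */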

	/*
	 * Compute run size under ideal conditions (no redzones, no limit on
	 * run size).
	 */
	try_run_size = PAGE;
	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	do {
		perfect_run_size = try_run_size;
		perfect_nregs = try_nregs;

		try_run_size += PAGE;
		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
	assert(perfect_nregs <= RUN_MAXREGS);
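
	/*
	 * Worked example (added commentary): for reg_size = 96 with 4 KiB
	 * pages, the loop tries 4096 (42 regs, 42*96 = 4032 != 4096), then
	 * 8192 (85 regs, 8160 != 8192), and settles on 12288 = 128 * 96, the
	 * smallest page multiple that regions of this size tile exactly.
	 */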

	actual_run_size = perfect_run_size;
	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
	    bin_info->reg_interval);

	/*
	 * Redzones can require enough padding that not even a single region
	 * can fit within the number of pages that would normally be
	 * dedicated to a run for this size class.  Increase the run size
	 * until at least one region fits.
	 */
	while (actual_nregs == 0) {
		assert(config_fill && unlikely(opt_redzone));

		actual_run_size += PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}

	/*
	 * Make sure that the run will fit within an arena chunk.
	 */
	while (actual_run_size > arena_maxrun) {
		actual_run_size -= PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}
	assert(actual_nregs > 0);
	assert(actual_run_size == s2u(actual_run_size));

	/* Copy final settings. */
	bin_info->run_size = actual_run_size;
	bin_info->nregs = actual_nregs;
	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);

	if (actual_run_size > small_maxrun)
		small_maxrun = actual_run_size;

	assert(bin_info->reg0_offset - bin_info->redzone_size +
	    (bin_info->nregs * bin_info->reg_interval) + pad_size ==
	    bin_info->run_size);
}

static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;

#define	BIN_INFO_INIT_bin_yes(index, size)				\
	bin_info = &arena_bin_info[index];				\
	bin_info->reg_size = size;					\
	bin_info_run_size_calc(bin_info);				\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define	BIN_INFO_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}

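/*
 * Added commentary: SIZE_CLASSES is an X-macro list of
 * SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) entries, so a
 * small-class row such as SC(0, 3, 3, 0, yes, 3) (the 8-byte class on
 * typical configurations; exact rows depend on size_classes.h) expands
 * here to
 *
 *	bin_info = &arena_bin_info[0];
 *	bin_info->reg_size = (ZU(1)<<3) + (ZU(0)<<3);	(i.e. 8)
 *	bin_info_run_size_calc(bin_info);
 *	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
 *
 * while bin == no rows (the large classes) expand to nothing.
 */
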
static bool
small_run_size_init(void)
{

	assert(small_maxrun != 0);

	/*
	 * Allocate (small_maxrun >> LG_PAGE) + 1 entries: the table is
	 * indexed by (run_size >> LG_PAGE), which ranges up to and including
	 * small_maxrun >> LG_PAGE, so without the +1 the largest small run
	 * size would index one element past the end.
	 */
	small_run_tab = (bool *)base_alloc(sizeof(bool) * ((small_maxrun >>
	    LG_PAGE) + 1));
	if (small_run_tab == NULL)
		return (true);

#define	TAB_INIT_bin_yes(index, size) {					\
		arena_bin_info_t *bin_info = &arena_bin_info[index];	\
		small_run_tab[bin_info->run_size >> LG_PAGE] = true;	\
	}
#define	TAB_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef TAB_INIT_bin_yes
#undef TAB_INIT_bin_no
#undef SC

	return (false);
}
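
/*
 * Added commentary: the table lets the run quantization code recognize
 * valid small run sizes in O(1).  A sketch of the intended query (assumed,
 * matching the run_quantize_*_compute() helpers elsewhere in this file):
 *
 *	if (size <= small_maxrun && small_run_tab[size >> LG_PAGE]) {
 *		... size is already a valid small run size; keep it ...
 *	}
 */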

static bool
run_quantize_init(void)
{
	unsigned i;

	run_quantize_max = chunksize + large_pad;

	run_quantize_floor_tab = (size_t *)base_alloc(sizeof(size_t) *
	    (run_quantize_max >> LG_PAGE));
	if (run_quantize_floor_tab == NULL)
		return (true);

	run_quantize_ceil_tab = (size_t *)base_alloc(sizeof(size_t) *
	    (run_quantize_max >> LG_PAGE));
	if (run_quantize_ceil_tab == NULL)
		return (true);

	for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) {
		size_t run_size = i << LG_PAGE;

		run_quantize_floor_tab[i-1] =
		    run_quantize_floor_compute(run_size);
		run_quantize_ceil_tab[i-1] =
		    run_quantize_ceil_compute(run_size);
	}

	return (false);
}
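
/*
 * Added commentary: with the memoization tables filled in, the hot-path
 * lookups reduce to array indexing, e.g. (assumed, matching
 * run_quantize_floor() elsewhere in this file):
 *
 *	ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
 *
 * for any page-aligned size in [PAGE, run_quantize_max].
 */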

bool
arena_boot(void)
{
	unsigned i;

	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
	arena_decay_time_default_set(opt_decay_time);

	/*
	 * Compute the header size such that it is large enough to contain
	 * the page map.  The page map is biased to omit entries for the
	 * header itself, so some iteration is necessary to compute the map
	 * bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include
	 *    enough space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the
	 *    page map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		size_t header_size = offsetof(arena_chunk_t, map_bits) +
		    ((sizeof(arena_chunk_map_bits_t) +
		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
	}
	assert(map_bias > 0);
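
	/*
	 * Worked example (added commentary, with hypothetical sizes):
	 * suppose chunk_npages = 512, a 64-byte fixed header, and 104 bytes
	 * of map entries per page.
	 * Pass 1: 64 + 104*512     = 53312 -> map_bias = 14.
	 * Pass 2: 64 + 104*(512-14) = 51856 -> map_bias = 13.
	 * Pass 3: 64 + 104*(512-13) = 51960 -> map_bias = 13 (stable).
	 */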

	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);
	large_maxclass = index2size(size2index(chunksize)-1);
	if (large_maxclass > arena_maxrun) {
		/*
		 * For small chunk sizes it's possible for there to be fewer
		 * non-header pages available than are necessary to serve the
		 * size classes just below chunksize.
		 */
		large_maxclass = arena_maxrun;
	}
	assert(large_maxclass > 0);
	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
	nhclasses = NSIZES - nlclasses - NBINS;
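
	/*
	 * Added commentary: with the common defaults assumed here (2 MiB
	 * chunks, 4 KiB pages), large_maxclass is the largest size class
	 * strictly below chunksize (1.75 MiB), the large classes span
	 * 16 KiB through large_maxclass, and everything >= chunksize is
	 * served by the huge classes counted in nhclasses.
	 */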

	bin_info_init();
	if (small_run_size_init())
		return (true);
	if (run_quantize_init())
		return (true);

	runs_avail_bias = size2index(PAGE);
	runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias;

	return (false);
}
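
/*
 * Added commentary: runs_avail_bias and runs_avail_nclasses define a dense
 * mapping from size class index to per-class runs_avail heap.  A sketch of
 * the lookup (assumed, matching the accessor used elsewhere in this file):
 *
 *	ind = size2index(run_quantize_floor(size));
 *	heap = &arena->runs_avail[ind - runs_avail_bias];
 *
 * so the first heap holds PAGE-sized runs and the last holds runs of
 * run_quantize_max bytes.
 */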

void
arena_prefork(arena_t *arena)
{
	unsigned i;

	malloc_mutex_prefork(&arena->lock);
	malloc_mutex_prefork(&arena->huge_mtx);
	malloc_mutex_prefork(&arena->chunks_mtx);
	malloc_mutex_prefork(&arena->node_cache_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(&arena->bins[i].lock);
}

void
arena_postfork_parent(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(&arena->bins[i].lock);
	malloc_mutex_postfork_parent(&arena->node_cache_mtx);
	malloc_mutex_postfork_parent(&arena->chunks_mtx);
	malloc_mutex_postfork_parent(&arena->huge_mtx);
	malloc_mutex_postfork_parent(&arena->lock);
}

void
arena_postfork_child(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(&arena->bins[i].lock);
	malloc_mutex_postfork_child(&arena->node_cache_mtx);
	malloc_mutex_postfork_child(&arena->chunks_mtx);
	malloc_mutex_postfork_child(&arena->huge_mtx);
	malloc_mutex_postfork_child(&arena->lock);
}
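
/*
 * Added commentary: the postfork hooks release (parent) or reinitialize
 * (child) the mutexes in the reverse of the order arena_prefork() acquired
 * them, which avoids lock-order inversions around fork().  These hooks are
 * driven from the malloc initialization code roughly as (assumed):
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 */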