#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default;
arena_bin_info_t arena_bin_info[NBINS];

size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t arena_maxclass; /* Max size class for arenas. */
static size_t small_maxrun; /* Max run size used for small size classes. */
static bool *small_run_tab; /* Valid small run page multiples. */
unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

#define CHUNK_MAP_KEY ((uintptr_t)0x1U)

JEMALLOC_INLINE_C arena_chunk_map_misc_t *
arena_miscelm_key_create(size_t size)
{

    return ((arena_chunk_map_misc_t *)((size << CHUNK_MAP_SIZE_SHIFT) |
        CHUNK_MAP_KEY));
}

JEMALLOC_INLINE_C bool
arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm)
{

    return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0);
}

#undef CHUNK_MAP_KEY

JEMALLOC_INLINE_C size_t
arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
{

    assert(arena_miscelm_is_key(miscelm));

    return (((uintptr_t)miscelm & CHUNK_MAP_SIZE_MASK) >>
        CHUNK_MAP_SIZE_SHIFT);
}

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
{
    arena_chunk_t *chunk;
    size_t pageind, mapbits;

    assert(!arena_miscelm_is_key(miscelm));

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
    pageind = arena_miscelm_to_pageind(miscelm);
    mapbits = arena_mapbits_get(chunk, pageind);
    return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
}

JEMALLOC_INLINE_C int
arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
    uintptr_t a_miscelm = (uintptr_t)a;
    uintptr_t b_miscelm = (uintptr_t)b;

    assert(a != NULL);
    assert(b != NULL);

    return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
    rb_link, arena_run_comp)
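
/*
 * rb_gen() expands into a family of static red-black tree operations over
 * arena_chunk_map_misc_t nodes, linked through rb_link and ordered by
 * arena_run_comp().  Illustrative (not exhaustive) generated names:
 *
 *     arena_run_tree_insert(&tree, miscelm);
 *     arena_run_tree_remove(&tree, miscelm);
 *     miscelm = arena_run_tree_first(&tree);
 */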

static size_t
run_quantize(size_t size)
{
    size_t qsize;

    assert(size != 0);
    assert(size == PAGE_CEILING(size));

    /* Don't change sizes that are valid small run sizes. */
    if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
        return (size);

    /*
     * Round down to the nearest run size that can actually be requested
     * during normal large allocation. Add large_pad so that cache index
     * randomization can offset the allocation from the page boundary.
     */
    qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
    if (qsize <= SMALL_MAXCLASS + large_pad)
        return (run_quantize(size - large_pad));
    assert(qsize <= size);
    return (qsize);
}
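
/*
 * Example, assuming 4 KiB pages and large_pad == 0: a size that is itself a
 * large size class, e.g. 20 KiB, quantizes to itself, since
 * index2size(size2index(20 KiB + 1) - 1) == 20 KiB.  A size that falls
 * between two large classes, e.g. 36 KiB, rounds down to the largest class
 * that does not exceed it (32 KiB with the default size classes), unless 9
 * pages happens to be a valid small run size, in which case the
 * small_run_tab check above returns it unchanged.
 */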

static size_t
run_quantize_next(size_t size)
{
    size_t large_run_size_next;

    assert(size != 0);
    assert(size == PAGE_CEILING(size));

    /*
     * Return the next quantized size greater than the input size.
     * Quantized sizes comprise the union of run sizes that back small
     * region runs, and run sizes that back large regions with no explicit
     * alignment constraints.
     */

    if (size > SMALL_MAXCLASS) {
        large_run_size_next = PAGE_CEILING(index2size(size2index(size -
            large_pad) + 1) + large_pad);
    } else
        large_run_size_next = SIZE_T_MAX;
    if (size >= small_maxrun)
        return (large_run_size_next);

    while (true) {
        size += PAGE;
        assert(size <= small_maxrun);
        if (small_run_tab[size >> LG_PAGE]) {
            if (large_run_size_next < size)
                return (large_run_size_next);
            return (size);
        }
    }
}
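
/*
 * For instance, assuming the default large size classes and a size of
 * 32 KiB that is at least small_maxrun, run_quantize_next(32 KiB) evaluates
 * to 40 KiB, the run size backing the next larger large class (again taking
 * large_pad == 0).
 */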

static size_t
run_quantize_first(size_t size)
{
    size_t qsize = run_quantize(size);

    if (qsize < size) {
        /*
         * Skip a quantization that may have an adequately large run,
         * because under-sized runs may be mixed in. This only happens
         * when an unusual size is requested, i.e. for aligned
         * allocation, and is just one of several places where linear
         * search would potentially find sufficiently aligned available
         * memory somewhere lower.
         */
        qsize = run_quantize_next(size);
    }
    return (qsize);
}

JEMALLOC_INLINE_C int
arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
    int ret;
    uintptr_t a_miscelm = (uintptr_t)a;
    size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ?
        arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a));
    size_t b_qsize = run_quantize(arena_miscelm_size_get(b));

    /*
     * Compare based on quantized size rather than size, in order to sort
     * equally useful runs only by address.
     */
    ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
    if (ret == 0) {
        if (!arena_miscelm_is_key(a)) {
            uintptr_t b_miscelm = (uintptr_t)b;

            ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
        } else {
            /*
             * Treat keys as if they are lower than anything else.
             */
            ret = -1;
        }
    }

    return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
    arena_chunk_map_misc_t, rb_link, arena_avail_comp)

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
        pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
        pageind));
}

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
    arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
    assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
        CHUNK_MAP_DIRTY);

    qr_new(&miscelm->rd, rd_link);
    qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
    arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
    arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
    assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
        CHUNK_MAP_DIRTY);

    qr_remove(&miscelm->rd, rd_link);
    assert(arena->ndirty >= npages);
    arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

    return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

    if (cache) {
        extent_node_dirty_linkage_init(node);
        extent_node_dirty_insert(node, &arena->runs_dirty,
            &arena->chunks_cache);
        arena->ndirty += arena_chunk_dirty_npages(node);
    }
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

    if (dirty) {
        extent_node_dirty_remove(node);
        assert(arena->ndirty >= arena_chunk_dirty_npages(node));
        arena->ndirty -= arena_chunk_dirty_npages(node);
    }
}

JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
    void *ret;
    unsigned regind;
    arena_chunk_map_misc_t *miscelm;
    void *rpages;

    assert(run->nfree > 0);
    assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

    regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
    miscelm = arena_run_to_miscelm(run);
    rpages = arena_miscelm_to_rpages(miscelm);
    ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
        (uintptr_t)(bin_info->reg_interval * regind));
    run->nfree--;
    return (ret);
}
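
/*
 * The region address computed above follows rpages + reg0_offset +
 * regind * reg_interval.  With a hypothetical bin whose reg0_offset is 0
 * and reg_interval is 192 bytes, region index 3 maps to rpages + 576;
 * arena_run_regind() inverts this mapping during deallocation.
 */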

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    size_t mapbits = arena_mapbits_get(chunk, pageind);
    index_t binind = arena_ptr_small_binind_get(ptr, mapbits);
    arena_bin_info_t *bin_info = &arena_bin_info[binind];
    unsigned regind = arena_run_regind(run, bin_info, ptr);

    assert(run->nfree < bin_info->nregs);
    /* Freeing an interior pointer can cause assertion failure. */
    assert(((uintptr_t)ptr -
        ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
        (uintptr_t)bin_info->reg0_offset)) %
        (uintptr_t)bin_info->reg_interval == 0);
    assert((uintptr_t)ptr >=
        (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
        (uintptr_t)bin_info->reg0_offset);
    /* Freeing an unallocated pointer can cause assertion failure. */
    assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

    bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
    run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
        (run_ind << LG_PAGE)), (npages << LG_PAGE));
    memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
        (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

    JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
        << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
    size_t i;
    UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

    arena_run_page_mark_zeroed(chunk, run_ind);
    for (i = 0; i < PAGE / sizeof(size_t); i++)
        assert(p[i] == 0);
}

static void
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{

    if (config_stats) {
        ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
            - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
            LG_PAGE);
        if (cactive_diff != 0)
            stats_cactive_add(cactive_diff);
    }
}
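
/*
 * Example, assuming 4 KiB pages and 2 MiB chunks: growing nactive from 500
 * to 520 pages crosses a chunk boundary, so cactive_diff is
 * CHUNK_CEILING(520 << 12) - CHUNK_CEILING(500 << 12) = 4 MiB - 2 MiB =
 * 2 MiB, whereas growing from 500 to 510 pages stays under the same chunk
 * ceiling and leaves cactive unchanged.
 */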

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
    size_t total_pages, rem_pages;

    assert(flag_dirty == 0 || flag_decommitted == 0);

    total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
        LG_PAGE;
    assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
        flag_dirty);
    assert(need_pages <= total_pages);
    rem_pages = total_pages - need_pages;

    arena_avail_remove(arena, chunk, run_ind, total_pages);
    if (flag_dirty != 0)
        arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
    arena_cactive_update(arena, need_pages, 0);
    arena->nactive += need_pages;

    /* Keep track of trailing unused pages for later use. */
    if (rem_pages > 0) {
        size_t flags = flag_dirty | flag_decommitted;

        if (flags != 0) {
            arena_mapbits_unallocated_set(chunk,
                run_ind+need_pages, (rem_pages << LG_PAGE), flags);
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                flags);
            if (flag_dirty != 0) {
                arena_run_dirty_insert(arena, chunk,
                    run_ind+need_pages, rem_pages);
            }
        } else {
            arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
                (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+need_pages));
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+total_pages-1));
        }
        arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
    }
}

static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    run_ind = arena_miscelm_to_pageind(miscelm);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);

    if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
        run_ind << LG_PAGE, size, arena->ind))
        return (true);

    if (remove) {
        arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
            flag_decommitted, need_pages);
    }

    if (zero) {
        if (flag_dirty == 0) {
            /*
             * The run is clean, so some pages may be zeroed (i.e.
             * never before touched).
             */
            for (i = 0; i < need_pages; i++) {
                if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
                    != 0)
                    arena_run_zero(chunk, run_ind+i, 1);
                else if (config_debug) {
                    arena_run_page_validate_zeroed(chunk,
                        run_ind+i);
                } else {
                    arena_run_page_mark_zeroed(chunk,
                        run_ind+i);
                }
            }
        } else {
            /* The run is dirty, so all pages must be zeroed. */
            arena_run_zero(chunk, run_ind, need_pages);
        }
    } else {
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
            (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
    }

    /*
     * Set the last element first, in case the run only contains one page
     * (i.e. both statements set the same element).
     */
    arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
    arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
    return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

    return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

    return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    index_t binind)
{
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

    assert(binind != BININD_INVALID);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    run_ind = arena_miscelm_to_pageind(miscelm);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);

    if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
        run_ind << LG_PAGE, size, arena->ind))
        return (true);

    arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
        flag_decommitted, need_pages);

    for (i = 0; i < need_pages; i++) {
        arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
        if (config_debug && flag_dirty == 0 &&
            arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
            arena_run_page_validate_zeroed(chunk, run_ind+i);
    }
    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
        (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
    return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
    arena_chunk_t *chunk;

    assert(arena->spare != NULL);

    chunk = arena->spare;
    arena->spare = NULL;

    assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
        arena_maxrun);
    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
        arena_maxrun);
    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
        arena_mapbits_dirty_get(chunk, chunk_npages-1));

    return (chunk);
}

static bool
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
{

    /*
     * The extent node notion of "committed" doesn't directly apply to
     * arena chunks. Arbitrarily mark them as committed. The commit state
     * of runs is tracked individually, and upon chunk deallocation the
     * entire chunk is in a consistent commit state.
     */
    extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
    extent_node_achunk_set(&chunk->node, true);
    return (chunk_register(chunk, &chunk->node));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool *zero, bool *commit)
{
    arena_chunk_t *chunk;

    malloc_mutex_unlock(&arena->lock);

    chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
        chunksize, chunksize, zero, commit);
    if (chunk != NULL && !*commit) {
        /* Commit header. */
        if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
            LG_PAGE, arena->ind)) {
            chunk_dalloc_wrapper(arena, chunk_hooks,
                (void *)chunk, chunksize, *commit);
            chunk = NULL;
        }
    }
    if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
        if (!*commit) {
            /* Undo commit of header. */
            chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
                LG_PAGE, arena->ind);
        }
        chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
            chunksize, *commit);
        chunk = NULL;
    }

    malloc_mutex_lock(&arena->lock);
    return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
{
    arena_chunk_t *chunk;
    chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

    chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
        chunksize, zero, true);
    if (chunk != NULL) {
        if (arena_chunk_register(arena, chunk, *zero)) {
            chunk_dalloc_cache(arena, &chunk_hooks, chunk,
                chunksize, true);
            return (NULL);
        }
        *commit = true;
    }
    if (chunk == NULL) {
        chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
            zero, commit);
    }

    if (config_stats && chunk != NULL) {
        arena->stats.mapped += chunksize;
        arena->stats.metadata_mapped += (map_bias << LG_PAGE);
    }

    return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
    arena_chunk_t *chunk;
    bool zero, commit;
    size_t flag_unzeroed, flag_decommitted, i;

    assert(arena->spare == NULL);

    zero = false;
    commit = false;
    chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
    if (chunk == NULL)
        return (NULL);

    /*
     * Initialize the map to contain one maximal free untouched run. Mark
     * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
     * chunk.
     */
    flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
    flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
    arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
        flag_unzeroed | flag_decommitted);
    /*
     * There is no need to initialize the internal page map entries unless
     * the chunk is not zeroed.
     */
    if (!zero) {
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
            (void *)arena_bitselm_get(chunk, map_bias+1),
            (size_t)((uintptr_t) arena_bitselm_get(chunk,
            chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
            map_bias+1)));
        for (i = map_bias+1; i < chunk_npages-1; i++)
            arena_mapbits_internal_set(chunk, i, flag_unzeroed);
    } else {
        JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
            *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
            arena_bitselm_get(chunk, chunk_npages-1) -
            (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
        if (config_debug) {
            for (i = map_bias+1; i < chunk_npages-1; i++) {
                assert(arena_mapbits_unzeroed_get(chunk, i) ==
                    flag_unzeroed);
            }
        }
    }
    arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
        flag_unzeroed);

    return (chunk);
}
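
/*
 * The flag computation above yields, for instance: a chunk returned zeroed
 * and committed gets flag_unzeroed == 0 and flag_decommitted == 0; one
 * returned non-zeroed but committed gets CHUNK_MAP_UNZEROED; one returned
 * decommitted is treated as zeroed-upon-commit, so only
 * CHUNK_MAP_DECOMMITTED is set.
 */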

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
    arena_chunk_t *chunk;

    if (arena->spare != NULL)
        chunk = arena_chunk_init_spare(arena);
    else {
        chunk = arena_chunk_init_hard(arena);
        if (chunk == NULL)
            return (NULL);
    }

    /* Insert the run into the runs_avail tree. */
    arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

    return (chunk);
}

static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{

    assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
        arena_maxrun);
    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
        arena_maxrun);
    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
        arena_mapbits_dirty_get(chunk, chunk_npages-1));
    assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
        arena_mapbits_decommitted_get(chunk, chunk_npages-1));

    /*
     * Remove run from the runs_avail tree, so that the arena does not use
     * it.
     */
    arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

    if (arena->spare != NULL) {
        arena_chunk_t *spare = arena->spare;
        chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
        bool committed;

        arena->spare = chunk;
        if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
            arena_run_dirty_remove(arena, spare, map_bias,
                chunk_npages-map_bias);
        }

        chunk_deregister(spare, &spare->node);

        committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
            0);
        if (!committed) {
            /*
             * Decommit the header. Mark the chunk as decommitted
             * even if header decommit fails, since treating a
             * partially committed chunk as committed has a high
             * potential for causing later access of decommitted
             * memory.
             */
            chunk_hooks = chunk_hooks_get(arena);
            chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
                LG_PAGE, arena->ind);
        }

        chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
            chunksize, committed);

        if (config_stats) {
            arena->stats.mapped -= chunksize;
            arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
        }
    } else
        arena->spare = chunk;
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.nmalloc_huge++;
    arena->stats.allocated_huge += usize;
    arena->stats.hstats[index].nmalloc++;
    arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.nmalloc_huge--;
    arena->stats.allocated_huge -= usize;
    arena->stats.hstats[index].nmalloc--;
    arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.ndalloc_huge++;
    arena->stats.allocated_huge -= usize;
    arena->stats.hstats[index].ndalloc++;
    arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.ndalloc_huge--;
    arena->stats.allocated_huge += usize;
    arena->stats.hstats[index].ndalloc--;
    arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

    arena_huge_dalloc_stats_update(arena, oldsize);
    arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

    arena_huge_dalloc_stats_update_undo(arena, oldsize);
    arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(arena_t *arena)
{
    extent_node_t *node;

    malloc_mutex_lock(&arena->node_cache_mtx);
    node = ql_last(&arena->node_cache, ql_link);
    if (node == NULL) {
        malloc_mutex_unlock(&arena->node_cache_mtx);
        return (base_alloc(sizeof(extent_node_t)));
    }
    ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
    malloc_mutex_unlock(&arena->node_cache_mtx);
    return (node);
}

void
arena_node_dalloc(arena_t *arena, extent_node_t *node)
{

    malloc_mutex_lock(&arena->node_cache_mtx);
    ql_elm_new(node, ql_link);
    ql_tail_insert(&arena->node_cache, node, ql_link);
    malloc_mutex_unlock(&arena->node_cache_mtx);
}

static void *
arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t usize, size_t alignment, bool *zero, size_t csize)
{
    void *ret;
    bool commit = true;

    ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
        zero, &commit);
    if (ret == NULL) {
        /* Revert optimistic stats updates. */
        malloc_mutex_lock(&arena->lock);
        if (config_stats) {
            arena_huge_malloc_stats_update_undo(arena, usize);
            arena->stats.mapped -= usize;
        }
        arena->nactive -= (usize >> LG_PAGE);
        malloc_mutex_unlock(&arena->lock);
    }

    return (ret);
}

void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
    bool *zero)
{
    void *ret;
    chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
    size_t csize = CHUNK_CEILING(usize);

    malloc_mutex_lock(&arena->lock);

    /* Optimistically update stats. */
    if (config_stats) {
        arena_huge_malloc_stats_update(arena, usize);
        arena->stats.mapped += usize;
    }
    arena->nactive += (usize >> LG_PAGE);

    ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
        zero, true);
    malloc_mutex_unlock(&arena->lock);
    if (ret == NULL) {
        ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
            alignment, zero, csize);
    }

    if (config_stats && ret != NULL)
        stats_cactive_add(usize);
    return (ret);
}

void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
    chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
    size_t csize;

    csize = CHUNK_CEILING(usize);
    malloc_mutex_lock(&arena->lock);
    if (config_stats) {
        arena_huge_dalloc_stats_update(arena, usize);
        arena->stats.mapped -= usize;
        stats_cactive_sub(usize);
    }
    arena->nactive -= (usize >> LG_PAGE);

    chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
    malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{

    assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
    assert(oldsize != usize);

    malloc_mutex_lock(&arena->lock);
    if (config_stats)
        arena_huge_ralloc_stats_update(arena, oldsize, usize);
    if (oldsize < usize) {
        size_t udiff = usize - oldsize;
        arena->nactive += udiff >> LG_PAGE;
        if (config_stats)
            stats_cactive_add(udiff);
    } else {
        size_t udiff = oldsize - usize;
        arena->nactive -= udiff >> LG_PAGE;
        if (config_stats)
            stats_cactive_sub(udiff);
    }
    malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{
    size_t udiff = oldsize - usize;
    size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

    malloc_mutex_lock(&arena->lock);
    if (config_stats) {
        arena_huge_ralloc_stats_update(arena, oldsize, usize);
        if (cdiff != 0) {
            arena->stats.mapped -= cdiff;
            stats_cactive_sub(udiff);
        }
    }
    arena->nactive -= udiff >> LG_PAGE;

    if (cdiff != 0) {
        chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
        void *nchunk = (void *)((uintptr_t)chunk +
            CHUNK_CEILING(usize));

        chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
    }
    malloc_mutex_unlock(&arena->lock);
}

static bool
arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
    size_t udiff, size_t cdiff)
{
    bool err;
    bool commit = true;

    err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
        zero, &commit) == NULL);
    if (err) {
        /* Revert optimistic stats updates. */
        malloc_mutex_lock(&arena->lock);
        if (config_stats) {
            arena_huge_ralloc_stats_update_undo(arena, oldsize,
                usize);
            arena->stats.mapped -= cdiff;
        }
        arena->nactive -= (udiff >> LG_PAGE);
        malloc_mutex_unlock(&arena->lock);
    } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
        cdiff, true, arena->ind)) {
        chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
            true);
        err = true;
    }
    return (err);
}

bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize, bool *zero)
{
    bool err;
    chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
    void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
    size_t udiff = usize - oldsize;
    size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

    malloc_mutex_lock(&arena->lock);

    /* Optimistically update stats. */
    if (config_stats) {
        arena_huge_ralloc_stats_update(arena, oldsize, usize);
        arena->stats.mapped += cdiff;
    }
    arena->nactive += (udiff >> LG_PAGE);

    err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
        chunksize, zero, true) == NULL);
    malloc_mutex_unlock(&arena->lock);
    if (err) {
        err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
            chunk, oldsize, usize, zero, nchunk, udiff,
            cdiff);
    } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
        cdiff, true, arena->ind)) {
        chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
            true);
        err = true;
    }

    if (config_stats && !err)
        stats_cactive_add(udiff);
    return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are quantized, so not all candidate runs are necessarily exactly
 * the same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
    size_t search_size = run_quantize_first(size);
    arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size);
    arena_chunk_map_misc_t *miscelm =
        arena_avail_tree_nsearch(&arena->runs_avail, key);
    if (miscelm == NULL)
        return (NULL);
    return (&miscelm->run);
}
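
/*
 * For example, a 3-page request maps through run_quantize_first() to a
 * search size, which arena_miscelm_key_create() encodes as a key;
 * arena_avail_tree_nsearch() then returns the lowest-addressed available
 * run whose quantized size is at least the search size (keys compare lower
 * than real map elements of equal quantized size, so nsearch lands on the
 * first real candidate).
 */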

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
    arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
    if (run != NULL) {
        if (arena_run_split_large(arena, run, size, zero))
            run = NULL;
    }
    return (run);
}

static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxrun);
    assert(size == PAGE_CEILING(size));

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_large_helper(arena, size, zero);
    if (run != NULL)
        return (run);

    /*
     * No usable runs. Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = &arena_miscelm_get(chunk, map_bias)->run;
        if (arena_run_split_large(arena, run, size, zero))
            run = NULL;
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
{
    arena_run_t *run = arena_run_first_best_fit(arena, size);
    if (run != NULL) {
        if (arena_run_split_small(arena, run, size, binind))
            run = NULL;
    }
    return (run);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxrun);
    assert(size == PAGE_CEILING(size));
    assert(binind != BININD_INVALID);

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_small_helper(arena, size, binind);
    if (run != NULL)
        return (run);

    /*
     * No usable runs. Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = &arena_miscelm_get(chunk, map_bias)->run;
        if (arena_run_split_small(arena, run, size, binind))
            run = NULL;
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    return (arena_run_alloc_small_helper(arena, size, binind));
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

    return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
        << 3));
}
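
/*
 * That is, valid values run from -1 (purging disabled) through one less
 * than the number of bits in a size_t, e.g. -1..63 on an LP64 system; a
 * value of N permits roughly 2^N active pages per dirty page.
 */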
1172
1173ssize_t
1174arena_lg_dirty_mult_get(arena_t *arena)
1175{
1176 ssize_t lg_dirty_mult;
1177
1178 malloc_mutex_lock(&arena->lock);
1179 lg_dirty_mult = arena->lg_dirty_mult;
1180 malloc_mutex_unlock(&arena->lock);
1181
1182 return (lg_dirty_mult);
1183}
1184
1185bool
1186arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
1187{
1188
1189 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
1190 return (true);
1191
1192 malloc_mutex_lock(&arena->lock);
1193 arena->lg_dirty_mult = lg_dirty_mult;
1194 arena_maybe_purge(arena);
1195 malloc_mutex_unlock(&arena->lock);
1196
1197 return (false);
1198}
1199
Jason Evans99bd94f2015-02-18 16:40:53 -08001200void
Jason Evans05b21be2010-03-14 17:36:10 -07001201arena_maybe_purge(arena_t *arena)
1202{
1203
Jason Evanse3d13062012-10-30 15:42:37 -07001204 /* Don't purge if the option is disabled. */
Jason Evans8d6a3e82015-03-18 18:55:33 -07001205 if (arena->lg_dirty_mult < 0)
Jason Evanse3d13062012-10-30 15:42:37 -07001206 return;
Jason Evans0a9f9a42015-06-22 18:50:32 -07001207 /* Don't recursively purge. */
1208 if (arena->purging)
Jason Evanse3d13062012-10-30 15:42:37 -07001209 return;
Jason Evans0a9f9a42015-06-22 18:50:32 -07001210 /*
1211 * Iterate, since preventing recursive purging could otherwise leave too
1212 * many dirty pages.
1213 */
1214 while (true) {
1215 size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1216 if (threshold < chunk_npages)
1217 threshold = chunk_npages;
1218 /*
1219 * Don't purge unless the number of purgeable pages exceeds the
1220 * threshold.
1221 */
1222 if (arena->ndirty <= threshold)
1223 return;
1224 arena_purge(arena, false);
1225 }
Jason Evans05b21be2010-03-14 17:36:10 -07001226}
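
/*
 * Editorial sketch (not upstream code): the purge trigger above reduces to
 * comparing ndirty against max(nactive >> lg_dirty_mult, chunk_npages),
 * with a negative lg_dirty_mult meaning "never purge".  Restated in
 * isolation; min_pages stands in for chunk_npages, and lg_dirty_mult is
 * assumed to have passed arena_lg_dirty_mult_valid().
 */
static bool
sketch_dirty_exceeds_threshold(size_t nactive, size_t ndirty,
    ssize_t lg_dirty_mult, size_t min_pages)
{
	size_t threshold;

	if (lg_dirty_mult < 0)
		return (false);
	threshold = nactive >> lg_dirty_mult;
	if (threshold < min_pages)
		threshold = min_pages;
	return (ndirty > threshold);
}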
1227
Qinfan Wua244e502014-07-21 10:23:36 -07001228static size_t
1229arena_dirty_count(arena_t *arena)
1230{
1231 size_t ndirty = 0;
Jason Evans38e42d32015-03-10 18:15:40 -07001232 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001233 extent_node_t *chunkselm;
Qinfan Wua244e502014-07-21 10:23:36 -07001234
Jason Evans38e42d32015-03-10 18:15:40 -07001235 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001236 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001237 rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001238 size_t npages;
1239
Jason Evansf5c8f372015-03-10 18:29:49 -07001240 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001241 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001242 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001243 } else {
Jason Evans38e42d32015-03-10 18:15:40 -07001244 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
1245 rdelm);
1246 arena_chunk_map_misc_t *miscelm =
1247 arena_rd_to_miscelm(rdelm);
1248 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001249 assert(arena_mapbits_allocated_get(chunk, pageind) ==
1250 0);
1251 assert(arena_mapbits_large_get(chunk, pageind) == 0);
1252 assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
1253 npages = arena_mapbits_unallocated_size_get(chunk,
1254 pageind) >> LG_PAGE;
1255 }
Qinfan Wua244e502014-07-21 10:23:36 -07001256 ndirty += npages;
1257 }
1258
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001259 return (ndirty);
Jason Evansaa5113b2014-01-14 16:23:03 -08001260}
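
/*
 * Editorial sketch (not upstream code): runs_dirty and chunks_cache are
 * circular doubly linked lists threaded through sentinels (the qr_*
 * macros), which is why the loop above starts at qr_next(&sentinel) and
 * stops when it wraps back around to the sentinel.  A minimal stand-alone
 * ring traversal; sketch_ring_t is invented here.
 */
typedef struct sketch_ring_s {
	struct sketch_ring_s	*prev, *next;
	size_t			npages;
} sketch_ring_t;

static size_t
sketch_ring_count_pages(const sketch_ring_t *sentinel)
{
	size_t total = 0;
	const sketch_ring_t *node;

	for (node = sentinel->next; node != sentinel; node = node->next)
		total += node->npages;
	return (total);
}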
1261
1262static size_t
Jason Evans070b3c32014-08-14 14:45:58 -07001263arena_compute_npurge(arena_t *arena, bool all)
Jason Evansaa5113b2014-01-14 16:23:03 -08001264{
Jason Evans070b3c32014-08-14 14:45:58 -07001265 size_t npurge;
Jason Evansaa5113b2014-01-14 16:23:03 -08001266
1267 /*
1268 * Compute the minimum number of pages that this thread should try to
1269 * purge.
1270 */
Jason Evans551ebc42014-10-03 10:16:09 -07001271 if (!all) {
Jason Evans8d6a3e82015-03-18 18:55:33 -07001272 size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
Mike Hommey65057332015-02-04 07:16:55 +09001273 threshold = threshold < chunk_npages ? chunk_npages : threshold;
Jason Evansaa5113b2014-01-14 16:23:03 -08001274
Jason Evans070b3c32014-08-14 14:45:58 -07001275 npurge = arena->ndirty - threshold;
Jason Evansaa5113b2014-01-14 16:23:03 -08001276 } else
Jason Evans070b3c32014-08-14 14:45:58 -07001277 npurge = arena->ndirty;
Jason Evansaa5113b2014-01-14 16:23:03 -08001278
Jason Evans070b3c32014-08-14 14:45:58 -07001279 return (npurge);
Jason Evansaa5113b2014-01-14 16:23:03 -08001280}
1281
Qinfan Wue9708002014-07-21 18:09:04 -07001282static size_t
Jason Evansb49a3342015-07-28 11:28:19 -04001283arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
1284 size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001285 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001286{
Jason Evans38e42d32015-03-10 18:15:40 -07001287 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001288 extent_node_t *chunkselm;
Qinfan Wue9708002014-07-21 18:09:04 -07001289 size_t nstashed = 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08001290
Jason Evansee41ad42015-02-15 18:04:46 -08001291 /* Stash at least npurge pages. */
Jason Evans38e42d32015-03-10 18:15:40 -07001292 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001293 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001294 rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
Jason Evansee41ad42015-02-15 18:04:46 -08001295 size_t npages;
Jason Evans38e42d32015-03-10 18:15:40 -07001296 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansaa5113b2014-01-14 16:23:03 -08001297
Jason Evansf5c8f372015-03-10 18:29:49 -07001298 if (rdelm == &chunkselm->rd) {
Jason Evans99bd94f2015-02-18 16:40:53 -08001299 extent_node_t *chunkselm_next;
1300 bool zero;
Jason Evansee41ad42015-02-15 18:04:46 -08001301 UNUSED void *chunk;
Jason Evansaa5113b2014-01-14 16:23:03 -08001302
Jason Evans738e0892015-02-18 01:15:50 -08001303 chunkselm_next = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001304 /*
Jason Evans99bd94f2015-02-18 16:40:53 -08001305 * Allocate. chunkselm remains valid due to the
1306 * dalloc_node=false argument to chunk_alloc_cache().
Jason Evansee41ad42015-02-15 18:04:46 -08001307 */
Jason Evansee41ad42015-02-15 18:04:46 -08001308 zero = false;
Jason Evansb49a3342015-07-28 11:28:19 -04001309 chunk = chunk_alloc_cache(arena, chunk_hooks,
Jason Evans99bd94f2015-02-18 16:40:53 -08001310 extent_node_addr_get(chunkselm),
1311 extent_node_size_get(chunkselm), chunksize, &zero,
1312 false);
1313 assert(chunk == extent_node_addr_get(chunkselm));
1314 assert(zero == extent_node_zeroed_get(chunkselm));
1315 extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
Jason Evans738e0892015-02-18 01:15:50 -08001316 purge_chunks_sentinel);
Jason Evans99bd94f2015-02-18 16:40:53 -08001317 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evansee41ad42015-02-15 18:04:46 -08001318 chunkselm = chunkselm_next;
1319 } else {
1320 arena_chunk_t *chunk =
Jason Evans38e42d32015-03-10 18:15:40 -07001321 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1322 arena_chunk_map_misc_t *miscelm =
1323 arena_rd_to_miscelm(rdelm);
1324 size_t pageind = arena_miscelm_to_pageind(miscelm);
1325 arena_run_t *run = &miscelm->run;
Jason Evansee41ad42015-02-15 18:04:46 -08001326 size_t run_size =
1327 arena_mapbits_unallocated_size_get(chunk, pageind);
Jason Evans070b3c32014-08-14 14:45:58 -07001328
Jason Evansee41ad42015-02-15 18:04:46 -08001329 npages = run_size >> LG_PAGE;
1330
1331 assert(pageind + npages <= chunk_npages);
1332 assert(arena_mapbits_dirty_get(chunk, pageind) ==
1333 arena_mapbits_dirty_get(chunk, pageind+npages-1));
1334
1335 /*
1336 * If purging the spare chunk's run, make it available
1337 * prior to allocation.
1338 */
1339 if (chunk == arena->spare)
1340 arena_chunk_alloc(arena);
1341
1342 /* Temporarily allocate the free dirty run. */
1343 arena_run_split_large(arena, run, run_size, false);
Jason Evans339c2b22015-02-17 22:25:56 -08001344 /* Stash. */
Jason Evansee41ad42015-02-15 18:04:46 -08001345 if (false)
Jason Evans38e42d32015-03-10 18:15:40 -07001346 qr_new(rdelm, rd_link); /* Redundant. */
Jason Evansee41ad42015-02-15 18:04:46 -08001347 else {
Jason Evans38e42d32015-03-10 18:15:40 -07001348 assert(qr_next(rdelm, rd_link) == rdelm);
1349 assert(qr_prev(rdelm, rd_link) == rdelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001350 }
Jason Evans38e42d32015-03-10 18:15:40 -07001351 qr_meld(purge_runs_sentinel, rdelm, rd_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001352 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001353
Qinfan Wue9708002014-07-21 18:09:04 -07001354 nstashed += npages;
Jason Evans551ebc42014-10-03 10:16:09 -07001355 if (!all && nstashed >= npurge)
Qinfan Wue9708002014-07-21 18:09:04 -07001356 break;
Jason Evansaa5113b2014-01-14 16:23:03 -08001357 }
Qinfan Wue9708002014-07-21 18:09:04 -07001358
1359 return (nstashed);
Jason Evansaa5113b2014-01-14 16:23:03 -08001360}
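
/*
 * Editorial sketch (not upstream code): at its core arena_stash_dirty()
 * moves entries from the arena's dirty tracking onto caller-owned sentinels
 * until a page quota is met, so that the expensive purge can later run
 * without the arena lock.  Reusing sketch_ring_t from the sketch above, the
 * list surgery looks like this:
 */
static size_t
sketch_ring_stash(sketch_ring_t *src_sentinel, sketch_ring_t *dst_sentinel,
    size_t quota)
{
	size_t stashed = 0;
	sketch_ring_t *node, *next;

	for (node = src_sentinel->next; node != src_sentinel && stashed <
	    quota; node = next) {
		next = node->next;
		/* Unlink from the source ring... */
		node->prev->next = node->next;
		node->next->prev = node->prev;
		/* ...and splice in just before the destination sentinel. */
		node->prev = dst_sentinel->prev;
		node->next = dst_sentinel;
		dst_sentinel->prev->next = node;
		dst_sentinel->prev = node;
		stashed += node->npages;
	}
	return (stashed);
}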
1361
1362static size_t
Jason Evansb49a3342015-07-28 11:28:19 -04001363arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001364 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001365 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001366{
Qinfan Wue9708002014-07-21 18:09:04 -07001367 size_t npurged, nmadvise;
Jason Evans38e42d32015-03-10 18:15:40 -07001368 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001369 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001370
Jason Evansaa5113b2014-01-14 16:23:03 -08001371 if (config_stats)
1372 nmadvise = 0;
1373 npurged = 0;
Qinfan Wue9708002014-07-21 18:09:04 -07001374
1375 malloc_mutex_unlock(&arena->lock);
Jason Evans38e42d32015-03-10 18:15:40 -07001376 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001377 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001378 rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001379 size_t npages;
Qinfan Wue9708002014-07-21 18:09:04 -07001380
Jason Evansf5c8f372015-03-10 18:29:49 -07001381 if (rdelm == &chunkselm->rd) {
Jason Evansb49a3342015-07-28 11:28:19 -04001382 /*
1383 * Don't actually purge the chunk here because 1)
1384 * chunkselm is embedded in the chunk and must remain
1385 * valid, and 2) we deallocate the chunk in
1386 * arena_unstash_purged(), where it is destroyed,
1387 * decommitted, or purged, depending on chunk
1388 * deallocation policy.
1389 */
Jason Evansee41ad42015-02-15 18:04:46 -08001390 size_t size = extent_node_size_get(chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001391 npages = size >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001392 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001393 } else {
Jason Evans45186f02015-08-10 23:03:34 -07001394 size_t pageind, run_size, flag_unzeroed, flags, i;
1395 bool decommitted;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001396 arena_chunk_t *chunk =
1397 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001398 arena_chunk_map_misc_t *miscelm =
1399 arena_rd_to_miscelm(rdelm);
1400 pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001401 run_size = arena_mapbits_large_size_get(chunk, pageind);
1402 npages = run_size >> LG_PAGE;
Qinfan Wue9708002014-07-21 18:09:04 -07001403
Jason Evansee41ad42015-02-15 18:04:46 -08001404 assert(pageind + npages <= chunk_npages);
Jason Evansde249c82015-08-09 16:47:27 -07001405 assert(!arena_mapbits_decommitted_get(chunk, pageind));
1406 assert(!arena_mapbits_decommitted_get(chunk,
1407 pageind+npages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001408 decommitted = !chunk_hooks->decommit(chunk, chunksize,
1409 pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1410 if (decommitted) {
Jason Evans45186f02015-08-10 23:03:34 -07001411 flag_unzeroed = 0;
1412 flags = CHUNK_MAP_DECOMMITTED;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001413 } else {
Jason Evans45186f02015-08-10 23:03:34 -07001414 flag_unzeroed = chunk_purge_wrapper(arena,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001415 chunk_hooks, chunk, chunksize, pageind <<
Jason Evans45186f02015-08-10 23:03:34 -07001416 LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1417 flags = flag_unzeroed;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001418 }
Jason Evans45186f02015-08-10 23:03:34 -07001419 arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1420 flags);
1421 arena_mapbits_large_set(chunk, pageind, run_size,
1422 flags);
Jason Evansee41ad42015-02-15 18:04:46 -08001423
1424 /*
Jason Evans45186f02015-08-10 23:03:34 -07001425 * Set the unzeroed flag for internal pages, now that
Jason Evans8d6a3e82015-03-18 18:55:33 -07001426 * chunk_purge_wrapper() has returned whether the pages
1427 * were zeroed as a side effect of purging. This chunk
1428 * map modification is safe even though the arena mutex
Jason Evansee41ad42015-02-15 18:04:46 -08001429 * isn't currently owned by this thread, because the run
1430 * is marked as allocated, thus protecting it from being
1431 * modified by any other thread. As long as these
1432 * writes don't perturb the first and last elements'
1433 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1434 */
Jason Evans45186f02015-08-10 23:03:34 -07001435 for (i = 1; i < npages-1; i++) {
1436 arena_mapbits_internal_set(chunk, pageind+i,
Jason Evansee41ad42015-02-15 18:04:46 -08001437 flag_unzeroed);
1438 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001439 }
Qinfan Wue9708002014-07-21 18:09:04 -07001440
Jason Evansaa5113b2014-01-14 16:23:03 -08001441 npurged += npages;
1442 if (config_stats)
1443 nmadvise++;
1444 }
1445 malloc_mutex_lock(&arena->lock);
Qinfan Wue9708002014-07-21 18:09:04 -07001446
1447 if (config_stats) {
Jason Evansaa5113b2014-01-14 16:23:03 -08001448 arena->stats.nmadvise += nmadvise;
Qinfan Wue9708002014-07-21 18:09:04 -07001449 arena->stats.purged += npurged;
1450 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001451
1452 return (npurged);
1453}
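
/*
 * Editorial sketch (not upstream code): on Linux the default purge path
 * ultimately issues madvise() on the run's pages -- typically
 * MADV_DONTNEED, after which private anonymous pages read back as zero, or
 * MADV_FREE on configurations that support it (in which case contents may
 * survive, hence the unzeroed flag handling above).  A stand-alone
 * illustration of that primitive, not the chunk hook itself; POSIX assumed.
 */
#include <sys/mman.h>

static bool
sketch_purge_pages(void *chunk, size_t offset, size_t length)
{

	/* Return true if the pages may still hold nonzero data. */
	return (madvise((char *)chunk + offset, length, MADV_DONTNEED) != 0);
}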
1454
1455static void
Jason Evansb49a3342015-07-28 11:28:19 -04001456arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001457 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001458 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001459{
Jason Evans38e42d32015-03-10 18:15:40 -07001460 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001461 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001462
Jason Evansb49a3342015-07-28 11:28:19 -04001463 /* Deallocate chunks/runs. */
Jason Evans38e42d32015-03-10 18:15:40 -07001464 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001465 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001466 rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1467 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansf5c8f372015-03-10 18:29:49 -07001468 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001469 extent_node_t *chunkselm_next = qr_next(chunkselm,
Jason Evans738e0892015-02-18 01:15:50 -08001470 cc_link);
Jason Evans339c2b22015-02-17 22:25:56 -08001471 void *addr = extent_node_addr_get(chunkselm);
1472 size_t size = extent_node_size_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001473 bool zeroed = extent_node_zeroed_get(chunkselm);
Jason Evansde249c82015-08-09 16:47:27 -07001474 bool committed = extent_node_committed_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001475 extent_node_dirty_remove(chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001476 arena_node_dalloc(arena, chunkselm);
1477 chunkselm = chunkselm_next;
Jason Evansb49a3342015-07-28 11:28:19 -04001478 chunk_dalloc_arena(arena, chunk_hooks, addr, size,
Jason Evansde249c82015-08-09 16:47:27 -07001479 zeroed, committed);
Jason Evansee41ad42015-02-15 18:04:46 -08001480 } else {
Jason Evans8fadb1a2015-08-04 10:49:46 -07001481 arena_chunk_t *chunk =
1482 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001483 arena_chunk_map_misc_t *miscelm =
1484 arena_rd_to_miscelm(rdelm);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001485 size_t pageind = arena_miscelm_to_pageind(miscelm);
1486 bool decommitted = (arena_mapbits_decommitted_get(chunk,
1487 pageind) != 0);
Jason Evans38e42d32015-03-10 18:15:40 -07001488 arena_run_t *run = &miscelm->run;
1489 qr_remove(rdelm, rd_link);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001490 arena_run_dalloc(arena, run, false, true, decommitted);
Jason Evansee41ad42015-02-15 18:04:46 -08001491 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001492 }
1493}
1494
Jason Evans8d6a3e82015-03-18 18:55:33 -07001495static void
Jason Evans6005f072010-09-30 16:55:08 -07001496arena_purge(arena_t *arena, bool all)
Jason Evanse476f8a2010-01-16 09:53:50 -08001497{
Jason Evans8fadb1a2015-08-04 10:49:46 -07001498 chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
Jason Evans070b3c32014-08-14 14:45:58 -07001499 size_t npurge, npurgeable, npurged;
Jason Evans38e42d32015-03-10 18:15:40 -07001500 arena_runs_dirty_link_t purge_runs_sentinel;
Jason Evansee41ad42015-02-15 18:04:46 -08001501 extent_node_t purge_chunks_sentinel;
Qinfan Wue9708002014-07-21 18:09:04 -07001502
Jason Evans0a9f9a42015-06-22 18:50:32 -07001503 arena->purging = true;
1504
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001505 /*
1506 * Calls to arena_dirty_count() are disabled even for debug builds
1507 * because overhead grows nonlinearly as memory usage increases.
1508 */
1509 if (false && config_debug) {
Qinfan Wu90737fc2014-07-21 19:39:20 -07001510 size_t ndirty = arena_dirty_count(arena);
Qinfan Wua244e502014-07-21 10:23:36 -07001511 assert(ndirty == arena->ndirty);
Jason Evans2caa4712010-03-04 21:35:07 -08001512 }
Jason Evans8d6a3e82015-03-18 18:55:33 -07001513 assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all);
Jason Evanse476f8a2010-01-16 09:53:50 -08001514
Jason Evans7372b152012-02-10 20:22:09 -08001515 if (config_stats)
1516 arena->stats.npurge++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001517
Jason Evans070b3c32014-08-14 14:45:58 -07001518 npurge = arena_compute_npurge(arena, all);
Jason Evansee41ad42015-02-15 18:04:46 -08001519 qr_new(&purge_runs_sentinel, rd_link);
Jason Evans47701b22015-02-17 22:23:10 -08001520 extent_node_dirty_linkage_init(&purge_chunks_sentinel);
Jason Evansee41ad42015-02-15 18:04:46 -08001521
Jason Evansb49a3342015-07-28 11:28:19 -04001522 npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge,
1523 &purge_runs_sentinel, &purge_chunks_sentinel);
Jason Evans070b3c32014-08-14 14:45:58 -07001524 assert(npurgeable >= npurge);
Jason Evansb49a3342015-07-28 11:28:19 -04001525 npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001526 &purge_chunks_sentinel);
Qinfan Wue9708002014-07-21 18:09:04 -07001527 assert(npurged == npurgeable);
Jason Evansb49a3342015-07-28 11:28:19 -04001528 arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001529 &purge_chunks_sentinel);
Jason Evans0a9f9a42015-06-22 18:50:32 -07001530
1531 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08001532}
1533
Jason Evans6005f072010-09-30 16:55:08 -07001534void
1535arena_purge_all(arena_t *arena)
1536{
1537
1538 malloc_mutex_lock(&arena->lock);
1539 arena_purge(arena, true);
1540 malloc_mutex_unlock(&arena->lock);
1541}
1542
Jason Evanse476f8a2010-01-16 09:53:50 -08001543static void
Jason Evansaa5113b2014-01-14 16:23:03 -08001544arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001545 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
1546 size_t flag_decommitted)
Jason Evanse476f8a2010-01-16 09:53:50 -08001547{
Jason Evansaa5113b2014-01-14 16:23:03 -08001548 size_t size = *p_size;
1549 size_t run_ind = *p_run_ind;
1550 size_t run_pages = *p_run_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001551
1552 /* Try to coalesce forward. */
1553 if (run_ind + run_pages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07001554 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
Jason Evans8fadb1a2015-08-04 10:49:46 -07001555 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
1556 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
1557 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07001558 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1559 run_ind+run_pages);
Jason Evansae4c7b42012-04-02 07:04:34 -07001560 size_t nrun_pages = nrun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001561
1562 /*
1563 * Remove successor from runs_avail; the coalesced run is
1564 * inserted later.
1565 */
Jason Evans203484e2012-05-02 00:30:36 -07001566 assert(arena_mapbits_unallocated_size_get(chunk,
1567 run_ind+run_pages+nrun_pages-1) == nrun_size);
1568 assert(arena_mapbits_dirty_get(chunk,
1569 run_ind+run_pages+nrun_pages-1) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001570 assert(arena_mapbits_decommitted_get(chunk,
1571 run_ind+run_pages+nrun_pages-1) == flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07001572 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08001573
Jason Evansee41ad42015-02-15 18:04:46 -08001574 /*
1575 * If the successor is dirty, remove it from the set of dirty
1576 * pages.
1577 */
Qinfan Wu04d60a12014-07-18 14:21:17 -07001578 if (flag_dirty != 0) {
Jason Evansee41ad42015-02-15 18:04:46 -08001579 arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
Jason Evans070b3c32014-08-14 14:45:58 -07001580 nrun_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07001581 }
1582
Jason Evanse476f8a2010-01-16 09:53:50 -08001583 size += nrun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07001584 run_pages += nrun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001585
Jason Evans203484e2012-05-02 00:30:36 -07001586 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1587 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1588 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001589 }
1590
1591 /* Try to coalesce backward. */
Jason Evansaa5113b2014-01-14 16:23:03 -08001592 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
1593 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
Jason Evans8fadb1a2015-08-04 10:49:46 -07001594 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
1595 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07001596 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
1597 run_ind-1);
Jason Evansae4c7b42012-04-02 07:04:34 -07001598 size_t prun_pages = prun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001599
Jason Evans12ca9142010-10-17 19:56:09 -07001600 run_ind -= prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001601
1602 /*
1603 * Remove predecessor from runs_avail; the coalesced run is
1604 * inserted later.
1605 */
Jason Evans203484e2012-05-02 00:30:36 -07001606 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1607 prun_size);
1608 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001609 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
1610 flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07001611 arena_avail_remove(arena, chunk, run_ind, prun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08001612
Jason Evansee41ad42015-02-15 18:04:46 -08001613 /*
1614 * If the predecessor is dirty, remove it from the set of dirty
1615 * pages.
1616 */
1617 if (flag_dirty != 0) {
1618 arena_run_dirty_remove(arena, chunk, run_ind,
1619 prun_pages);
1620 }
Qinfan Wu04d60a12014-07-18 14:21:17 -07001621
Jason Evanse476f8a2010-01-16 09:53:50 -08001622 size += prun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07001623 run_pages += prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001624
Jason Evans203484e2012-05-02 00:30:36 -07001625 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1626 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1627 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001628 }
1629
Jason Evansaa5113b2014-01-14 16:23:03 -08001630 *p_size = size;
1631 *p_run_ind = run_ind;
1632 *p_run_pages = run_pages;
1633}
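
/*
 * Editorial sketch (not upstream code): coalescing leans on the convention,
 * visible in the arena_mapbits_unallocated_size_set() calls above, that a
 * free span's size is recorded at both its first and its last page, so the
 * predecessor's size can be read at run_ind-1 without any search.  A
 * stand-alone model over a plain per-page size array (free_size[i] is
 * nonzero only at the first and last page of a free span; all names are
 * invented for this sketch).
 */
static void
sketch_span_coalesce(size_t *free_size, size_t npages, size_t *p_ind,
    size_t *p_len)
{
	size_t ind = *p_ind, len = *p_len;

	/* Forward: absorb a free successor. */
	if (ind + len < npages && free_size[ind + len] != 0) {
		size_t next_len = free_size[ind + len];

		free_size[ind + len] = 0;
		free_size[ind + len + next_len - 1] = 0;
		len += next_len;
	}
	/* Backward: the predecessor's length is stored at its last page. */
	if (ind > 0 && free_size[ind - 1] != 0) {
		size_t prev_len = free_size[ind - 1];

		ind -= prev_len;
		free_size[ind] = 0;
		free_size[ind + prev_len - 1] = 0;
		len += prev_len;
	}
	/* Record the merged span at its first and last page. */
	free_size[ind] = len;
	free_size[ind + len - 1] = len;
	*p_ind = ind;
	*p_len = len;
}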
1634
Jason Evans8fadb1a2015-08-04 10:49:46 -07001635static size_t
1636arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1637 size_t run_ind)
1638{
1639 size_t size;
1640
1641 assert(run_ind >= map_bias);
1642 assert(run_ind < chunk_npages);
1643
1644 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
1645 size = arena_mapbits_large_size_get(chunk, run_ind);
1646 assert(size == PAGE || arena_mapbits_large_size_get(chunk,
1647 run_ind+(size>>LG_PAGE)-1) == 0);
1648 } else {
1649 arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
1650 size = bin_info->run_size;
1651 }
1652
1653 return (size);
1654}
1655
1656static bool
1657arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run)
1658{
1659 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1660 size_t run_ind = arena_miscelm_to_pageind(miscelm);
1661 size_t offset = run_ind << LG_PAGE;
1662 size_t length = arena_run_size_get(arena, chunk, run, run_ind);
1663
1664 return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length,
1665 arena->ind));
1666}
1667
Jason Evansaa5113b2014-01-14 16:23:03 -08001668static void
Jason Evans8fadb1a2015-08-04 10:49:46 -07001669arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
1670 bool decommitted)
Jason Evansaa5113b2014-01-14 16:23:03 -08001671{
1672 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07001673 arena_chunk_map_misc_t *miscelm;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001674 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
Jason Evansaa5113b2014-01-14 16:23:03 -08001675
1676 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07001677 miscelm = arena_run_to_miscelm(run);
1678 run_ind = arena_miscelm_to_pageind(miscelm);
Jason Evansaa5113b2014-01-14 16:23:03 -08001679 assert(run_ind >= map_bias);
1680 assert(run_ind < chunk_npages);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001681 size = arena_run_size_get(arena, chunk, run, run_ind);
Jason Evansaa5113b2014-01-14 16:23:03 -08001682 run_pages = (size >> LG_PAGE);
1683 arena_cactive_update(arena, 0, run_pages);
1684 arena->nactive -= run_pages;
1685
1686 /*
1687 * The run is dirty if the caller claims to have dirtied it, as well as
1688 * if it was already dirty before being allocated and the caller
1689 * doesn't claim to have cleaned it.
1690 */
1691 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1692 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001693 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
1694 != 0)
Jason Evansaa5113b2014-01-14 16:23:03 -08001695 dirty = true;
1696 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001697 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08001698
1699 /* Mark pages as unallocated in the chunk map. */
Jason Evans8fadb1a2015-08-04 10:49:46 -07001700 if (dirty || decommitted) {
1701 size_t flags = flag_dirty | flag_decommitted;
1702 arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08001703 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001704 flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08001705 } else {
1706 arena_mapbits_unallocated_set(chunk, run_ind, size,
1707 arena_mapbits_unzeroed_get(chunk, run_ind));
1708 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1709 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
1710 }
1711
Jason Evans8fadb1a2015-08-04 10:49:46 -07001712 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
1713 flag_dirty, flag_decommitted);
Jason Evansaa5113b2014-01-14 16:23:03 -08001714
Jason Evanse476f8a2010-01-16 09:53:50 -08001715 /* Insert into runs_avail, now that coalescing is complete. */
Jason Evans203484e2012-05-02 00:30:36 -07001716 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1717 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
1718 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1719 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001720 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
1721 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
Qinfan Wu90737fc2014-07-21 19:39:20 -07001722 arena_avail_insert(arena, chunk, run_ind, run_pages);
Jason Evans8d4203c2010-04-13 20:53:21 -07001723
Jason Evans070b3c32014-08-14 14:45:58 -07001724 if (dirty)
Jason Evansee41ad42015-02-15 18:04:46 -08001725 arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07001726
Jason Evans203484e2012-05-02 00:30:36 -07001727 /* Deallocate chunk if it is now completely unused. */
Jason Evans155bfa72014-10-05 17:54:10 -07001728 if (size == arena_maxrun) {
Jason Evans203484e2012-05-02 00:30:36 -07001729 assert(run_ind == map_bias);
Jason Evans155bfa72014-10-05 17:54:10 -07001730 assert(run_pages == (arena_maxrun >> LG_PAGE));
Jason Evanse2deab72014-05-15 22:22:27 -07001731 arena_chunk_dalloc(arena, chunk);
Jason Evans203484e2012-05-02 00:30:36 -07001732 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001733
Jason Evans4fb7f512010-01-27 18:27:09 -08001734 /*
Jason Evans8d4203c2010-04-13 20:53:21 -07001735 * It is okay to do dirty page processing here even if the chunk was
Jason Evans4fb7f512010-01-27 18:27:09 -08001736 * deallocated above, since in that case it is the spare. Waiting
1737 * until after possible chunk deallocation to do dirty processing
1738 * allows for an old spare to be fully deallocated, thus decreasing the
1739 * chances of spuriously crossing the dirty page purging threshold.
1740 */
Jason Evans8d4203c2010-04-13 20:53:21 -07001741 if (dirty)
Jason Evans05b21be2010-03-14 17:36:10 -07001742 arena_maybe_purge(arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08001743}
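
/*
 * Editorial sketch (not upstream code): the dirty flag computed above
 * reduces to one boolean rule, restated here in isolation with invented
 * parameter names.
 */
static bool
sketch_run_becomes_dirty(bool caller_dirtied, bool was_dirty, bool cleaned,
    bool decommitted)
{

	return (caller_dirtied || (was_dirty && !cleaned && !decommitted));
}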
1744
1745static void
Jason Evansde249c82015-08-09 16:47:27 -07001746arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
1747 arena_run_t *run)
1748{
1749 bool committed = arena_run_decommit(arena, chunk, run);
1750
1751 arena_run_dalloc(arena, run, committed, false, !committed);
1752}
1753
1754static void
Jason Evanse476f8a2010-01-16 09:53:50 -08001755arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1756 size_t oldsize, size_t newsize)
1757{
Jason Evans0c5dd032014-09-29 01:31:39 -07001758 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1759 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07001760 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001761 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001762 bool decommitted = (arena_mapbits_decommitted_get(chunk, pageind) != 0);
Jason Evanse476f8a2010-01-16 09:53:50 -08001763
1764 assert(oldsize > newsize);
1765
1766 /*
1767 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07001768 * leading run as separately allocated. Set the last element of each
1769 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08001770 */
Jason Evans203484e2012-05-02 00:30:36 -07001771 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evansd8ceef62012-05-10 20:59:39 -07001772 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
1773 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001774
Jason Evans7372b152012-02-10 20:22:09 -08001775 if (config_debug) {
Jason Evansae4c7b42012-04-02 07:04:34 -07001776 UNUSED size_t tail_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001777 assert(arena_mapbits_large_size_get(chunk,
1778 pageind+head_npages+tail_npages-1) == 0);
1779 assert(arena_mapbits_dirty_get(chunk,
1780 pageind+head_npages+tail_npages-1) == flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001781 }
Jason Evansd8ceef62012-05-10 20:59:39 -07001782 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
1783 flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08001784
Jason Evans8fadb1a2015-08-04 10:49:46 -07001785 arena_run_dalloc(arena, run, false, false, decommitted);
Jason Evanse476f8a2010-01-16 09:53:50 -08001786}
1787
1788static void
1789arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1790 size_t oldsize, size_t newsize, bool dirty)
1791{
Jason Evans0c5dd032014-09-29 01:31:39 -07001792 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1793 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07001794 size_t head_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001795 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evansde249c82015-08-09 16:47:27 -07001796 bool decommitted = (arena_mapbits_decommitted_get(chunk, pageind) != 0);
Jason Evans0c5dd032014-09-29 01:31:39 -07001797 arena_chunk_map_misc_t *tail_miscelm;
1798 arena_run_t *tail_run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001799
1800 assert(oldsize > newsize);
1801
1802 /*
1803 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07001804 * trailing run as separately allocated. Set the last element of each
1805 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08001806 */
Jason Evans203484e2012-05-02 00:30:36 -07001807 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evansd8ceef62012-05-10 20:59:39 -07001808 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
1809 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001810
Jason Evans203484e2012-05-02 00:30:36 -07001811 if (config_debug) {
1812 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
1813 assert(arena_mapbits_large_size_get(chunk,
1814 pageind+head_npages+tail_npages-1) == 0);
1815 assert(arena_mapbits_dirty_get(chunk,
1816 pageind+head_npages+tail_npages-1) == flag_dirty);
1817 }
1818 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
Jason Evansd8ceef62012-05-10 20:59:39 -07001819 flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08001820
Jason Evans0c5dd032014-09-29 01:31:39 -07001821 tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
1822 tail_run = &tail_miscelm->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001823 arena_run_dalloc(arena, tail_run, dirty, false, decommitted);
Jason Evanse476f8a2010-01-16 09:53:50 -08001824}
1825
1826static arena_run_t *
Jason Evanse7a10582012-02-13 17:36:52 -08001827arena_bin_runs_first(arena_bin_t *bin)
1828{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001829 arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
Jason Evans0c5dd032014-09-29 01:31:39 -07001830 if (miscelm != NULL)
1831 return (&miscelm->run);
Jason Evanse7a10582012-02-13 17:36:52 -08001832
1833 return (NULL);
1834}
1835
1836static void
1837arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
1838{
Jason Evans0c5dd032014-09-29 01:31:39 -07001839 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08001840
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001841 assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
Jason Evanse7a10582012-02-13 17:36:52 -08001842
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001843 arena_run_tree_insert(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08001844}
1845
1846static void
1847arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
1848{
Jason Evans0c5dd032014-09-29 01:31:39 -07001849 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08001850
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001851 assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
Jason Evanse7a10582012-02-13 17:36:52 -08001852
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001853 arena_run_tree_remove(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08001854}
1855
1856static arena_run_t *
1857arena_bin_nonfull_run_tryget(arena_bin_t *bin)
1858{
1859 arena_run_t *run = arena_bin_runs_first(bin);
1860 if (run != NULL) {
1861 arena_bin_runs_remove(bin, run);
1862 if (config_stats)
1863 bin->stats.reruns++;
1864 }
1865 return (run);
1866}
1867
1868static arena_run_t *
Jason Evanse476f8a2010-01-16 09:53:50 -08001869arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1870{
Jason Evanse476f8a2010-01-16 09:53:50 -08001871 arena_run_t *run;
Jason Evans155bfa72014-10-05 17:54:10 -07001872 index_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001873 arena_bin_info_t *bin_info;
Jason Evanse476f8a2010-01-16 09:53:50 -08001874
1875 /* Look for a usable run. */
Jason Evanse7a10582012-02-13 17:36:52 -08001876 run = arena_bin_nonfull_run_tryget(bin);
1877 if (run != NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001878 return (run);
Jason Evanse476f8a2010-01-16 09:53:50 -08001879 /* No existing runs have any space available. */
1880
Jason Evans49f7e8f2011-03-15 13:59:15 -07001881 binind = arena_bin_index(arena, bin);
1882 bin_info = &arena_bin_info[binind];
1883
Jason Evanse476f8a2010-01-16 09:53:50 -08001884 /* Allocate a new run. */
Jason Evanse00572b2010-03-14 19:43:56 -07001885 malloc_mutex_unlock(&bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07001886 /******************************/
Jason Evans86815df2010-03-13 20:32:56 -08001887 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001888 run = arena_run_alloc_small(arena, bin_info->run_size, binind);
Jason Evanse00572b2010-03-14 19:43:56 -07001889 if (run != NULL) {
1890 /* Initialize run internals. */
Jason Evans381c23d2014-10-10 23:01:03 -07001891 run->binind = binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001892 run->nfree = bin_info->nregs;
Jason Evans0c5dd032014-09-29 01:31:39 -07001893 bitmap_init(run->bitmap, &bin_info->bitmap_info);
Jason Evanse69bee02010-03-15 22:25:23 -07001894 }
1895 malloc_mutex_unlock(&arena->lock);
1896 /********************************/
1897 malloc_mutex_lock(&bin->lock);
1898 if (run != NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001899 if (config_stats) {
1900 bin->stats.nruns++;
1901 bin->stats.curruns++;
Jason Evans7372b152012-02-10 20:22:09 -08001902 }
Jason Evanse00572b2010-03-14 19:43:56 -07001903 return (run);
1904 }
1905
1906 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08001907 * arena_run_alloc_small() failed, but another thread may have made
Jason Evans940a2e02010-10-17 17:51:37 -07001908 * sufficient memory available while this one dropped bin->lock above,
Jason Evanse00572b2010-03-14 19:43:56 -07001909 * so search one more time.
1910 */
Jason Evanse7a10582012-02-13 17:36:52 -08001911 run = arena_bin_nonfull_run_tryget(bin);
1912 if (run != NULL)
Jason Evanse00572b2010-03-14 19:43:56 -07001913 return (run);
Jason Evanse00572b2010-03-14 19:43:56 -07001914
1915 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08001916}
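
/*
 * Editorial sketch (not upstream code): arena_bin_nonfull_run_get() drops
 * bin->lock before taking arena->lock, so anything it observed under
 * bin->lock may be stale by the time that lock is reacquired -- hence the
 * second arena_bin_nonfull_run_tryget().  The bare shape with plain
 * pthreads; every name below is invented, the caller is assumed to hold
 * leaf_lock on entry, and the callbacks are assumed to need no arguments.
 */
#include <pthread.h>

static void *
sketch_refill_with_lock_handoff(pthread_mutex_t *leaf_lock,
    pthread_mutex_t *parent_lock, void *(*try_leaf)(void),
    void *(*alloc_from_parent)(void))
{
	void *ret;

	if ((ret = try_leaf()) != NULL)
		return (ret);
	pthread_mutex_unlock(leaf_lock);
	pthread_mutex_lock(parent_lock);
	ret = alloc_from_parent();
	pthread_mutex_unlock(parent_lock);
	pthread_mutex_lock(leaf_lock);
	if (ret != NULL)
		return (ret);
	/* Allocation failed; another thread may have refilled meanwhile. */
	return (try_leaf());
}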
1917
Jason Evans1e0a6362010-03-13 13:41:58 -08001918/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08001919static void *
1920arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1921{
Jason Evanse00572b2010-03-14 19:43:56 -07001922 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07001923 index_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001924 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07001925 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001926
Jason Evans49f7e8f2011-03-15 13:59:15 -07001927 binind = arena_bin_index(arena, bin);
1928 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07001929 bin->runcur = NULL;
1930 run = arena_bin_nonfull_run_get(arena, bin);
1931 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1932 /*
1933 * Another thread updated runcur while this one ran without the
1934 * bin lock in arena_bin_nonfull_run_get().
1935 */
Jason Evanse00572b2010-03-14 19:43:56 -07001936 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001937 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07001938 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07001939 arena_chunk_t *chunk;
1940
1941 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08001942 * arena_run_alloc_small() may have allocated run, or
1943 * it may have pulled run from the bin's run tree.
1944 * Therefore it is unsafe to make any assumptions about
1945 * how run has previously been used, and
1946 * arena_bin_lower_run() must be called, as if a region
1947 * were just deallocated from the run.
Jason Evans940a2e02010-10-17 17:51:37 -07001948 */
1949 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001950 if (run->nfree == bin_info->nregs)
Jason Evans8de6a022010-10-17 20:57:30 -07001951 arena_dalloc_bin_run(arena, chunk, run, bin);
1952 else
1953 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07001954 }
1955 return (ret);
1956 }
1957
1958 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001959 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07001960
1961 bin->runcur = run;
1962
Jason Evanse476f8a2010-01-16 09:53:50 -08001963 assert(bin->runcur->nfree > 0);
1964
Jason Evans49f7e8f2011-03-15 13:59:15 -07001965 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08001966}
1967
Jason Evans86815df2010-03-13 20:32:56 -08001968void
Jason Evans155bfa72014-10-05 17:54:10 -07001969arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind,
Jason Evans7372b152012-02-10 20:22:09 -08001970 uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08001971{
1972 unsigned i, nfill;
1973 arena_bin_t *bin;
1974 arena_run_t *run;
1975 void *ptr;
1976
1977 assert(tbin->ncached == 0);
1978
Jason Evans88c222c2013-02-06 11:59:30 -08001979 if (config_prof && arena_prof_accum(arena, prof_accumbytes))
1980 prof_idump();
Jason Evanse69bee02010-03-15 22:25:23 -07001981 bin = &arena->bins[binind];
1982 malloc_mutex_lock(&bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07001983 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1984 tbin->lg_fill_div); i < nfill; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001985 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001986 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001987 else
1988 ptr = arena_bin_malloc_hard(arena, bin);
Jason Evansf11a6772014-10-05 13:05:10 -07001989 if (ptr == NULL) {
1990 /*
1991 * OOM. tbin->avail isn't yet filled down to its first
1992 * element, so the successful allocations (if any) must
1993 * be moved to the base of tbin->avail before bailing
1994 * out.
1995 */
1996 if (i > 0) {
1997 memmove(tbin->avail, &tbin->avail[nfill - i],
1998 i * sizeof(void *));
1999 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002000 break;
Jason Evansf11a6772014-10-05 13:05:10 -07002001 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002002 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002003 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2004 true);
2005 }
Jason Evans9c43c132011-03-18 10:53:15 -07002006 /* Insert such that low regions get used first. */
2007 tbin->avail[nfill - 1 - i] = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002008 }
Jason Evans7372b152012-02-10 20:22:09 -08002009 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002010 bin->stats.nmalloc += i;
2011 bin->stats.nrequests += tbin->tstats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002012 bin->stats.curregs += i;
Jason Evans7372b152012-02-10 20:22:09 -08002013 bin->stats.nfills++;
2014 tbin->tstats.nrequests = 0;
2015 }
Jason Evans86815df2010-03-13 20:32:56 -08002016 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002017 tbin->ncached = i;
Jason Evanse476f8a2010-01-16 09:53:50 -08002018}
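
/*
 * Editorial sketch (not upstream code): the fill loop stores the i-th
 * object at avail[nfill - 1 - i] because the tcache pops from the high end
 * of avail[] (as the editor reads the tcache of this era), so the earliest
 * -- lowest-address -- allocations are handed out first, keeping usage
 * packed toward low regions.  The same placement and OOM backfill, in
 * isolation; alloc_one() is an invented stand-in for the bin allocation
 * above.
 */
#include <string.h>

static unsigned
sketch_fill_lowest_first(void **avail, unsigned nfill,
    void *(*alloc_one)(void))
{
	unsigned i;

	for (i = 0; i < nfill; i++) {
		void *ptr = alloc_one();

		if (ptr == NULL) {
			/* Slide the successes down to the base of avail. */
			memmove(avail, &avail[nfill - i], i * sizeof(void *));
			break;
		}
		avail[nfill - 1 - i] = ptr;	/* Top-down placement. */
	}
	return (i);
}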
Jason Evanse476f8a2010-01-16 09:53:50 -08002019
Jason Evans122449b2012-04-06 00:35:09 -07002020void
2021arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2022{
2023
2024 if (zero) {
2025 size_t redzone_size = bin_info->redzone_size;
2026 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
2027 redzone_size);
2028 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
2029 redzone_size);
2030 } else {
2031 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
2032 bin_info->reg_interval);
2033 }
2034}
2035
Jason Evans0d6c5d82013-12-17 15:14:36 -08002036#ifdef JEMALLOC_JET
2037#undef arena_redzone_corruption
2038#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
2039#endif
2040static void
2041arena_redzone_corruption(void *ptr, size_t usize, bool after,
2042 size_t offset, uint8_t byte)
2043{
2044
Jason Evans5fae7dc2015-07-23 13:56:25 -07002045 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2046 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
Jason Evans0d6c5d82013-12-17 15:14:36 -08002047 after ? "after" : "before", ptr, usize, byte);
2048}
2049#ifdef JEMALLOC_JET
Jason Evans0d6c5d82013-12-17 15:14:36 -08002050#undef arena_redzone_corruption
Jason Evans6b694c42014-01-07 16:47:56 -08002051#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2052arena_redzone_corruption_t *arena_redzone_corruption =
2053 JEMALLOC_N(arena_redzone_corruption_impl);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002054#endif
2055
2056static void
2057arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07002058{
2059 size_t size = bin_info->reg_size;
2060 size_t redzone_size = bin_info->redzone_size;
2061 size_t i;
2062 bool error = false;
2063
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002064 if (opt_junk_alloc) {
2065 for (i = 1; i <= redzone_size; i++) {
2066 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
2067 if (*byte != 0xa5) {
2068 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002069 arena_redzone_corruption(ptr, size, false, i,
2070 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002071 if (reset)
2072 *byte = 0xa5;
2073 }
2074 }
2075 for (i = 0; i < redzone_size; i++) {
2076 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
2077 if (*byte != 0xa5) {
2078 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002079 arena_redzone_corruption(ptr, size, true, i,
2080 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002081 if (reset)
2082 *byte = 0xa5;
2083 }
Jason Evans122449b2012-04-06 00:35:09 -07002084 }
2085 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002086
Jason Evans122449b2012-04-06 00:35:09 -07002087 if (opt_abort && error)
2088 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08002089}
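
/*
 * Editorial sketch (not upstream code): each junk-filled small region is
 * laid out as redzone_size guard bytes, then the reg_size payload, then
 * redzone_size more guard bytes, painted 0xa5 on allocation and checked
 * with exactly the two loops above on deallocation/quarantine.  A
 * stand-alone toy that paints and then verifies such guards; all names are
 * invented here.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int
sketch_redzone_roundtrip(size_t reg_size, size_t redzone_size)
{
	size_t i;
	int ok = 1;
	uint8_t *raw, *ptr;

	raw = (uint8_t *)malloc(reg_size + 2 * redzone_size);
	if (raw == NULL)
		return (0);
	ptr = raw + redzone_size;	/* The caller-visible region. */
	memset(raw, 0xa5, reg_size + 2 * redzone_size);
	/* ... a well-behaved caller writes only ptr[0 .. reg_size-1] ... */
	for (i = 1; i <= redzone_size; i++)
		ok &= (*(ptr - i) == 0xa5);		/* Leading guard. */
	for (i = 0; i < redzone_size; i++)
		ok &= (ptr[reg_size + i] == 0xa5);	/* Trailing guard. */
	free(raw);
	return (ok);
}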
Jason Evans122449b2012-04-06 00:35:09 -07002090
Jason Evans6b694c42014-01-07 16:47:56 -08002091#ifdef JEMALLOC_JET
2092#undef arena_dalloc_junk_small
2093#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
2094#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08002095void
2096arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2097{
2098 size_t redzone_size = bin_info->redzone_size;
2099
2100 arena_redzones_validate(ptr, bin_info, false);
Jason Evans122449b2012-04-06 00:35:09 -07002101 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
2102 bin_info->reg_interval);
2103}
Jason Evans6b694c42014-01-07 16:47:56 -08002104#ifdef JEMALLOC_JET
2105#undef arena_dalloc_junk_small
2106#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2107arena_dalloc_junk_small_t *arena_dalloc_junk_small =
2108 JEMALLOC_N(arena_dalloc_junk_small_impl);
2109#endif
Jason Evans122449b2012-04-06 00:35:09 -07002110
Jason Evans0d6c5d82013-12-17 15:14:36 -08002111void
2112arena_quarantine_junk_small(void *ptr, size_t usize)
2113{
Jason Evans155bfa72014-10-05 17:54:10 -07002114 index_t binind;
Jason Evans0d6c5d82013-12-17 15:14:36 -08002115 arena_bin_info_t *bin_info;
2116 cassert(config_fill);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002117 assert(opt_junk_free);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002118 assert(opt_quarantine);
2119 assert(usize <= SMALL_MAXCLASS);
2120
Jason Evans155bfa72014-10-05 17:54:10 -07002121 binind = size2index(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002122 bin_info = &arena_bin_info[binind];
2123 arena_redzones_validate(ptr, bin_info, true);
2124}
2125
Jason Evanse476f8a2010-01-16 09:53:50 -08002126void *
2127arena_malloc_small(arena_t *arena, size_t size, bool zero)
2128{
2129 void *ret;
2130 arena_bin_t *bin;
2131 arena_run_t *run;
Jason Evans155bfa72014-10-05 17:54:10 -07002132 index_t binind;
Jason Evanse476f8a2010-01-16 09:53:50 -08002133
Jason Evans155bfa72014-10-05 17:54:10 -07002134 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08002135 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08002136 bin = &arena->bins[binind];
Jason Evans155bfa72014-10-05 17:54:10 -07002137 size = index2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08002138
Jason Evans86815df2010-03-13 20:32:56 -08002139 malloc_mutex_lock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002140 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002141 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002142 else
2143 ret = arena_bin_malloc_hard(arena, bin);
2144
2145 if (ret == NULL) {
Jason Evans86815df2010-03-13 20:32:56 -08002146 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002147 return (NULL);
2148 }
2149
Jason Evans7372b152012-02-10 20:22:09 -08002150 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002151 bin->stats.nmalloc++;
2152 bin->stats.nrequests++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002153 bin->stats.curregs++;
Jason Evans7372b152012-02-10 20:22:09 -08002154 }
Jason Evans86815df2010-03-13 20:32:56 -08002155 malloc_mutex_unlock(&bin->lock);
Jason Evans551ebc42014-10-03 10:16:09 -07002156 if (config_prof && !isthreaded && arena_prof_accum(arena, size))
Jason Evans88c222c2013-02-06 11:59:30 -08002157 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08002158
Jason Evans551ebc42014-10-03 10:16:09 -07002159 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002160 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002161 if (unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002162 arena_alloc_junk_small(ret,
2163 &arena_bin_info[binind], false);
Jason Evans9c640bf2014-09-11 16:20:44 -07002164 } else if (unlikely(opt_zero))
Jason Evans7372b152012-02-10 20:22:09 -08002165 memset(ret, 0, size);
2166 }
Jason Evansbd87b012014-04-15 16:35:08 -07002167 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evans122449b2012-04-06 00:35:09 -07002168 } else {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002169 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002170 arena_alloc_junk_small(ret, &arena_bin_info[binind],
2171 true);
2172 }
Jason Evansbd87b012014-04-15 16:35:08 -07002173 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002174 memset(ret, 0, size);
Jason Evans122449b2012-04-06 00:35:09 -07002175 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002176
2177 return (ret);
2178}
2179
2180void *
Jason Evanse476f8a2010-01-16 09:53:50 -08002181arena_malloc_large(arena_t *arena, size_t size, bool zero)
2182{
2183 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07002184 size_t usize;
Jason Evans8a03cf02015-05-04 09:58:36 -07002185 uintptr_t random_offset;
Jason Evans0c5dd032014-09-29 01:31:39 -07002186 arena_run_t *run;
2187 arena_chunk_map_misc_t *miscelm;
Jason Evans88c222c2013-02-06 11:59:30 -08002188 UNUSED bool idump;
Jason Evanse476f8a2010-01-16 09:53:50 -08002189
2190 /* Large allocation. */
Jason Evans155bfa72014-10-05 17:54:10 -07002191 usize = s2u(size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002192 malloc_mutex_lock(&arena->lock);
Jason Evans8a03cf02015-05-04 09:58:36 -07002193 if (config_cache_oblivious) {
Jason Evansbce61d62015-07-07 09:32:05 -07002194 uint64_t r;
2195
Jason Evans8a03cf02015-05-04 09:58:36 -07002196 /*
2197 * Compute a uniformly distributed offset within the first page
2198 * that is a multiple of the cacheline size, e.g. [0 .. 64) * 64
2199 * for 4 KiB pages and 64-byte cachelines.
2200 */
2201 prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state,
Jason Evans0a9f9a42015-06-22 18:50:32 -07002202 UINT64_C(6364136223846793009),
2203 UINT64_C(1442695040888963409));
Jason Evans8a03cf02015-05-04 09:58:36 -07002204 random_offset = ((uintptr_t)r) << LG_CACHELINE;
2205 } else
2206 random_offset = 0;
2207 run = arena_run_alloc_large(arena, usize + large_pad, zero);
Jason Evans0c5dd032014-09-29 01:31:39 -07002208 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002209 malloc_mutex_unlock(&arena->lock);
2210 return (NULL);
2211 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002212 miscelm = arena_run_to_miscelm(run);
Jason Evans8a03cf02015-05-04 09:58:36 -07002213 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2214 random_offset);
Jason Evans7372b152012-02-10 20:22:09 -08002215 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07002216 index_t index = size2index(usize) - NBINS;
2217
Jason Evans7372b152012-02-10 20:22:09 -08002218 arena->stats.nmalloc_large++;
2219 arena->stats.nrequests_large++;
Jason Evans155bfa72014-10-05 17:54:10 -07002220 arena->stats.allocated_large += usize;
2221 arena->stats.lstats[index].nmalloc++;
2222 arena->stats.lstats[index].nrequests++;
2223 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002224 }
Jason Evans7372b152012-02-10 20:22:09 -08002225 if (config_prof)
Jason Evans155bfa72014-10-05 17:54:10 -07002226 idump = arena_prof_accum_locked(arena, usize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002227 malloc_mutex_unlock(&arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08002228 if (config_prof && idump)
2229 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08002230
Jason Evans551ebc42014-10-03 10:16:09 -07002231 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002232 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002233 if (unlikely(opt_junk_alloc))
Jason Evans155bfa72014-10-05 17:54:10 -07002234 memset(ret, 0xa5, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002235 else if (unlikely(opt_zero))
Jason Evans155bfa72014-10-05 17:54:10 -07002236 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002237 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002238 }
2239
2240 return (ret);
2241}
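
/*
 * Editorial sketch (not upstream code): the prng64() call above draws
 * LG_PAGE - LG_CACHELINE pseudo-random bits and the result is shifted left
 * by LG_CACHELINE, i.e. one of the page's cachelines is chosen uniformly so
 * that large allocations do not all start at the same cache index.  A
 * stand-alone version with the same LCG constants, hard-coding 4 KiB pages
 * and 64-byte cachelines; the "take the top bits" step mirrors how the
 * editor reads prng64().
 */
#include <stdint.h>

static uintptr_t
sketch_random_cacheline_offset(uint64_t *state)
{

	*state = *state * UINT64_C(6364136223846793009) +
	    UINT64_C(1442695040888963409);
	/* lg(4096) - lg(64) = 6 bits of offset, then cacheline-align it. */
	return ((uintptr_t)(*state >> (64 - 6)) << 6);
}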
2242
Jason Evanse476f8a2010-01-16 09:53:50 -08002243/* Only handles large allocations that require more than page alignment. */
Jason Evans88fef7c2015-02-12 14:06:37 -08002244static void *
Jason Evans50883de2015-07-23 17:13:18 -07002245arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002246 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002247{
2248 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07002249 size_t alloc_size, leadsize, trailsize;
2250 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002251 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002252 arena_chunk_map_misc_t *miscelm;
2253 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002254
Jason Evans50883de2015-07-23 17:13:18 -07002255 assert(usize == PAGE_CEILING(usize));
Jason Evans93443682010-10-20 17:39:18 -07002256
Jason Evans88fef7c2015-02-12 14:06:37 -08002257 arena = arena_choose(tsd, arena);
2258 if (unlikely(arena == NULL))
2259 return (NULL);
2260
Jason Evans93443682010-10-20 17:39:18 -07002261 alignment = PAGE_CEILING(alignment);
Jason Evans50883de2015-07-23 17:13:18 -07002262 alloc_size = usize + large_pad + alignment - PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08002263
2264 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08002265 run = arena_run_alloc_large(arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07002266 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002267 malloc_mutex_unlock(&arena->lock);
2268 return (NULL);
2269 }
Jason Evans5ff709c2012-04-11 18:13:45 -07002270 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002271 miscelm = arena_run_to_miscelm(run);
2272 rpages = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002273
Jason Evans0c5dd032014-09-29 01:31:39 -07002274 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2275 (uintptr_t)rpages;
Jason Evans50883de2015-07-23 17:13:18 -07002276 assert(alloc_size >= leadsize + usize);
2277 trailsize = alloc_size - leadsize - usize - large_pad;
Jason Evans5ff709c2012-04-11 18:13:45 -07002278 if (leadsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07002279 arena_chunk_map_misc_t *head_miscelm = miscelm;
2280 arena_run_t *head_run = run;
2281
2282 miscelm = arena_miscelm_get(chunk,
2283 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2284 LG_PAGE));
2285 run = &miscelm->run;
2286
2287 arena_run_trim_head(arena, chunk, head_run, alloc_size,
2288 alloc_size - leadsize);
Jason Evans5ff709c2012-04-11 18:13:45 -07002289 }
2290 if (trailsize != 0) {
Jason Evans50883de2015-07-23 17:13:18 -07002291 arena_run_trim_tail(arena, chunk, run, usize + large_pad +
2292 trailsize, usize + large_pad, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002293 }
Jason Evans8fadb1a2015-08-04 10:49:46 -07002294 if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2295 size_t run_ind =
2296 arena_miscelm_to_pageind(arena_run_to_miscelm(run));
Jason Evansde249c82015-08-09 16:47:27 -07002297 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2298 bool decommitted = (arena_mapbits_decommitted_get(chunk,
2299 run_ind) != 0);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002300
Jason Evansde249c82015-08-09 16:47:27 -07002301 assert(decommitted); /* Cause of OOM. */
2302 arena_run_dalloc(arena, run, dirty, false, decommitted);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002303 malloc_mutex_unlock(&arena->lock);
2304 return (NULL);
2305 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002306 ret = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002307
Jason Evans7372b152012-02-10 20:22:09 -08002308 if (config_stats) {
Jason Evans50883de2015-07-23 17:13:18 -07002309 index_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002310
Jason Evans7372b152012-02-10 20:22:09 -08002311 arena->stats.nmalloc_large++;
2312 arena->stats.nrequests_large++;
Jason Evans50883de2015-07-23 17:13:18 -07002313 arena->stats.allocated_large += usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002314 arena->stats.lstats[index].nmalloc++;
2315 arena->stats.lstats[index].nrequests++;
2316 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002317 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002318 malloc_mutex_unlock(&arena->lock);
2319
Jason Evans551ebc42014-10-03 10:16:09 -07002320 if (config_fill && !zero) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002321 if (unlikely(opt_junk_alloc))
Jason Evans50883de2015-07-23 17:13:18 -07002322 memset(ret, 0xa5, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002323 else if (unlikely(opt_zero))
Jason Evans50883de2015-07-23 17:13:18 -07002324 memset(ret, 0, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002325 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002326 return (ret);
2327}
2328
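/*
 * Route an aligned allocation request: small and large requests whose
 * alignment does not require special run placement reuse arena_malloc()
 * (stripping the cache-oblivious offset in the large case), while stricter
 * alignments fall through to arena_palloc_large() or the huge paths.
 */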
Jason Evans88fef7c2015-02-12 14:06:37 -08002329void *
2330arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
2331 bool zero, tcache_t *tcache)
2332{
2333 void *ret;
2334
Jason Evans8a03cf02015-05-04 09:58:36 -07002335 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
Jason Evans51541752015-05-19 17:42:31 -07002336 && (usize & PAGE_MASK) == 0))) {
2337 /* Small; alignment doesn't require special run placement. */
Jason Evans88fef7c2015-02-12 14:06:37 -08002338 ret = arena_malloc(tsd, arena, usize, zero, tcache);
Jason Evans51541752015-05-19 17:42:31 -07002339 } else if (usize <= arena_maxclass && alignment <= PAGE) {
2340 /*
2341 * Large; alignment doesn't require special run placement.
2342 * However, the cached pointer may be at a random offset from
2343 * the base of the run, so do some bit manipulation to retrieve
2344 * the base.
2345 */
2346 ret = arena_malloc(tsd, arena, usize, zero, tcache);
2347 if (config_cache_oblivious)
2348 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2349 } else {
Jason Evans88fef7c2015-02-12 14:06:37 -08002350 if (likely(usize <= arena_maxclass)) {
2351 ret = arena_palloc_large(tsd, arena, usize, alignment,
2352 zero);
2353 } else if (likely(alignment <= chunksize))
2354 ret = huge_malloc(tsd, arena, usize, zero, tcache);
2355 else {
2356 ret = huge_palloc(tsd, arena, usize, alignment, zero,
2357 tcache);
2358 }
2359 }
2360 return (ret);
2361}
2362
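/*
 * Record that a small request was promoted to a LARGE_MINCLASS run for heap
 * profiling: the small binind is written into the chunk map so that
 * isalloc(ptr, true) reports the originally requested size class, while the
 * non-demoted lookup continues to report LARGE_MINCLASS.
 */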
Jason Evans0b270a92010-03-31 16:45:04 -07002363void
2364arena_prof_promoted(const void *ptr, size_t size)
2365{
2366 arena_chunk_t *chunk;
Jason Evans155bfa72014-10-05 17:54:10 -07002367 size_t pageind;
2368 index_t binind;
Jason Evans0b270a92010-03-31 16:45:04 -07002369
Jason Evans78f73522012-04-18 13:38:40 -07002370 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07002371 assert(ptr != NULL);
2372 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evans155bfa72014-10-05 17:54:10 -07002373 assert(isalloc(ptr, false) == LARGE_MINCLASS);
2374 assert(isalloc(ptr, true) == LARGE_MINCLASS);
Jason Evansb1726102012-02-28 16:50:47 -08002375 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07002376
2377 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07002378 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans155bfa72014-10-05 17:54:10 -07002379 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08002380 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07002381 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07002382
Jason Evans155bfa72014-10-05 17:54:10 -07002383 assert(isalloc(ptr, false) == LARGE_MINCLASS);
Jason Evans122449b2012-04-06 00:35:09 -07002384 assert(isalloc(ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07002385}
Jason Evans6109fe02010-02-10 10:37:56 -08002386
Jason Evanse476f8a2010-01-16 09:53:50 -08002387static void
Jason Evans088e6a02010-10-18 00:04:44 -07002388arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08002389 arena_bin_t *bin)
2390{
Jason Evanse476f8a2010-01-16 09:53:50 -08002391
Jason Evans19b3d612010-03-18 20:36:40 -07002392 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002393 if (run == bin->runcur)
2394 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002395 else {
Jason Evansee41ad42015-02-15 18:04:46 -08002396 index_t binind = arena_bin_index(extent_node_arena_get(
2397 &chunk->node), bin);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002398 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2399
2400 if (bin_info->nregs != 1) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07002401 /*
2402 * This block's conditional is necessary because if the
2403 * run only contains one region, then it never gets
2404 * inserted into the non-full runs tree.
2405 */
Jason Evanse7a10582012-02-13 17:36:52 -08002406 arena_bin_runs_remove(bin, run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002407 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002408 }
Jason Evans088e6a02010-10-18 00:04:44 -07002409}
2410
2411static void
2412arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2413 arena_bin_t *bin)
2414{
Jason Evans088e6a02010-10-18 00:04:44 -07002415
2416 assert(run != bin->runcur);
Jason Evans0c5dd032014-09-29 01:31:39 -07002417 assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
2418 NULL);
Jason Evans86815df2010-03-13 20:32:56 -08002419
Jason Evanse00572b2010-03-14 19:43:56 -07002420 malloc_mutex_unlock(&bin->lock);
2421 /******************************/
Jason Evans86815df2010-03-13 20:32:56 -08002422 malloc_mutex_lock(&arena->lock);
Jason Evansde249c82015-08-09 16:47:27 -07002423 arena_run_dalloc_decommit(arena, chunk, run);
Jason Evans86815df2010-03-13 20:32:56 -08002424 malloc_mutex_unlock(&arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002425 /****************************/
2426 malloc_mutex_lock(&bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08002427 if (config_stats)
2428 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08002429}
2430
Jason Evans940a2e02010-10-17 17:51:37 -07002431static void
2432arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2433 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002434{
Jason Evanse476f8a2010-01-16 09:53:50 -08002435
Jason Evans8de6a022010-10-17 20:57:30 -07002436 /*
Jason Evanse7a10582012-02-13 17:36:52 -08002437 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
2438 * non-full run. It is okay to NULL runcur out rather than proactively
2439 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07002440 */
Jason Evanse7a10582012-02-13 17:36:52 -08002441 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07002442 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08002443 if (bin->runcur->nfree > 0)
2444 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07002445 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08002446 if (config_stats)
2447 bin->stats.reruns++;
2448 } else
2449 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07002450}
2451
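/*
 * The junked argument indicates that the caller already junk-filled the
 * region (see arena_dalloc_bin_junked_locked()), in which case the junk fill
 * below is skipped.
 */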
Jason Evansfc0b3b72014-10-09 17:54:06 -07002452static void
2453arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2454 arena_chunk_map_bits_t *bitselm, bool junked)
Jason Evans940a2e02010-10-17 17:51:37 -07002455{
Jason Evans0c5dd032014-09-29 01:31:39 -07002456 size_t pageind, rpages_ind;
Jason Evans940a2e02010-10-17 17:51:37 -07002457 arena_run_t *run;
2458 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02002459 arena_bin_info_t *bin_info;
Jason Evans155bfa72014-10-05 17:54:10 -07002460 index_t binind;
Jason Evans940a2e02010-10-17 17:51:37 -07002461
Jason Evansae4c7b42012-04-02 07:04:34 -07002462 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07002463 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2464 run = &arena_miscelm_get(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002465 binind = run->binind;
2466 bin = &arena->bins[binind];
Mike Hommey8b499712012-04-24 23:22:02 +02002467 bin_info = &arena_bin_info[binind];
Jason Evans940a2e02010-10-17 17:51:37 -07002468
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002469 if (!junked && config_fill && unlikely(opt_junk_free))
Jason Evans122449b2012-04-06 00:35:09 -07002470 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07002471
2472 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002473 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07002474 arena_dissociate_bin_run(chunk, run, bin);
Jason Evans8de6a022010-10-17 20:57:30 -07002475 arena_dalloc_bin_run(arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07002476 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07002477 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002478
Jason Evans7372b152012-02-10 20:22:09 -08002479 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002480 bin->stats.ndalloc++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002481 bin->stats.curregs--;
Jason Evans7372b152012-02-10 20:22:09 -08002482 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002483}
2484
Jason Evanse476f8a2010-01-16 09:53:50 -08002485void
Jason Evansfc0b3b72014-10-09 17:54:06 -07002486arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2487 arena_chunk_map_bits_t *bitselm)
2488{
2489
2490 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
2491}
2492
2493void
Jason Evans203484e2012-05-02 00:30:36 -07002494arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002495 size_t pageind, arena_chunk_map_bits_t *bitselm)
Jason Evans203484e2012-05-02 00:30:36 -07002496{
2497 arena_run_t *run;
2498 arena_bin_t *bin;
Jason Evans0c5dd032014-09-29 01:31:39 -07002499 size_t rpages_ind;
Jason Evans203484e2012-05-02 00:30:36 -07002500
Jason Evans0c5dd032014-09-29 01:31:39 -07002501 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2502 run = &arena_miscelm_get(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002503 bin = &arena->bins[run->binind];
Jason Evans203484e2012-05-02 00:30:36 -07002504 malloc_mutex_lock(&bin->lock);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002505 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
Jason Evans203484e2012-05-02 00:30:36 -07002506 malloc_mutex_unlock(&bin->lock);
2507}
2508
2509void
2510arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2511 size_t pageind)
2512{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002513 arena_chunk_map_bits_t *bitselm;
Jason Evans203484e2012-05-02 00:30:36 -07002514
2515 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07002516 /* arena_ptr_small_binind_get() does extra sanity checking. */
2517 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2518 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07002519 }
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002520 bitselm = arena_bitselm_get(chunk, pageind);
2521 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
Jason Evans203484e2012-05-02 00:30:36 -07002522}
Jason Evanse476f8a2010-01-16 09:53:50 -08002523
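/*
 * In JEMALLOC_JET (test) builds, the #undef/#define pairs below rebind the
 * junk-fill hooks to replaceable function pointers so that tests can
 * intercept them; regular builds use the functions directly.
 */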
Jason Evans6b694c42014-01-07 16:47:56 -08002524#ifdef JEMALLOC_JET
2525#undef arena_dalloc_junk_large
2526#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
2527#endif
Jason Evansfc0b3b72014-10-09 17:54:06 -07002528void
Jason Evans6b694c42014-01-07 16:47:56 -08002529arena_dalloc_junk_large(void *ptr, size_t usize)
2530{
2531
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002532 if (config_fill && unlikely(opt_junk_free))
Jason Evans6b694c42014-01-07 16:47:56 -08002533 memset(ptr, 0x5a, usize);
2534}
2535#ifdef JEMALLOC_JET
2536#undef arena_dalloc_junk_large
2537#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2538arena_dalloc_junk_large_t *arena_dalloc_junk_large =
2539 JEMALLOC_N(arena_dalloc_junk_large_impl);
2540#endif
2541
Jason Evanse476f8a2010-01-16 09:53:50 -08002542void
Jason Evansfc0b3b72014-10-09 17:54:06 -07002543arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
2544 void *ptr, bool junked)
Jason Evanse476f8a2010-01-16 09:53:50 -08002545{
Jason Evans0c5dd032014-09-29 01:31:39 -07002546 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2547 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
2548 arena_run_t *run = &miscelm->run;
Jason Evans13668262010-01-31 03:57:29 -08002549
Jason Evans7372b152012-02-10 20:22:09 -08002550 if (config_fill || config_stats) {
Jason Evans8a03cf02015-05-04 09:58:36 -07002551 size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
2552 large_pad;
Jason Evanse476f8a2010-01-16 09:53:50 -08002553
Jason Evansfc0b3b72014-10-09 17:54:06 -07002554 if (!junked)
2555 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002556 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07002557 index_t index = size2index(usize) - NBINS;
2558
Jason Evans7372b152012-02-10 20:22:09 -08002559 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08002560 arena->stats.allocated_large -= usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002561 arena->stats.lstats[index].ndalloc++;
2562 arena->stats.lstats[index].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08002563 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002564 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002565
Jason Evansde249c82015-08-09 16:47:27 -07002566 arena_run_dalloc_decommit(arena, chunk, run);
Jason Evanse476f8a2010-01-16 09:53:50 -08002567}
2568
Jason Evans203484e2012-05-02 00:30:36 -07002569void
Jason Evansfc0b3b72014-10-09 17:54:06 -07002570arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
2571 void *ptr)
2572{
2573
2574 arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
2575}
2576
2577void
Jason Evans203484e2012-05-02 00:30:36 -07002578arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
2579{
2580
2581 malloc_mutex_lock(&arena->lock);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002582 arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
Jason Evans203484e2012-05-02 00:30:36 -07002583 malloc_mutex_unlock(&arena->lock);
2584}
2585
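/*
 * Shrink a large allocation in place by trimming the trailing pages off of
 * its run; stats are updated as a deallocation of the old size class plus an
 * allocation of the new one.
 */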
Jason Evanse476f8a2010-01-16 09:53:50 -08002586static void
2587arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07002588 size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08002589{
Jason Evans0c5dd032014-09-29 01:31:39 -07002590 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2591 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
2592 arena_run_t *run = &miscelm->run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002593
2594 assert(size < oldsize);
2595
2596 /*
2597 * Shrink the run, and make trailing pages available for other
2598 * allocations.
2599 */
2600 malloc_mutex_lock(&arena->lock);
Jason Evans8a03cf02015-05-04 09:58:36 -07002601 arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
2602 large_pad, true);
Jason Evans7372b152012-02-10 20:22:09 -08002603 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07002604 index_t oldindex = size2index(oldsize) - NBINS;
2605 index_t index = size2index(size) - NBINS;
2606
Jason Evans7372b152012-02-10 20:22:09 -08002607 arena->stats.ndalloc_large++;
2608 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002609 arena->stats.lstats[oldindex].ndalloc++;
2610 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08002611
Jason Evans7372b152012-02-10 20:22:09 -08002612 arena->stats.nmalloc_large++;
2613 arena->stats.nrequests_large++;
2614 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07002615 arena->stats.lstats[index].nmalloc++;
2616 arena->stats.lstats[index].nrequests++;
2617 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002618 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002619 malloc_mutex_unlock(&arena->lock);
2620}
2621
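/*
 * Try to grow a large allocation in place.  This can only succeed if the run
 * immediately following the allocation is unallocated and large enough, in
 * which case that run is split and its leading portion merged into the
 * existing run.  Returns true on failure.
 */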
2622static bool
2623arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07002624 size_t oldsize, size_t size, size_t extra, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002625{
Jason Evansae4c7b42012-04-02 07:04:34 -07002626 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans5716d972015-08-06 23:34:12 -07002627 size_t npages = (oldsize + large_pad) >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002628 size_t followsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002629 size_t usize_min = s2u(size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002630
Jason Evans8a03cf02015-05-04 09:58:36 -07002631 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
2632 large_pad);
Jason Evanse476f8a2010-01-16 09:53:50 -08002633
2634 /* Try to extend the run. */
Jason Evans155bfa72014-10-05 17:54:10 -07002635 assert(usize_min > oldsize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002636 malloc_mutex_lock(&arena->lock);
Jason Evans5716d972015-08-06 23:34:12 -07002637 if (pageind+npages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07002638 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
2639 (followsize = arena_mapbits_unallocated_size_get(chunk,
Jason Evans155bfa72014-10-05 17:54:10 -07002640 pageind+npages)) >= usize_min - oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002641 /*
2642 * The next run is available and sufficiently large. Split the
2643 * following run, then merge the first part with the existing
2644 * allocation.
2645 */
Guilherme Goncalves9c6a8d32014-12-17 14:46:35 -02002646 arena_run_t *run;
Jason Evans155bfa72014-10-05 17:54:10 -07002647 size_t flag_dirty, splitsize, usize;
2648
2649 usize = s2u(size + extra);
2650 while (oldsize + followsize < usize)
2651 usize = index2size(size2index(usize)-1);
2652 assert(usize >= usize_min);
Jason Evans5716d972015-08-06 23:34:12 -07002653 splitsize = usize - oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002654
Guilherme Goncalves9c6a8d32014-12-17 14:46:35 -02002655 run = &arena_miscelm_get(chunk, pageind+npages)->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002656 if (arena_run_split_large(arena, run, splitsize, zero)) {
2657 malloc_mutex_unlock(&arena->lock);
2658 return (true);
2659 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002660
Jason Evans088e6a02010-10-18 00:04:44 -07002661 size = oldsize + splitsize;
Jason Evans5716d972015-08-06 23:34:12 -07002662 npages = (size + large_pad) >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07002663
2664 /*
2665 * Mark the extended run as dirty if either portion of the run
2666 * was dirty before allocation. This is rather pedantic,
2667 * because there's not actually any sequence of events that
2668 * could cause the resulting run to be passed to
2669 * arena_run_dalloc() with the dirty argument set to false
2670 * (which is when dirty flag consistency would really matter).
2671 */
Jason Evans203484e2012-05-02 00:30:36 -07002672 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
2673 arena_mapbits_dirty_get(chunk, pageind+npages-1);
Jason Evans5716d972015-08-06 23:34:12 -07002674 arena_mapbits_large_set(chunk, pageind, size + large_pad,
2675 flag_dirty);
Jason Evans203484e2012-05-02 00:30:36 -07002676 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08002677
Jason Evans7372b152012-02-10 20:22:09 -08002678 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07002679 index_t oldindex = size2index(oldsize) - NBINS;
2680 index_t index = size2index(size) - NBINS;
2681
Jason Evans7372b152012-02-10 20:22:09 -08002682 arena->stats.ndalloc_large++;
2683 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002684 arena->stats.lstats[oldindex].ndalloc++;
2685 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08002686
Jason Evans7372b152012-02-10 20:22:09 -08002687 arena->stats.nmalloc_large++;
2688 arena->stats.nrequests_large++;
2689 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07002690 arena->stats.lstats[index].nmalloc++;
2691 arena->stats.lstats[index].nrequests++;
2692 arena->stats.lstats[index].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07002693 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002694 malloc_mutex_unlock(&arena->lock);
2695 return (false);
2696 }
2697 malloc_mutex_unlock(&arena->lock);
2698
2699 return (true);
2700}
2701
Jason Evans6b694c42014-01-07 16:47:56 -08002702#ifdef JEMALLOC_JET
2703#undef arena_ralloc_junk_large
2704#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
2705#endif
2706static void
2707arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
2708{
2709
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002710 if (config_fill && unlikely(opt_junk_free)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002711 memset((void *)((uintptr_t)ptr + usize), 0x5a,
2712 old_usize - usize);
2713 }
2714}
2715#ifdef JEMALLOC_JET
2716#undef arena_ralloc_junk_large
2717#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
2718arena_ralloc_junk_large_t *arena_ralloc_junk_large =
2719 JEMALLOC_N(arena_ralloc_junk_large_impl);
2720#endif
2721
Jason Evanse476f8a2010-01-16 09:53:50 -08002722/*
2723 * Try to resize a large allocation, in order to avoid copying. Growing an
2724 * object always fails if the run immediately following it is already in use.
2725 */
2726static bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07002727arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
2728 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002729{
Jason Evans155bfa72014-10-05 17:54:10 -07002730 size_t usize;
Jason Evanse476f8a2010-01-16 09:53:50 -08002731
Jason Evans155bfa72014-10-05 17:54:10 -07002732 /* Make sure extra can't cause size_t overflow. */
Daniel Micay809b0ac2014-10-23 10:30:52 -04002733 if (unlikely(extra >= arena_maxclass))
Jason Evans155bfa72014-10-05 17:54:10 -07002734 return (true);
2735
2736 usize = s2u(size + extra);
2737 if (usize == oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002738 /* Same size class. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002739 return (false);
2740 } else {
2741 arena_chunk_t *chunk;
2742 arena_t *arena;
2743
2744 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansee41ad42015-02-15 18:04:46 -08002745 arena = extent_node_arena_get(&chunk->node);
Jason Evanse476f8a2010-01-16 09:53:50 -08002746
Jason Evans155bfa72014-10-05 17:54:10 -07002747 if (usize < oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002748			/* Fill before shrinking in order to avoid a race. */
Jason Evans155bfa72014-10-05 17:54:10 -07002749 arena_ralloc_junk_large(ptr, oldsize, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002750 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
Jason Evans155bfa72014-10-05 17:54:10 -07002751 usize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002752 return (false);
2753 } else {
2754 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
Jason Evans155bfa72014-10-05 17:54:10 -07002755 oldsize, size, extra, zero);
Jason Evans551ebc42014-10-03 10:16:09 -07002756 if (config_fill && !ret && !zero) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002757 if (unlikely(opt_junk_alloc)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002758 memset((void *)((uintptr_t)ptr +
2759 oldsize), 0xa5, isalloc(ptr,
2760 config_prof) - oldsize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002761 } else if (unlikely(opt_zero)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002762 memset((void *)((uintptr_t)ptr +
2763 oldsize), 0, isalloc(ptr,
2764 config_prof) - oldsize);
2765 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002766 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002767 return (ret);
2768 }
2769 }
2770}
2771
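/*
 * Returns false if the existing allocation can satisfy size (plus extra, on a
 * best-effort basis) without being moved, and true if reallocation to a new
 * location is required.
 */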
Jason Evansb2c31662014-01-12 15:05:44 -08002772bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07002773arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
2774 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002775{
Jason Evanse476f8a2010-01-16 09:53:50 -08002776
Jason Evans88fef7c2015-02-12 14:06:37 -08002777 if (likely(size <= arena_maxclass)) {
2778 /*
2779 * Avoid moving the allocation if the size class can be left the
2780 * same.
2781 */
2782 if (likely(oldsize <= arena_maxclass)) {
2783 if (oldsize <= SMALL_MAXCLASS) {
2784 assert(
2785 arena_bin_info[size2index(oldsize)].reg_size
2786 == oldsize);
2787 if ((size + extra <= SMALL_MAXCLASS &&
2788 size2index(size + extra) ==
2789 size2index(oldsize)) || (size <= oldsize &&
2790 size + extra >= oldsize))
Jason Evansb2c31662014-01-12 15:05:44 -08002791 return (false);
Jason Evans88fef7c2015-02-12 14:06:37 -08002792 } else {
2793 assert(size <= arena_maxclass);
2794 if (size + extra > SMALL_MAXCLASS) {
2795 if (!arena_ralloc_large(ptr, oldsize,
2796 size, extra, zero))
2797 return (false);
2798 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002799 }
2800 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002801
Jason Evans88fef7c2015-02-12 14:06:37 -08002802 /* Reallocation would require a move. */
2803 return (true);
2804 } else
2805 return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07002806}
Jason Evanse476f8a2010-01-16 09:53:50 -08002807
Jason Evans8e3c3c62010-09-17 15:46:18 -07002808void *
Jason Evans5460aa62014-09-22 21:09:23 -07002809arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
Jason Evans1cb181e2015-01-29 15:30:47 -08002810 size_t extra, size_t alignment, bool zero, tcache_t *tcache)
Jason Evans8e3c3c62010-09-17 15:46:18 -07002811{
2812 void *ret;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002813
Jason Evans88fef7c2015-02-12 14:06:37 -08002814 if (likely(size <= arena_maxclass)) {
2815 size_t copysize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002816
Jason Evans88fef7c2015-02-12 14:06:37 -08002817 /* Try to avoid moving the allocation. */
2818 if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
2819 return (ptr);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002820
Jason Evans88fef7c2015-02-12 14:06:37 -08002821 /*
2822 * size and oldsize are different enough that we need to move
2823 * the object. In that case, fall back to allocating new space
2824 * and copying.
2825 */
Jason Evans38d92102011-03-23 00:37:29 -07002826 if (alignment != 0) {
Jason Evans88fef7c2015-02-12 14:06:37 -08002827 size_t usize = sa2u(size + extra, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002828 if (usize == 0)
2829 return (NULL);
Jason Evans1cb181e2015-01-29 15:30:47 -08002830 ret = ipalloct(tsd, usize, alignment, zero, tcache,
2831 arena);
Jason Evans88fef7c2015-02-12 14:06:37 -08002832 } else {
2833 ret = arena_malloc(tsd, arena, size + extra, zero,
2834 tcache);
2835 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07002836
Jason Evans88fef7c2015-02-12 14:06:37 -08002837 if (ret == NULL) {
2838 if (extra == 0)
2839 return (NULL);
2840 /* Try again, this time without extra. */
2841 if (alignment != 0) {
2842 size_t usize = sa2u(size, alignment);
2843 if (usize == 0)
2844 return (NULL);
2845 ret = ipalloct(tsd, usize, alignment, zero,
2846 tcache, arena);
2847 } else {
2848 ret = arena_malloc(tsd, arena, size, zero,
2849 tcache);
2850 }
2851
2852 if (ret == NULL)
2853 return (NULL);
2854 }
2855
2856 /*
2857 * Junk/zero-filling were already done by
2858 * ipalloc()/arena_malloc().
2859 */
2860
2861 /*
2862 * Copy at most size bytes (not size+extra), since the caller
2863 * has no expectation that the extra bytes will be reliably
2864 * preserved.
2865 */
2866 copysize = (size < oldsize) ? size : oldsize;
2867 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
2868 memcpy(ret, ptr, copysize);
2869 isqalloc(tsd, ptr, oldsize, tcache);
2870 } else {
2871 ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra,
2872 alignment, zero, tcache);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002873 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002874 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08002875}
2876
Jason Evans609ae592012-10-11 13:53:15 -07002877dss_prec_t
2878arena_dss_prec_get(arena_t *arena)
2879{
2880 dss_prec_t ret;
2881
2882 malloc_mutex_lock(&arena->lock);
2883 ret = arena->dss_prec;
2884 malloc_mutex_unlock(&arena->lock);
2885 return (ret);
2886}
2887
Jason Evans4d434ad2014-04-15 12:09:48 -07002888bool
Jason Evans609ae592012-10-11 13:53:15 -07002889arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2890{
2891
Jason Evans551ebc42014-10-03 10:16:09 -07002892 if (!have_dss)
Jason Evans4d434ad2014-04-15 12:09:48 -07002893 return (dss_prec != dss_prec_disabled);
Jason Evans609ae592012-10-11 13:53:15 -07002894 malloc_mutex_lock(&arena->lock);
2895 arena->dss_prec = dss_prec;
2896 malloc_mutex_unlock(&arena->lock);
Jason Evans4d434ad2014-04-15 12:09:48 -07002897 return (false);
Jason Evans609ae592012-10-11 13:53:15 -07002898}
2899
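/*
 * lg_dirty_mult_default is accessed with atomic loads/stores; arena_new()
 * reads it when initializing each new arena's lg_dirty_mult.
 */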
Jason Evans8d6a3e82015-03-18 18:55:33 -07002900ssize_t
2901arena_lg_dirty_mult_default_get(void)
2902{
2903
2904 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
2905}
2906
2907bool
2908arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
2909{
2910
2911 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
2912 return (true);
2913 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
2914 return (false);
2915}
2916
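/*
 * Fold this arena's statistics into the caller-provided accumulators.  The
 * arena-wide, large, and huge stats are read under arena->lock; each bin's
 * stats are read under that bin's own lock.
 */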
Jason Evans609ae592012-10-11 13:53:15 -07002917void
Jason Evans562d2662015-03-24 16:36:12 -07002918arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
2919 size_t *nactive, size_t *ndirty, arena_stats_t *astats,
2920 malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
2921 malloc_huge_stats_t *hstats)
Jason Evans609ae592012-10-11 13:53:15 -07002922{
2923 unsigned i;
2924
2925 malloc_mutex_lock(&arena->lock);
2926 *dss = dss_prec_names[arena->dss_prec];
Jason Evans562d2662015-03-24 16:36:12 -07002927 *lg_dirty_mult = arena->lg_dirty_mult;
Jason Evans609ae592012-10-11 13:53:15 -07002928 *nactive += arena->nactive;
2929 *ndirty += arena->ndirty;
2930
2931 astats->mapped += arena->stats.mapped;
2932 astats->npurge += arena->stats.npurge;
2933 astats->nmadvise += arena->stats.nmadvise;
2934 astats->purged += arena->stats.purged;
Jason Evans4581b972014-11-27 17:22:36 -02002935 astats->metadata_mapped += arena->stats.metadata_mapped;
2936 astats->metadata_allocated += arena_metadata_allocated_get(arena);
Jason Evans609ae592012-10-11 13:53:15 -07002937 astats->allocated_large += arena->stats.allocated_large;
2938 astats->nmalloc_large += arena->stats.nmalloc_large;
2939 astats->ndalloc_large += arena->stats.ndalloc_large;
2940 astats->nrequests_large += arena->stats.nrequests_large;
Jason Evanse2deab72014-05-15 22:22:27 -07002941 astats->allocated_huge += arena->stats.allocated_huge;
2942 astats->nmalloc_huge += arena->stats.nmalloc_huge;
2943 astats->ndalloc_huge += arena->stats.ndalloc_huge;
Jason Evans609ae592012-10-11 13:53:15 -07002944
2945 for (i = 0; i < nlclasses; i++) {
2946 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
2947 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
2948 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
2949 lstats[i].curruns += arena->stats.lstats[i].curruns;
2950 }
Jason Evans3c4d92e2014-10-12 22:53:59 -07002951
2952 for (i = 0; i < nhclasses; i++) {
2953 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
2954 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
2955 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
2956 }
Jason Evans609ae592012-10-11 13:53:15 -07002957 malloc_mutex_unlock(&arena->lock);
2958
2959 for (i = 0; i < NBINS; i++) {
2960 arena_bin_t *bin = &arena->bins[i];
2961
2962 malloc_mutex_lock(&bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07002963 bstats[i].nmalloc += bin->stats.nmalloc;
2964 bstats[i].ndalloc += bin->stats.ndalloc;
2965 bstats[i].nrequests += bin->stats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002966 bstats[i].curregs += bin->stats.curregs;
Jason Evans609ae592012-10-11 13:53:15 -07002967 if (config_tcache) {
2968 bstats[i].nfills += bin->stats.nfills;
2969 bstats[i].nflushes += bin->stats.nflushes;
2970 }
2971 bstats[i].nruns += bin->stats.nruns;
2972 bstats[i].reruns += bin->stats.reruns;
2973 bstats[i].curruns += bin->stats.curruns;
2974 malloc_mutex_unlock(&bin->lock);
2975 }
2976}
2977
Jason Evans8bb31982014-10-07 23:14:57 -07002978arena_t *
2979arena_new(unsigned ind)
Jason Evanse476f8a2010-01-16 09:53:50 -08002980{
Jason Evans8bb31982014-10-07 23:14:57 -07002981 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08002982 unsigned i;
2983 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002984
Jason Evans8bb31982014-10-07 23:14:57 -07002985 /*
Jason Evans3c4d92e2014-10-12 22:53:59 -07002986 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
2987 * because there is no way to clean up if base_alloc() OOMs.
Jason Evans8bb31982014-10-07 23:14:57 -07002988 */
2989 if (config_stats) {
2990 arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
Jason Evans3c4d92e2014-10-12 22:53:59 -07002991		    + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)) +
2992		    nhclasses * sizeof(malloc_huge_stats_t));
Jason Evans8bb31982014-10-07 23:14:57 -07002993 } else
2994 arena = (arena_t *)base_alloc(sizeof(arena_t));
2995 if (arena == NULL)
2996 return (NULL);
2997
Jason Evans6109fe02010-02-10 10:37:56 -08002998 arena->ind = ind;
Jason Evans597632b2011-03-18 13:41:33 -07002999 arena->nthreads = 0;
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003000 if (malloc_mutex_init(&arena->lock))
3001 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003002
Jason Evans7372b152012-02-10 20:22:09 -08003003 if (config_stats) {
3004 memset(&arena->stats, 0, sizeof(arena_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003005 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
3006 + CACHELINE_CEILING(sizeof(arena_t)));
Jason Evans7372b152012-02-10 20:22:09 -08003007 memset(arena->stats.lstats, 0, nlclasses *
3008 sizeof(malloc_large_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003009 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
3010 + CACHELINE_CEILING(sizeof(arena_t)) +
Jason Evans3c4d92e2014-10-12 22:53:59 -07003011 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
3012 memset(arena->stats.hstats, 0, nhclasses *
3013 sizeof(malloc_huge_stats_t));
Jason Evans7372b152012-02-10 20:22:09 -08003014 if (config_tcache)
3015 ql_new(&arena->tcache_ql);
3016 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003017
Jason Evans7372b152012-02-10 20:22:09 -08003018 if (config_prof)
3019 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08003020
Jason Evans8a03cf02015-05-04 09:58:36 -07003021 if (config_cache_oblivious) {
3022 /*
3023 * A nondeterministic seed based on the address of arena reduces
3024 * the likelihood of lockstep non-uniform cache index
3025 * utilization among identical concurrent processes, but at the
3026 * cost of test repeatability. For debug builds, instead use a
3027 * deterministic seed.
3028 */
3029 arena->offset_state = config_debug ? ind :
3030 (uint64_t)(uintptr_t)arena;
3031 }
3032
Jason Evans609ae592012-10-11 13:53:15 -07003033 arena->dss_prec = chunk_dss_prec_get();
3034
Jason Evanse476f8a2010-01-16 09:53:50 -08003035 arena->spare = NULL;
3036
Jason Evans8d6a3e82015-03-18 18:55:33 -07003037 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
Jason Evans0a9f9a42015-06-22 18:50:32 -07003038 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08003039 arena->nactive = 0;
3040 arena->ndirty = 0;
3041
Jason Evanse3d13062012-10-30 15:42:37 -07003042 arena_avail_tree_new(&arena->runs_avail);
Jason Evansee41ad42015-02-15 18:04:46 -08003043 qr_new(&arena->runs_dirty, rd_link);
Jason Evans738e0892015-02-18 01:15:50 -08003044 qr_new(&arena->chunks_cache, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08003045
3046 ql_new(&arena->huge);
3047 if (malloc_mutex_init(&arena->huge_mtx))
3048 return (NULL);
3049
Jason Evansb49a3342015-07-28 11:28:19 -04003050 extent_tree_szad_new(&arena->chunks_szad_cached);
3051 extent_tree_ad_new(&arena->chunks_ad_cached);
3052 extent_tree_szad_new(&arena->chunks_szad_retained);
3053 extent_tree_ad_new(&arena->chunks_ad_retained);
Jason Evansee41ad42015-02-15 18:04:46 -08003054 if (malloc_mutex_init(&arena->chunks_mtx))
3055 return (NULL);
3056 ql_new(&arena->node_cache);
3057 if (malloc_mutex_init(&arena->node_cache_mtx))
3058 return (NULL);
3059
Jason Evansb49a3342015-07-28 11:28:19 -04003060 arena->chunk_hooks = chunk_hooks_default;
Jason Evanse476f8a2010-01-16 09:53:50 -08003061
3062 /* Initialize bins. */
Jason Evansb1726102012-02-28 16:50:47 -08003063 for (i = 0; i < NBINS; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08003064 bin = &arena->bins[i];
Jason Evans86815df2010-03-13 20:32:56 -08003065 if (malloc_mutex_init(&bin->lock))
Jason Evans8bb31982014-10-07 23:14:57 -07003066 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003067 bin->runcur = NULL;
3068 arena_run_tree_new(&bin->runs);
Jason Evans7372b152012-02-10 20:22:09 -08003069 if (config_stats)
3070 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
Jason Evanse476f8a2010-01-16 09:53:50 -08003071 }
3072
Jason Evans8bb31982014-10-07 23:14:57 -07003073 return (arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08003074}
3075
Jason Evans49f7e8f2011-03-15 13:59:15 -07003076/*
3077 * Calculate bin_info->run_size such that it meets the following constraints:
3078 *
Jason Evans155bfa72014-10-05 17:54:10 -07003079 * *) bin_info->run_size <= arena_maxrun
Jason Evans47e57f92011-03-22 09:00:56 -07003080 * *) bin_info->nregs <= RUN_MAXREGS
Jason Evans49f7e8f2011-03-15 13:59:15 -07003081 *
Jason Evans0c5dd032014-09-29 01:31:39 -07003082 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
3083 * these settings are all interdependent.
Jason Evans49f7e8f2011-03-15 13:59:15 -07003084 */
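/*
 * Worked example (assuming 4 KiB pages and no redzone padding): for
 * reg_size == 96 the loop below tries 4096, 8192, and 12288, and settles on
 * perfect_run_size == 12288 with perfect_nregs == 128, since 12288 is the
 * smallest page multiple that 96 divides evenly.
 */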
Jason Evans0c5dd032014-09-29 01:31:39 -07003085static void
3086bin_info_run_size_calc(arena_bin_info_t *bin_info)
Jason Evans49f7e8f2011-03-15 13:59:15 -07003087{
Jason Evans122449b2012-04-06 00:35:09 -07003088 size_t pad_size;
Jason Evans0c5dd032014-09-29 01:31:39 -07003089 size_t try_run_size, perfect_run_size, actual_run_size;
3090 uint32_t try_nregs, perfect_nregs, actual_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003091
3092 /*
Jason Evans122449b2012-04-06 00:35:09 -07003093 * Determine redzone size based on minimum alignment and minimum
3094 * redzone size. Add padding to the end of the run if it is needed to
3095 * align the regions. The padding allows each redzone to be half the
3096 * minimum alignment; without the padding, each redzone would have to
3097 * be twice as large in order to maintain alignment.
3098 */
Jason Evans9c640bf2014-09-11 16:20:44 -07003099 if (config_fill && unlikely(opt_redzone)) {
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003100 size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
3101 1);
Jason Evans122449b2012-04-06 00:35:09 -07003102 if (align_min <= REDZONE_MINSIZE) {
3103 bin_info->redzone_size = REDZONE_MINSIZE;
3104 pad_size = 0;
3105 } else {
3106 bin_info->redzone_size = align_min >> 1;
3107 pad_size = bin_info->redzone_size;
3108 }
3109 } else {
3110 bin_info->redzone_size = 0;
3111 pad_size = 0;
3112 }
3113 bin_info->reg_interval = bin_info->reg_size +
3114 (bin_info->redzone_size << 1);
3115
3116 /*
Jason Evans0c5dd032014-09-29 01:31:39 -07003117 * Compute run size under ideal conditions (no redzones, no limit on run
3118 * size).
Jason Evans49f7e8f2011-03-15 13:59:15 -07003119 */
Jason Evans0c5dd032014-09-29 01:31:39 -07003120 try_run_size = PAGE;
3121 try_nregs = try_run_size / bin_info->reg_size;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003122 do {
Jason Evans0c5dd032014-09-29 01:31:39 -07003123 perfect_run_size = try_run_size;
3124 perfect_nregs = try_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003125
Jason Evansae4c7b42012-04-02 07:04:34 -07003126 try_run_size += PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07003127 try_nregs = try_run_size / bin_info->reg_size;
3128 } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
3129 assert(perfect_nregs <= RUN_MAXREGS);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003130
Jason Evans0c5dd032014-09-29 01:31:39 -07003131 actual_run_size = perfect_run_size;
3132 actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
3133
3134 /*
3135 * Redzones can require enough padding that not even a single region can
3136 * fit within the number of pages that would normally be dedicated to a
3137 * run for this size class. Increase the run size until at least one
3138 * region fits.
3139 */
3140 while (actual_nregs == 0) {
3141 assert(config_fill && unlikely(opt_redzone));
3142
3143 actual_run_size += PAGE;
3144 actual_nregs = (actual_run_size - pad_size) /
3145 bin_info->reg_interval;
3146 }
3147
3148 /*
3149 * Make sure that the run will fit within an arena chunk.
3150 */
Jason Evans155bfa72014-10-05 17:54:10 -07003151 while (actual_run_size > arena_maxrun) {
Jason Evans0c5dd032014-09-29 01:31:39 -07003152 actual_run_size -= PAGE;
3153 actual_nregs = (actual_run_size - pad_size) /
3154 bin_info->reg_interval;
3155 }
3156 assert(actual_nregs > 0);
Jason Evans5707d6f2015-03-06 17:14:05 -08003157 assert(actual_run_size == s2u(actual_run_size));
Jason Evans49f7e8f2011-03-15 13:59:15 -07003158
3159 /* Copy final settings. */
Jason Evans0c5dd032014-09-29 01:31:39 -07003160 bin_info->run_size = actual_run_size;
3161 bin_info->nregs = actual_nregs;
3162 bin_info->reg0_offset = actual_run_size - (actual_nregs *
3163 bin_info->reg_interval) - pad_size + bin_info->redzone_size;
Jason Evans122449b2012-04-06 00:35:09 -07003164
Jason Evans8a03cf02015-05-04 09:58:36 -07003165 if (actual_run_size > small_maxrun)
3166 small_maxrun = actual_run_size;
3167
Jason Evans122449b2012-04-06 00:35:09 -07003168 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
3169 * bin_info->reg_interval) + pad_size == bin_info->run_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003170}
3171
Jason Evansb1726102012-02-28 16:50:47 -08003172static void
Jason Evans49f7e8f2011-03-15 13:59:15 -07003173bin_info_init(void)
3174{
3175 arena_bin_info_t *bin_info;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003176
Jason Evans8a03cf02015-05-04 09:58:36 -07003177#define BIN_INFO_INIT_bin_yes(index, size) \
Jason Evansd04047c2014-05-28 16:11:55 -07003178 bin_info = &arena_bin_info[index]; \
Jason Evansb1726102012-02-28 16:50:47 -08003179 bin_info->reg_size = size; \
Jason Evans0c5dd032014-09-29 01:31:39 -07003180 bin_info_run_size_calc(bin_info); \
Jason Evansb1726102012-02-28 16:50:47 -08003181 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
Jason Evansd04047c2014-05-28 16:11:55 -07003182#define BIN_INFO_INIT_bin_no(index, size)
3183#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
3184 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
Jason Evansb1726102012-02-28 16:50:47 -08003185 SIZE_CLASSES
Jason Evansd04047c2014-05-28 16:11:55 -07003186#undef BIN_INFO_INIT_bin_yes
3187#undef BIN_INFO_INIT_bin_no
3188#undef SC
Jason Evans49f7e8f2011-03-15 13:59:15 -07003189}
3190
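/*
 * small_run_tab[i] is true iff a run of i pages is the run size of some small
 * size class; small_maxrun, the largest such run size, was recorded as a side
 * effect of bin_info_run_size_calc().
 */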
Jason Evans8a03cf02015-05-04 09:58:36 -07003191static bool
3192small_run_size_init(void)
3193{
3194
3195 assert(small_maxrun != 0);
3196
3197 small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
3198 LG_PAGE));
3199 if (small_run_tab == NULL)
3200 return (true);
3201
3202#define TAB_INIT_bin_yes(index, size) { \
3203 arena_bin_info_t *bin_info = &arena_bin_info[index]; \
3204 small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
3205 }
3206#define TAB_INIT_bin_no(index, size)
3207#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
3208 TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
3209 SIZE_CLASSES
3210#undef TAB_INIT_bin_yes
3211#undef TAB_INIT_bin_no
3212#undef SC
3213
3214 return (false);
3215}
3216
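/*
 * arena_boot() derives the global arena parameters: the chunk header layout
 * (map_bias, map_misc_offset), the largest run and size class an arena can
 * serve (arena_maxrun, arena_maxclass), the large/huge class counts, the
 * per-bin run metadata, and the small run table.
 */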
3217bool
Jason Evansa0bf2422010-01-29 14:30:41 -08003218arena_boot(void)
Jason Evanse476f8a2010-01-16 09:53:50 -08003219{
Jason Evansa0bf2422010-01-29 14:30:41 -08003220 size_t header_size;
Jason Evans7393f442010-10-01 17:35:43 -07003221 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08003222
Jason Evans8d6a3e82015-03-18 18:55:33 -07003223 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
3224
Jason Evanse476f8a2010-01-16 09:53:50 -08003225 /*
3226 * Compute the header size such that it is large enough to contain the
Jason Evans7393f442010-10-01 17:35:43 -07003227 * page map. The page map is biased to omit entries for the header
3228 * itself, so some iteration is necessary to compute the map bias.
3229 *
3230 * 1) Compute safe header_size and map_bias values that include enough
3231 * space for an unbiased page map.
3232 * 2) Refine map_bias based on (1) to omit the header pages in the page
3233 * map. The resulting map_bias may be one too small.
3234 * 3) Refine map_bias based on (2). The result will be >= the result
3235 * from (2), and will always be correct.
Jason Evanse476f8a2010-01-16 09:53:50 -08003236 */
Jason Evans7393f442010-10-01 17:35:43 -07003237 map_bias = 0;
3238 for (i = 0; i < 3; i++) {
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003239 header_size = offsetof(arena_chunk_t, map_bits) +
3240 ((sizeof(arena_chunk_map_bits_t) +
3241 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
Jason Evans0c5dd032014-09-29 01:31:39 -07003242 map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
Jason Evans7393f442010-10-01 17:35:43 -07003243 }
3244 assert(map_bias > 0);
3245
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003246 map_misc_offset = offsetof(arena_chunk_t, map_bits) +
3247 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
3248
Jason Evans155bfa72014-10-05 17:54:10 -07003249 arena_maxrun = chunksize - (map_bias << LG_PAGE);
Jason Evansfc0b3b72014-10-09 17:54:06 -07003250 assert(arena_maxrun > 0);
Jason Evans155bfa72014-10-05 17:54:10 -07003251 arena_maxclass = index2size(size2index(chunksize)-1);
3252 if (arena_maxclass > arena_maxrun) {
3253 /*
3254 * For small chunk sizes it's possible for there to be fewer
3255 * non-header pages available than are necessary to serve the
3256 * size classes just below chunksize.
3257 */
3258 arena_maxclass = arena_maxrun;
3259 }
Jason Evansfc0b3b72014-10-09 17:54:06 -07003260 assert(arena_maxclass > 0);
Jason Evans155bfa72014-10-05 17:54:10 -07003261 nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS);
Jason Evans3c4d92e2014-10-12 22:53:59 -07003262 nhclasses = NSIZES - nlclasses - NBINS;
Jason Evansa0bf2422010-01-29 14:30:41 -08003263
Jason Evansb1726102012-02-28 16:50:47 -08003264 bin_info_init();
Jason Evans8a03cf02015-05-04 09:58:36 -07003265 return (small_run_size_init());
Jason Evanse476f8a2010-01-16 09:53:50 -08003266}
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003267
3268void
3269arena_prefork(arena_t *arena)
3270{
3271 unsigned i;
3272
3273 malloc_mutex_prefork(&arena->lock);
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003274 malloc_mutex_prefork(&arena->huge_mtx);
3275 malloc_mutex_prefork(&arena->chunks_mtx);
3276 malloc_mutex_prefork(&arena->node_cache_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003277 for (i = 0; i < NBINS; i++)
3278 malloc_mutex_prefork(&arena->bins[i].lock);
3279}
3280
3281void
3282arena_postfork_parent(arena_t *arena)
3283{
3284 unsigned i;
3285
3286 for (i = 0; i < NBINS; i++)
3287 malloc_mutex_postfork_parent(&arena->bins[i].lock);
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003288 malloc_mutex_postfork_parent(&arena->node_cache_mtx);
3289 malloc_mutex_postfork_parent(&arena->chunks_mtx);
3290 malloc_mutex_postfork_parent(&arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003291 malloc_mutex_postfork_parent(&arena->lock);
3292}
3293
3294void
3295arena_postfork_child(arena_t *arena)
3296{
3297 unsigned i;
3298
3299 for (i = 0; i < NBINS; i++)
3300 malloc_mutex_postfork_child(&arena->bins[i].lock);
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003301 malloc_mutex_postfork_child(&arena->node_cache_mtx);
3302 malloc_mutex_postfork_child(&arena->chunks_mtx);
3303 malloc_mutex_postfork_child(&arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003304 malloc_mutex_postfork_child(&arena->lock);
3305}