#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default;
arena_bin_info_t arena_bin_info[NBINS];

size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t large_maxclass; /* Max large size class. */
static size_t small_maxrun; /* Max run size used for small size classes. */
static bool *small_run_tab; /* Valid small run page multiples. */
unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

#define CHUNK_MAP_KEY ((uintptr_t)0x1U)

JEMALLOC_INLINE_C arena_chunk_map_misc_t *
arena_miscelm_key_create(size_t size)
{

	return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) |
	    CHUNK_MAP_KEY));
}

JEMALLOC_INLINE_C bool
arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm)
{

	return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0);
}

#undef CHUNK_MAP_KEY

JEMALLOC_INLINE_C size_t
arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
{

	assert(arena_miscelm_is_key(miscelm));

	return (arena_mapbits_size_decode((uintptr_t)miscelm));
}

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	assert(!arena_miscelm_is_key(miscelm));

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_INLINE_C int
arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
    rb_link, arena_run_comp)

static size_t
run_quantize(size_t size)
{
	size_t qsize;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/* Don't change sizes that are valid small run sizes. */
	if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
		return (size);

	/*
	 * Round down to the nearest run size that can actually be requested
	 * during normal large allocation. Add large_pad so that cache index
	 * randomization can offset the allocation from the page boundary.
	 */
	qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
	if (qsize <= SMALL_MAXCLASS + large_pad)
		return (run_quantize(size - large_pad));
	assert(qsize <= size);
	return (qsize);
}

static size_t
run_quantize_next(size_t size)
{
	size_t large_run_size_next;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/*
	 * Return the next quantized size greater than the input size.
	 * Quantized sizes comprise the union of run sizes that back small
	 * region runs, and run sizes that back large regions with no explicit
	 * alignment constraints.
	 */

	if (size > SMALL_MAXCLASS) {
		large_run_size_next = PAGE_CEILING(index2size(size2index(size -
		    large_pad) + 1) + large_pad);
	} else
		large_run_size_next = SIZE_T_MAX;
	if (size >= small_maxrun)
		return (large_run_size_next);

	while (true) {
		size += PAGE;
		assert(size <= small_maxrun);
		if (small_run_tab[size >> LG_PAGE]) {
			if (large_run_size_next < size)
				return (large_run_size_next);
			return (size);
		}
	}
}

static size_t
run_quantize_first(size_t size)
{
	size_t qsize = run_quantize(size);

	if (qsize < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in. This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		qsize = run_quantize_next(size);
	}
	return (qsize);
}

JEMALLOC_INLINE_C int
arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
	int ret;
	uintptr_t a_miscelm = (uintptr_t)a;
	size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ?
	    arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a));
	size_t b_qsize = run_quantize(arena_miscelm_size_get(b));

	/*
	 * Compare based on quantized size rather than size, in order to sort
	 * equally useful runs only by address.
	 */
	ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
	if (ret == 0) {
		if (!arena_miscelm_is_key(a)) {
			uintptr_t b_miscelm = (uintptr_t)b;

			ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
		} else {
			/*
			 * Treat keys as if they are lower than anything else.
			 */
			ret = -1;
		}
	}

	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
    arena_chunk_map_misc_t, rb_link, arena_avail_comp)

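/*
 * Added commentary (not part of the original source): because
 * arena_avail_comp() orders runs first by quantized size and then by address,
 * and treats search keys as lower than any real miscelm of equal quantized
 * size, arena_avail_tree_nsearch(&arena->runs_avail, key) returns the
 * lowest-addressed available run in the smallest quantized size class that is
 * >= the key's size. arena_run_first_best_fit() (further below) relies on
 * exactly this property.
 */
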
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
	    pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
	    pageind));
}

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}

JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	unsigned regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}

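/*
 * Added commentary (not part of the original source): a worked example of the
 * region address computation in arena_run_reg_alloc() above, using
 * hypothetical bin parameters. If bin_info->reg0_offset == 0x20,
 * bin_info->reg_interval == 0x40, and bitmap_sfu() returns regind == 3, the
 * region handed back is rpages + 0x20 + 3*0x40 == rpages + 0xe0.
 */
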
JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	unsigned regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{

	if (config_stats) {
		ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
		    - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_add(cactive_diff);
	}
}

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_cactive_update(arena, need_pages, 0);
	arena->nactive += need_pages;

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

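/*
 * Added commentary (not part of the original source): for example, splitting
 * need_pages == 5 out of a 16-page unallocated run leaves an 11-page
 * remainder starting at run_ind+5; arena_run_split_remove() re-tags both ends
 * of that remainder as unallocated (11 << LG_PAGE bytes), re-inserts it into
 * runs_avail, and, if the run was dirty, back into runs_dirty as well.
 */
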
static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks. Arbitrarily mark them as committed. The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(chunk, &chunk->node));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(&arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
	    chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(arena, chunk_hooks,
			    (void *)chunk, chunksize, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
		    chunksize, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(&arena->lock);
	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, true);
	if (chunk != NULL) {
		if (arena_chunk_register(arena, chunk, *zero)) {
			chunk_dalloc_cache(arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
		*commit = true;
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
		    zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run. Mark
	 * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
	 * chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get(chunk, map_bias+1),
		    (size_t)((uintptr_t) arena_bitselm_get(chunk,
		    chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
		    map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
		    arena_bitselm_get(chunk, chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(arena);
		if (chunk == NULL)
			return (NULL);
	}

	/* Insert the run into the runs_avail tree. */
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/*
	 * Remove run from the runs_avail tree, so that the arena does not use
	 * it.
	 */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		bool committed;

		arena->spare = chunk;
		if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
			arena_run_dirty_remove(arena, spare, map_bias,
			    chunk_npages-map_bias);
		}

		chunk_deregister(spare, &spare->node);

		committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
		    0);
		if (!committed) {
			/*
			 * Decommit the header. Mark the chunk as decommitted
			 * even if header decommit fails, since treating a
			 * partially committed chunk as committed has a high
			 * potential for causing later access of decommitted
			 * memory.
			 */
			chunk_hooks = chunk_hooks_get(arena);
			chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}

		chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
		    chunksize, committed);

		if (config_stats) {
			arena->stats.mapped -= chunksize;
			arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
		}
	} else
		arena->spare = chunk;
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(&arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(&arena->node_cache_mtx);
		return (base_alloc(sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(&arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(&arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(&arena->node_cache_mtx);
}

static void *
arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t usize, size_t alignment, bool *zero, size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
	    zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(&arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena->nactive -= (usize >> LG_PAGE);
		malloc_mutex_unlock(&arena->lock);
	}

	return (ret);
}

void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
    bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);

	malloc_mutex_lock(&arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena->nactive += (usize >> LG_PAGE);

	ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
	    zero, true);
	malloc_mutex_unlock(&arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
		    alignment, zero, csize);
	}

	if (config_stats && ret != NULL)
		stats_cactive_add(usize);
	return (ret);
}

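/*
 * Added commentary (not part of the original source): arena_chunk_alloc_huge()
 * above updates stats and nactive optimistically while holding arena->lock,
 * then drops the lock before the potentially slow chunk allocation in
 * arena_chunk_alloc_huge_hard(); if that allocation fails, the hard path
 * re-acquires the lock and reverts the updates via
 * arena_huge_malloc_stats_update_undo().
 */
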
void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(&arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
		stats_cactive_sub(usize);
	}
	arena->nactive -= (usize >> LG_PAGE);

	chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize) {
		size_t udiff = usize - oldsize;
		arena->nactive += udiff >> LG_PAGE;
		if (config_stats)
			stats_cactive_add(udiff);
	} else {
		size_t udiff = oldsize - usize;
		arena->nactive -= udiff >> LG_PAGE;
		if (config_stats)
			stats_cactive_sub(udiff);
	}
	malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(&arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0) {
			arena->stats.mapped -= cdiff;
			stats_cactive_sub(udiff);
		}
	}
	arena->nactive -= udiff >> LG_PAGE;

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
	}
	malloc_mutex_unlock(&arena->lock);
}

static bool
arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
    size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
	    zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(&arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena->nactive -= (udiff >> LG_PAGE);
		malloc_mutex_unlock(&arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
		    true);
		err = true;
	}
	return (err);
}

bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

	malloc_mutex_lock(&arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena->nactive += (udiff >> LG_PAGE);

	err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
	    chunksize, zero, true) == NULL);
	malloc_mutex_unlock(&arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
		    chunk, oldsize, usize, zero, nchunk, udiff,
		    cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
		    true);
		err = true;
	}

	if (config_stats && !err)
		stats_cactive_add(udiff);
	return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are quantized, so not all candidate runs are necessarily exactly
 * the same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	size_t search_size = run_quantize_first(size);
	arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size);
	arena_chunk_map_misc_t *miscelm =
	    arena_avail_tree_nsearch(&arena->runs_avail, key);
	if (miscelm == NULL)
		return (NULL);
	return (&miscelm->run);
}

Jason Evanse476f8a2010-01-16 09:53:50 -08001083static arena_run_t *
Jason Evansaa5113b2014-01-14 16:23:03 -08001084arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001085{
Jason Evansaa282662015-07-15 16:02:21 -07001086 arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001087 if (run != NULL) {
1088 if (arena_run_split_large(arena, run, size, zero))
1089 run = NULL;
1090 }
Jason Evans97c04a92015-03-06 19:57:36 -08001091 return (run);
Jason Evans5b0c9962012-05-10 15:47:24 -07001092}
1093
1094static arena_run_t *
Jason Evansaa5113b2014-01-14 16:23:03 -08001095arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
Jason Evans5b0c9962012-05-10 15:47:24 -07001096{
1097 arena_chunk_t *chunk;
1098 arena_run_t *run;
1099
Jason Evansfc0b3b72014-10-09 17:54:06 -07001100 assert(size <= arena_maxrun);
Jason Evans8a03cf02015-05-04 09:58:36 -07001101 assert(size == PAGE_CEILING(size));
Jason Evans5b0c9962012-05-10 15:47:24 -07001102
1103 /* Search the arena's chunks for the lowest best fit. */
Jason Evansaa5113b2014-01-14 16:23:03 -08001104 run = arena_run_alloc_large_helper(arena, size, zero);
Jason Evans5b0c9962012-05-10 15:47:24 -07001105 if (run != NULL)
1106 return (run);
1107
Jason Evanse476f8a2010-01-16 09:53:50 -08001108 /*
1109 * No usable runs. Create a new chunk from which to allocate the run.
1110 */
1111 chunk = arena_chunk_alloc(arena);
Jason Evanse00572b2010-03-14 19:43:56 -07001112 if (chunk != NULL) {
Jason Evans0c5dd032014-09-29 01:31:39 -07001113 run = &arena_miscelm_get(chunk, map_bias)->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001114 if (arena_run_split_large(arena, run, size, zero))
1115 run = NULL;
Jason Evanse00572b2010-03-14 19:43:56 -07001116 return (run);
1117 }
1118
1119 /*
1120 * arena_chunk_alloc() failed, but another thread may have made
1121 * sufficient memory available while this one dropped arena->lock in
1122 * arena_chunk_alloc(), so search one more time.
1123 */
Jason Evansaa5113b2014-01-14 16:23:03 -08001124 return (arena_run_alloc_large_helper(arena, size, zero));
1125}
1126
1127static arena_run_t *
Jason Evansd01fd192015-08-19 15:21:32 -07001128arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
Jason Evansaa5113b2014-01-14 16:23:03 -08001129{
Jason Evansaa282662015-07-15 16:02:21 -07001130 arena_run_t *run = arena_run_first_best_fit(arena, size);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001131 if (run != NULL) {
1132 if (arena_run_split_small(arena, run, size, binind))
1133 run = NULL;
1134 }
Jason Evans97c04a92015-03-06 19:57:36 -08001135 return (run);
Jason Evansaa5113b2014-01-14 16:23:03 -08001136}
1137
1138static arena_run_t *
Jason Evansd01fd192015-08-19 15:21:32 -07001139arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
Jason Evansaa5113b2014-01-14 16:23:03 -08001140{
1141 arena_chunk_t *chunk;
1142 arena_run_t *run;
1143
Jason Evansfc0b3b72014-10-09 17:54:06 -07001144 assert(size <= arena_maxrun);
Jason Evans8a03cf02015-05-04 09:58:36 -07001145 assert(size == PAGE_CEILING(size));
Jason Evansaa5113b2014-01-14 16:23:03 -08001146 assert(binind != BININD_INVALID);
1147
1148 /* Search the arena's chunks for the lowest best fit. */
1149 run = arena_run_alloc_small_helper(arena, size, binind);
1150 if (run != NULL)
1151 return (run);
1152
1153 /*
1154 * No usable runs. Create a new chunk from which to allocate the run.
1155 */
1156 chunk = arena_chunk_alloc(arena);
1157 if (chunk != NULL) {
Jason Evans0c5dd032014-09-29 01:31:39 -07001158 run = &arena_miscelm_get(chunk, map_bias)->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001159 if (arena_run_split_small(arena, run, size, binind))
1160 run = NULL;
Jason Evansaa5113b2014-01-14 16:23:03 -08001161 return (run);
1162 }
1163
1164 /*
1165 * arena_chunk_alloc() failed, but another thread may have made
1166 * sufficient memory available while this one dropped arena->lock in
1167 * arena_chunk_alloc(), so search one more time.
1168 */
1169 return (arena_run_alloc_small_helper(arena, size, binind));
Jason Evanse476f8a2010-01-16 09:53:50 -08001170}
1171
Jason Evans8d6a3e82015-03-18 18:55:33 -07001172static bool
1173arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
1174{
1175
Jason Evansbd16ea42015-03-24 15:59:28 -07001176 return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
1177 << 3));
Jason Evans8d6a3e82015-03-18 18:55:33 -07001178}
1179
1180ssize_t
1181arena_lg_dirty_mult_get(arena_t *arena)
1182{
1183 ssize_t lg_dirty_mult;
1184
1185 malloc_mutex_lock(&arena->lock);
1186 lg_dirty_mult = arena->lg_dirty_mult;
1187 malloc_mutex_unlock(&arena->lock);
1188
1189 return (lg_dirty_mult);
1190}
1191
1192bool
1193arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
1194{
1195
1196 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
1197 return (true);
1198
1199 malloc_mutex_lock(&arena->lock);
1200 arena->lg_dirty_mult = lg_dirty_mult;
1201 arena_maybe_purge(arena);
1202 malloc_mutex_unlock(&arena->lock);
1203
1204 return (false);
1205}
1206
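/*
 * A minimal usage sketch, not part of this file: tuning lg_dirty_mult for
 * arena 0 through the public mallctl() interface.  The mallctl name
 * "arena.0.lg_dirty_mult" is quoted from memory and worth checking against the
 * manual for this version; error handling is omitted.
 */
#if 0
#include <jemalloc/jemalloc.h>

static void
tune_arena0_purging(void)
{
	ssize_t lg_dirty_mult = 3;	/* 8:1 ratio; -1 disables purging. */
	size_t sz = sizeof(lg_dirty_mult);

	/* Write the new ratio; invalid values are rejected. */
	mallctl("arena.0.lg_dirty_mult", NULL, NULL, &lg_dirty_mult,
	    sizeof(lg_dirty_mult));
	/* Read it back. */
	mallctl("arena.0.lg_dirty_mult", &lg_dirty_mult, &sz, NULL, 0);
}
#endif
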
Jason Evans99bd94f2015-02-18 16:40:53 -08001207void
Jason Evans05b21be2010-03-14 17:36:10 -07001208arena_maybe_purge(arena_t *arena)
1209{
1210
Jason Evanse3d13062012-10-30 15:42:37 -07001211 /* Don't purge if the option is disabled. */
Jason Evans8d6a3e82015-03-18 18:55:33 -07001212 if (arena->lg_dirty_mult < 0)
Jason Evanse3d13062012-10-30 15:42:37 -07001213 return;
Jason Evans0a9f9a42015-06-22 18:50:32 -07001214 /* Don't recursively purge. */
1215 if (arena->purging)
Jason Evanse3d13062012-10-30 15:42:37 -07001216 return;
Jason Evans0a9f9a42015-06-22 18:50:32 -07001217 /*
1218 * Iterate, since preventing recursive purging could otherwise leave too
1219 * many dirty pages.
1220 */
1221 while (true) {
1222 size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1223 if (threshold < chunk_npages)
1224 threshold = chunk_npages;
1225 /*
1226 * Don't purge unless the number of purgeable pages exceeds the
1227 * threshold.
1228 */
1229 if (arena->ndirty <= threshold)
1230 return;
1231 arena_purge(arena, false);
1232 }
Jason Evans05b21be2010-03-14 17:36:10 -07001233}
1234
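/*
 * A minimal standalone sketch of the purge trigger arithmetic above; the
 * helper purge_threshold() and the example numbers are illustrative and not
 * part of jemalloc.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>

/* arena_maybe_purge() purges while ndirty exceeds this value. */
static size_t
purge_threshold(size_t nactive, ssize_t lg_dirty_mult, size_t chunk_npages)
{
	size_t threshold;

	if (lg_dirty_mult < 0)
		return (SIZE_MAX);	/* Purging disabled. */
	threshold = nactive >> lg_dirty_mult;
	/* Never purge while fewer than chunk_npages pages are dirty. */
	return (threshold < chunk_npages ? chunk_npages : threshold);
}

/*
 * Example: with 4 KiB pages, 2 MiB chunks (chunk_npages == 512), the usual
 * default lg_dirty_mult of 3 (an 8:1 active:dirty ratio), and nactive ==
 * 65536 pages (256 MiB), purging starts once ndirty exceeds 65536 >> 3 ==
 * 8192 pages (32 MiB).
 */
#endif
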
Qinfan Wua244e502014-07-21 10:23:36 -07001235static size_t
1236arena_dirty_count(arena_t *arena)
1237{
1238 size_t ndirty = 0;
Jason Evans38e42d32015-03-10 18:15:40 -07001239 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001240 extent_node_t *chunkselm;
Qinfan Wua244e502014-07-21 10:23:36 -07001241
Jason Evans38e42d32015-03-10 18:15:40 -07001242 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001243 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001244 rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001245 size_t npages;
1246
Jason Evansf5c8f372015-03-10 18:29:49 -07001247 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001248 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001249 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001250 } else {
Jason Evans38e42d32015-03-10 18:15:40 -07001251 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
1252 rdelm);
1253 arena_chunk_map_misc_t *miscelm =
1254 arena_rd_to_miscelm(rdelm);
1255 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001256 assert(arena_mapbits_allocated_get(chunk, pageind) ==
1257 0);
1258 assert(arena_mapbits_large_get(chunk, pageind) == 0);
1259 assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
1260 npages = arena_mapbits_unallocated_size_get(chunk,
1261 pageind) >> LG_PAGE;
1262 }
Qinfan Wua244e502014-07-21 10:23:36 -07001263 ndirty += npages;
1264 }
1265
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001266 return (ndirty);
Jason Evansaa5113b2014-01-14 16:23:03 -08001267}
1268
1269static size_t
Jason Evans070b3c32014-08-14 14:45:58 -07001270arena_compute_npurge(arena_t *arena, bool all)
Jason Evansaa5113b2014-01-14 16:23:03 -08001271{
Jason Evans070b3c32014-08-14 14:45:58 -07001272 size_t npurge;
Jason Evansaa5113b2014-01-14 16:23:03 -08001273
1274 /*
1275 * Compute the minimum number of pages that this thread should try to
1276 * purge.
1277 */
Jason Evans551ebc42014-10-03 10:16:09 -07001278 if (!all) {
Jason Evans8d6a3e82015-03-18 18:55:33 -07001279 size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
Mike Hommey65057332015-02-04 07:16:55 +09001280 threshold = threshold < chunk_npages ? chunk_npages : threshold;
Jason Evansaa5113b2014-01-14 16:23:03 -08001281
Jason Evans070b3c32014-08-14 14:45:58 -07001282 npurge = arena->ndirty - threshold;
Jason Evansaa5113b2014-01-14 16:23:03 -08001283 } else
Jason Evans070b3c32014-08-14 14:45:58 -07001284 npurge = arena->ndirty;
Jason Evansaa5113b2014-01-14 16:23:03 -08001285
Jason Evans070b3c32014-08-14 14:45:58 -07001286 return (npurge);
Jason Evansaa5113b2014-01-14 16:23:03 -08001287}
1288
Qinfan Wue9708002014-07-21 18:09:04 -07001289static size_t
Jason Evansb49a3342015-07-28 11:28:19 -04001290arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
1291 size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001292 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001293{
Jason Evans38e42d32015-03-10 18:15:40 -07001294 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001295 extent_node_t *chunkselm;
Qinfan Wue9708002014-07-21 18:09:04 -07001296 size_t nstashed = 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08001297
Jason Evansee41ad42015-02-15 18:04:46 -08001298 /* Stash at least npurge pages. */
Jason Evans38e42d32015-03-10 18:15:40 -07001299 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001300 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001301 rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
Jason Evansee41ad42015-02-15 18:04:46 -08001302 size_t npages;
Jason Evans38e42d32015-03-10 18:15:40 -07001303 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansaa5113b2014-01-14 16:23:03 -08001304
Jason Evansf5c8f372015-03-10 18:29:49 -07001305 if (rdelm == &chunkselm->rd) {
Jason Evans99bd94f2015-02-18 16:40:53 -08001306 extent_node_t *chunkselm_next;
1307 bool zero;
Jason Evansee41ad42015-02-15 18:04:46 -08001308 UNUSED void *chunk;
Jason Evansaa5113b2014-01-14 16:23:03 -08001309
Jason Evans738e0892015-02-18 01:15:50 -08001310 chunkselm_next = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001311 /*
Jason Evans99bd94f2015-02-18 16:40:53 -08001312 * Allocate. chunkselm remains valid due to the
1313 * dalloc_node=false argument to chunk_alloc_cache().
Jason Evansee41ad42015-02-15 18:04:46 -08001314 */
Jason Evansee41ad42015-02-15 18:04:46 -08001315 zero = false;
Jason Evansb49a3342015-07-28 11:28:19 -04001316 chunk = chunk_alloc_cache(arena, chunk_hooks,
Jason Evans99bd94f2015-02-18 16:40:53 -08001317 extent_node_addr_get(chunkselm),
1318 extent_node_size_get(chunkselm), chunksize, &zero,
1319 false);
1320 assert(chunk == extent_node_addr_get(chunkselm));
1321 assert(zero == extent_node_zeroed_get(chunkselm));
1322 extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
Jason Evans738e0892015-02-18 01:15:50 -08001323 purge_chunks_sentinel);
Jason Evans99bd94f2015-02-18 16:40:53 -08001324 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evansee41ad42015-02-15 18:04:46 -08001325 chunkselm = chunkselm_next;
1326 } else {
1327 arena_chunk_t *chunk =
Jason Evans38e42d32015-03-10 18:15:40 -07001328 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1329 arena_chunk_map_misc_t *miscelm =
1330 arena_rd_to_miscelm(rdelm);
1331 size_t pageind = arena_miscelm_to_pageind(miscelm);
1332 arena_run_t *run = &miscelm->run;
Jason Evansee41ad42015-02-15 18:04:46 -08001333 size_t run_size =
1334 arena_mapbits_unallocated_size_get(chunk, pageind);
Jason Evans070b3c32014-08-14 14:45:58 -07001335
Jason Evansee41ad42015-02-15 18:04:46 -08001336 npages = run_size >> LG_PAGE;
1337
1338 assert(pageind + npages <= chunk_npages);
1339 assert(arena_mapbits_dirty_get(chunk, pageind) ==
1340 arena_mapbits_dirty_get(chunk, pageind+npages-1));
1341
1342 /*
 1343			 * If the run is in the spare chunk, make the spare
 1344			 * available via arena_chunk_alloc() before the split.
1345 */
1346 if (chunk == arena->spare)
1347 arena_chunk_alloc(arena);
1348
1349 /* Temporarily allocate the free dirty run. */
1350 arena_run_split_large(arena, run, run_size, false);
Jason Evans339c2b22015-02-17 22:25:56 -08001351			/*
			 * Stash.  arena_run_split_large() above removed rdelm
			 * from the dirty ring and left it self-linked, so
			 * qr_new() would be redundant; assert that instead.
			 */
Jason Evansee41ad42015-02-15 18:04:46 -08001352 if (false)
Jason Evans38e42d32015-03-10 18:15:40 -07001353 qr_new(rdelm, rd_link); /* Redundant. */
Jason Evansee41ad42015-02-15 18:04:46 -08001354 else {
Jason Evans38e42d32015-03-10 18:15:40 -07001355 assert(qr_next(rdelm, rd_link) == rdelm);
1356 assert(qr_prev(rdelm, rd_link) == rdelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001357 }
Jason Evans38e42d32015-03-10 18:15:40 -07001358 qr_meld(purge_runs_sentinel, rdelm, rd_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001359 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001360
Qinfan Wue9708002014-07-21 18:09:04 -07001361 nstashed += npages;
Jason Evans551ebc42014-10-03 10:16:09 -07001362 if (!all && nstashed >= npurge)
Qinfan Wue9708002014-07-21 18:09:04 -07001363 break;
Jason Evansaa5113b2014-01-14 16:23:03 -08001364 }
Qinfan Wue9708002014-07-21 18:09:04 -07001365
1366 return (nstashed);
Jason Evansaa5113b2014-01-14 16:23:03 -08001367}
1368
1369static size_t
Jason Evansb49a3342015-07-28 11:28:19 -04001370arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001371 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001372 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001373{
Qinfan Wue9708002014-07-21 18:09:04 -07001374 size_t npurged, nmadvise;
Jason Evans38e42d32015-03-10 18:15:40 -07001375 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001376 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001377
Jason Evansaa5113b2014-01-14 16:23:03 -08001378 if (config_stats)
1379 nmadvise = 0;
1380 npurged = 0;
Qinfan Wue9708002014-07-21 18:09:04 -07001381
1382 malloc_mutex_unlock(&arena->lock);
Jason Evans38e42d32015-03-10 18:15:40 -07001383 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001384 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001385 rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001386 size_t npages;
Qinfan Wue9708002014-07-21 18:09:04 -07001387
Jason Evansf5c8f372015-03-10 18:29:49 -07001388 if (rdelm == &chunkselm->rd) {
Jason Evansb49a3342015-07-28 11:28:19 -04001389 /*
1390 * Don't actually purge the chunk here because 1)
1391 * chunkselm is embedded in the chunk and must remain
1392 * valid, and 2) we deallocate the chunk in
1393 * arena_unstash_purged(), where it is destroyed,
1394 * decommitted, or purged, depending on chunk
1395 * deallocation policy.
1396 */
Jason Evansee41ad42015-02-15 18:04:46 -08001397 size_t size = extent_node_size_get(chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001398 npages = size >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001399 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001400 } else {
Jason Evans45186f02015-08-10 23:03:34 -07001401 size_t pageind, run_size, flag_unzeroed, flags, i;
1402 bool decommitted;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001403 arena_chunk_t *chunk =
1404 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001405 arena_chunk_map_misc_t *miscelm =
1406 arena_rd_to_miscelm(rdelm);
1407 pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001408 run_size = arena_mapbits_large_size_get(chunk, pageind);
1409 npages = run_size >> LG_PAGE;
Qinfan Wue9708002014-07-21 18:09:04 -07001410
Jason Evansee41ad42015-02-15 18:04:46 -08001411 assert(pageind + npages <= chunk_npages);
Jason Evansde249c82015-08-09 16:47:27 -07001412 assert(!arena_mapbits_decommitted_get(chunk, pageind));
1413 assert(!arena_mapbits_decommitted_get(chunk,
1414 pageind+npages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001415 decommitted = !chunk_hooks->decommit(chunk, chunksize,
1416 pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1417 if (decommitted) {
Jason Evans45186f02015-08-10 23:03:34 -07001418 flag_unzeroed = 0;
1419 flags = CHUNK_MAP_DECOMMITTED;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001420 } else {
Jason Evans45186f02015-08-10 23:03:34 -07001421 flag_unzeroed = chunk_purge_wrapper(arena,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001422 chunk_hooks, chunk, chunksize, pageind <<
Jason Evans45186f02015-08-10 23:03:34 -07001423 LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1424 flags = flag_unzeroed;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001425 }
Jason Evans45186f02015-08-10 23:03:34 -07001426 arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1427 flags);
1428 arena_mapbits_large_set(chunk, pageind, run_size,
1429 flags);
Jason Evansee41ad42015-02-15 18:04:46 -08001430
1431 /*
Jason Evans45186f02015-08-10 23:03:34 -07001432 * Set the unzeroed flag for internal pages, now that
Jason Evans8d6a3e82015-03-18 18:55:33 -07001433 * chunk_purge_wrapper() has returned whether the pages
1434 * were zeroed as a side effect of purging. This chunk
1435 * map modification is safe even though the arena mutex
Jason Evansee41ad42015-02-15 18:04:46 -08001436 * isn't currently owned by this thread, because the run
1437 * is marked as allocated, thus protecting it from being
1438 * modified by any other thread. As long as these
1439 * writes don't perturb the first and last elements'
1440 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1441 */
Jason Evans45186f02015-08-10 23:03:34 -07001442 for (i = 1; i < npages-1; i++) {
1443 arena_mapbits_internal_set(chunk, pageind+i,
Jason Evansee41ad42015-02-15 18:04:46 -08001444 flag_unzeroed);
1445 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001446 }
Qinfan Wue9708002014-07-21 18:09:04 -07001447
Jason Evansaa5113b2014-01-14 16:23:03 -08001448 npurged += npages;
1449 if (config_stats)
1450 nmadvise++;
1451 }
1452 malloc_mutex_lock(&arena->lock);
Qinfan Wue9708002014-07-21 18:09:04 -07001453
1454 if (config_stats) {
Jason Evansaa5113b2014-01-14 16:23:03 -08001455 arena->stats.nmadvise += nmadvise;
Qinfan Wue9708002014-07-21 18:09:04 -07001456 arena->stats.purged += npurged;
1457 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001458
1459 return (npurged);
1460}
1461
1462static void
Jason Evansb49a3342015-07-28 11:28:19 -04001463arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001464 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001465 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001466{
Jason Evans38e42d32015-03-10 18:15:40 -07001467 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001468 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001469
Jason Evansb49a3342015-07-28 11:28:19 -04001470 /* Deallocate chunks/runs. */
Jason Evans38e42d32015-03-10 18:15:40 -07001471 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001472 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001473 rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1474 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansf5c8f372015-03-10 18:29:49 -07001475 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001476 extent_node_t *chunkselm_next = qr_next(chunkselm,
Jason Evans738e0892015-02-18 01:15:50 -08001477 cc_link);
Jason Evans339c2b22015-02-17 22:25:56 -08001478 void *addr = extent_node_addr_get(chunkselm);
1479 size_t size = extent_node_size_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001480 bool zeroed = extent_node_zeroed_get(chunkselm);
Jason Evansde249c82015-08-09 16:47:27 -07001481 bool committed = extent_node_committed_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001482 extent_node_dirty_remove(chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001483 arena_node_dalloc(arena, chunkselm);
1484 chunkselm = chunkselm_next;
Jason Evansb49a3342015-07-28 11:28:19 -04001485 chunk_dalloc_arena(arena, chunk_hooks, addr, size,
Jason Evansde249c82015-08-09 16:47:27 -07001486 zeroed, committed);
Jason Evansee41ad42015-02-15 18:04:46 -08001487 } else {
Jason Evans8fadb1a2015-08-04 10:49:46 -07001488 arena_chunk_t *chunk =
1489 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001490 arena_chunk_map_misc_t *miscelm =
1491 arena_rd_to_miscelm(rdelm);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001492 size_t pageind = arena_miscelm_to_pageind(miscelm);
1493 bool decommitted = (arena_mapbits_decommitted_get(chunk,
1494 pageind) != 0);
Jason Evans38e42d32015-03-10 18:15:40 -07001495 arena_run_t *run = &miscelm->run;
1496 qr_remove(rdelm, rd_link);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001497 arena_run_dalloc(arena, run, false, true, decommitted);
Jason Evansee41ad42015-02-15 18:04:46 -08001498 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001499 }
1500}
1501
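/*
 * Purging proceeds in three phases: arena_stash_dirty() moves dirty runs and
 * cached chunks onto local sentinel lists while arena->lock is held,
 * arena_purge_stashed() decommits or purges them with the lock dropped, and
 * arena_unstash_purged() deallocates the stashed runs and chunks.
 */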
Jason Evans8d6a3e82015-03-18 18:55:33 -07001502static void
Jason Evans6005f072010-09-30 16:55:08 -07001503arena_purge(arena_t *arena, bool all)
Jason Evanse476f8a2010-01-16 09:53:50 -08001504{
Jason Evans8fadb1a2015-08-04 10:49:46 -07001505 chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
Jason Evans070b3c32014-08-14 14:45:58 -07001506 size_t npurge, npurgeable, npurged;
Jason Evans38e42d32015-03-10 18:15:40 -07001507 arena_runs_dirty_link_t purge_runs_sentinel;
Jason Evansee41ad42015-02-15 18:04:46 -08001508 extent_node_t purge_chunks_sentinel;
Qinfan Wue9708002014-07-21 18:09:04 -07001509
Jason Evans0a9f9a42015-06-22 18:50:32 -07001510 arena->purging = true;
1511
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001512 /*
1513 * Calls to arena_dirty_count() are disabled even for debug builds
1514 * because overhead grows nonlinearly as memory usage increases.
1515 */
1516 if (false && config_debug) {
Qinfan Wu90737fc2014-07-21 19:39:20 -07001517 size_t ndirty = arena_dirty_count(arena);
Qinfan Wua244e502014-07-21 10:23:36 -07001518 assert(ndirty == arena->ndirty);
Jason Evans2caa4712010-03-04 21:35:07 -08001519 }
Jason Evans8d6a3e82015-03-18 18:55:33 -07001520 assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all);
Jason Evanse476f8a2010-01-16 09:53:50 -08001521
Jason Evans7372b152012-02-10 20:22:09 -08001522 if (config_stats)
1523 arena->stats.npurge++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001524
Jason Evans070b3c32014-08-14 14:45:58 -07001525 npurge = arena_compute_npurge(arena, all);
Jason Evansee41ad42015-02-15 18:04:46 -08001526 qr_new(&purge_runs_sentinel, rd_link);
Jason Evans47701b22015-02-17 22:23:10 -08001527 extent_node_dirty_linkage_init(&purge_chunks_sentinel);
Jason Evansee41ad42015-02-15 18:04:46 -08001528
Jason Evansb49a3342015-07-28 11:28:19 -04001529 npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge,
1530 &purge_runs_sentinel, &purge_chunks_sentinel);
Jason Evans070b3c32014-08-14 14:45:58 -07001531 assert(npurgeable >= npurge);
Jason Evansb49a3342015-07-28 11:28:19 -04001532 npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001533 &purge_chunks_sentinel);
Qinfan Wue9708002014-07-21 18:09:04 -07001534 assert(npurged == npurgeable);
Jason Evansb49a3342015-07-28 11:28:19 -04001535 arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001536 &purge_chunks_sentinel);
Jason Evans0a9f9a42015-06-22 18:50:32 -07001537
1538 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08001539}
1540
Jason Evans6005f072010-09-30 16:55:08 -07001541void
1542arena_purge_all(arena_t *arena)
1543{
1544
1545 malloc_mutex_lock(&arena->lock);
1546 arena_purge(arena, true);
1547 malloc_mutex_unlock(&arena->lock);
1548}
1549
Jason Evanse476f8a2010-01-16 09:53:50 -08001550static void
Jason Evansaa5113b2014-01-14 16:23:03 -08001551arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001552 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
1553 size_t flag_decommitted)
Jason Evanse476f8a2010-01-16 09:53:50 -08001554{
Jason Evansaa5113b2014-01-14 16:23:03 -08001555 size_t size = *p_size;
1556 size_t run_ind = *p_run_ind;
1557 size_t run_pages = *p_run_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001558
1559 /* Try to coalesce forward. */
1560 if (run_ind + run_pages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07001561 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
Jason Evans8fadb1a2015-08-04 10:49:46 -07001562 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
1563 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
1564 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07001565 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1566 run_ind+run_pages);
Jason Evansae4c7b42012-04-02 07:04:34 -07001567 size_t nrun_pages = nrun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001568
1569 /*
1570 * Remove successor from runs_avail; the coalesced run is
1571 * inserted later.
1572 */
Jason Evans203484e2012-05-02 00:30:36 -07001573 assert(arena_mapbits_unallocated_size_get(chunk,
1574 run_ind+run_pages+nrun_pages-1) == nrun_size);
1575 assert(arena_mapbits_dirty_get(chunk,
1576 run_ind+run_pages+nrun_pages-1) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001577 assert(arena_mapbits_decommitted_get(chunk,
1578 run_ind+run_pages+nrun_pages-1) == flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07001579 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08001580
Jason Evansee41ad42015-02-15 18:04:46 -08001581 /*
1582 * If the successor is dirty, remove it from the set of dirty
1583 * pages.
1584 */
Qinfan Wu04d60a12014-07-18 14:21:17 -07001585 if (flag_dirty != 0) {
Jason Evansee41ad42015-02-15 18:04:46 -08001586 arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
Jason Evans070b3c32014-08-14 14:45:58 -07001587 nrun_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07001588 }
1589
Jason Evanse476f8a2010-01-16 09:53:50 -08001590 size += nrun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07001591 run_pages += nrun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001592
Jason Evans203484e2012-05-02 00:30:36 -07001593 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1594 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1595 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001596 }
1597
1598 /* Try to coalesce backward. */
Jason Evansaa5113b2014-01-14 16:23:03 -08001599 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
1600 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
Jason Evans8fadb1a2015-08-04 10:49:46 -07001601 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
1602 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07001603 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
1604 run_ind-1);
Jason Evansae4c7b42012-04-02 07:04:34 -07001605 size_t prun_pages = prun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001606
Jason Evans12ca9142010-10-17 19:56:09 -07001607 run_ind -= prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001608
1609 /*
1610 * Remove predecessor from runs_avail; the coalesced run is
1611 * inserted later.
1612 */
Jason Evans203484e2012-05-02 00:30:36 -07001613 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1614 prun_size);
1615 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001616 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
1617 flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07001618 arena_avail_remove(arena, chunk, run_ind, prun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08001619
Jason Evansee41ad42015-02-15 18:04:46 -08001620 /*
1621 * If the predecessor is dirty, remove it from the set of dirty
1622 * pages.
1623 */
1624 if (flag_dirty != 0) {
1625 arena_run_dirty_remove(arena, chunk, run_ind,
1626 prun_pages);
1627 }
Qinfan Wu04d60a12014-07-18 14:21:17 -07001628
Jason Evanse476f8a2010-01-16 09:53:50 -08001629 size += prun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07001630 run_pages += prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001631
Jason Evans203484e2012-05-02 00:30:36 -07001632 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1633 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1634 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001635 }
1636
Jason Evansaa5113b2014-01-14 16:23:03 -08001637 *p_size = size;
1638 *p_run_ind = run_ind;
1639 *p_run_pages = run_pages;
1640}
1641
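/*
 * A concrete example of the coalescing above, with illustrative page numbers:
 * suppose pages [10..14) are being deallocated, pages [14..20) are already
 * free with matching dirty/decommitted flags, and pages [6..10) are free as
 * well.  Forward coalescing first grows the run to [10..20), backward
 * coalescing then grows it to [6..20), and the caller reinserts the single
 * 14-page run into runs_avail.
 */
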
Jason Evans8fadb1a2015-08-04 10:49:46 -07001642static size_t
1643arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1644 size_t run_ind)
1645{
1646 size_t size;
1647
1648 assert(run_ind >= map_bias);
1649 assert(run_ind < chunk_npages);
1650
1651 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
1652 size = arena_mapbits_large_size_get(chunk, run_ind);
1653 assert(size == PAGE || arena_mapbits_large_size_get(chunk,
1654 run_ind+(size>>LG_PAGE)-1) == 0);
1655 } else {
1656 arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
1657 size = bin_info->run_size;
1658 }
1659
1660 return (size);
1661}
1662
1663static bool
1664arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run)
1665{
1666 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1667 size_t run_ind = arena_miscelm_to_pageind(miscelm);
1668 size_t offset = run_ind << LG_PAGE;
1669 size_t length = arena_run_size_get(arena, chunk, run, run_ind);
1670
1671 return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length,
1672 arena->ind));
1673}
1674
Jason Evansaa5113b2014-01-14 16:23:03 -08001675static void
Jason Evans8fadb1a2015-08-04 10:49:46 -07001676arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
1677 bool decommitted)
Jason Evansaa5113b2014-01-14 16:23:03 -08001678{
1679 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07001680 arena_chunk_map_misc_t *miscelm;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001681 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
Jason Evansaa5113b2014-01-14 16:23:03 -08001682
1683 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07001684 miscelm = arena_run_to_miscelm(run);
1685 run_ind = arena_miscelm_to_pageind(miscelm);
Jason Evansaa5113b2014-01-14 16:23:03 -08001686 assert(run_ind >= map_bias);
1687 assert(run_ind < chunk_npages);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001688 size = arena_run_size_get(arena, chunk, run, run_ind);
Jason Evansaa5113b2014-01-14 16:23:03 -08001689 run_pages = (size >> LG_PAGE);
1690 arena_cactive_update(arena, 0, run_pages);
1691 arena->nactive -= run_pages;
1692
1693 /*
 1694	 * The run is dirty if the caller claims to have dirtied it, or if it
 1695	 * was already dirty before being allocated and the caller does not
 1696	 * claim to have cleaned it.
1697 */
1698 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1699 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001700 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
1701 != 0)
Jason Evansaa5113b2014-01-14 16:23:03 -08001702 dirty = true;
1703 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001704 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08001705
1706 /* Mark pages as unallocated in the chunk map. */
Jason Evans8fadb1a2015-08-04 10:49:46 -07001707 if (dirty || decommitted) {
1708 size_t flags = flag_dirty | flag_decommitted;
1709 arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08001710 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001711 flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08001712 } else {
1713 arena_mapbits_unallocated_set(chunk, run_ind, size,
1714 arena_mapbits_unzeroed_get(chunk, run_ind));
1715 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1716 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
1717 }
1718
Jason Evans8fadb1a2015-08-04 10:49:46 -07001719 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
1720 flag_dirty, flag_decommitted);
Jason Evansaa5113b2014-01-14 16:23:03 -08001721
Jason Evanse476f8a2010-01-16 09:53:50 -08001722 /* Insert into runs_avail, now that coalescing is complete. */
Jason Evans203484e2012-05-02 00:30:36 -07001723 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1724 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
1725 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1726 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001727 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
1728 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
Qinfan Wu90737fc2014-07-21 19:39:20 -07001729 arena_avail_insert(arena, chunk, run_ind, run_pages);
Jason Evans8d4203c2010-04-13 20:53:21 -07001730
Jason Evans070b3c32014-08-14 14:45:58 -07001731 if (dirty)
Jason Evansee41ad42015-02-15 18:04:46 -08001732 arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07001733
Jason Evans203484e2012-05-02 00:30:36 -07001734 /* Deallocate chunk if it is now completely unused. */
Jason Evans155bfa72014-10-05 17:54:10 -07001735 if (size == arena_maxrun) {
Jason Evans203484e2012-05-02 00:30:36 -07001736 assert(run_ind == map_bias);
Jason Evans155bfa72014-10-05 17:54:10 -07001737 assert(run_pages == (arena_maxrun >> LG_PAGE));
Jason Evanse2deab72014-05-15 22:22:27 -07001738 arena_chunk_dalloc(arena, chunk);
Jason Evans203484e2012-05-02 00:30:36 -07001739 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001740
Jason Evans4fb7f512010-01-27 18:27:09 -08001741 /*
Jason Evans8d4203c2010-04-13 20:53:21 -07001742 * It is okay to do dirty page processing here even if the chunk was
Jason Evans4fb7f512010-01-27 18:27:09 -08001743 * deallocated above, since in that case it is the spare. Waiting
1744 * until after possible chunk deallocation to do dirty processing
1745 * allows for an old spare to be fully deallocated, thus decreasing the
1746 * chances of spuriously crossing the dirty page purging threshold.
1747 */
Jason Evans8d4203c2010-04-13 20:53:21 -07001748 if (dirty)
Jason Evans05b21be2010-03-14 17:36:10 -07001749 arena_maybe_purge(arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08001750}
1751
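/*
 * Deallocate a run after trying to decommit its pages: if the decommit hook
 * succeeds, the run is passed to arena_run_dalloc() as clean and decommitted;
 * otherwise the pages stay committed and the run is treated as dirty.
 */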
1752static void
Jason Evansde249c82015-08-09 16:47:27 -07001753arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
1754 arena_run_t *run)
1755{
1756 bool committed = arena_run_decommit(arena, chunk, run);
1757
1758 arena_run_dalloc(arena, run, committed, false, !committed);
1759}
1760
1761static void
Jason Evanse476f8a2010-01-16 09:53:50 -08001762arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1763 size_t oldsize, size_t newsize)
1764{
Jason Evans0c5dd032014-09-29 01:31:39 -07001765 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1766 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07001767 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001768 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans1f27abc2015-08-11 12:42:33 -07001769 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
1770 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
1771 CHUNK_MAP_UNZEROED : 0;
Jason Evanse476f8a2010-01-16 09:53:50 -08001772
1773 assert(oldsize > newsize);
1774
1775 /*
1776 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07001777 * leading run as separately allocated. Set the last element of each
1778 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08001779 */
Jason Evans203484e2012-05-02 00:30:36 -07001780 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evans1f27abc2015-08-11 12:42:33 -07001781 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
1782 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
1783 pageind+head_npages-1)));
1784 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
1785 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
Jason Evans940a2e02010-10-17 17:51:37 -07001786
Jason Evans7372b152012-02-10 20:22:09 -08001787 if (config_debug) {
Jason Evansae4c7b42012-04-02 07:04:34 -07001788 UNUSED size_t tail_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001789 assert(arena_mapbits_large_size_get(chunk,
1790 pageind+head_npages+tail_npages-1) == 0);
1791 assert(arena_mapbits_dirty_get(chunk,
1792 pageind+head_npages+tail_npages-1) == flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001793 }
Jason Evansd8ceef62012-05-10 20:59:39 -07001794 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
Jason Evans1f27abc2015-08-11 12:42:33 -07001795 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
1796 pageind+head_npages)));
Jason Evanse476f8a2010-01-16 09:53:50 -08001797
Jason Evans1f27abc2015-08-11 12:42:33 -07001798 arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
Jason Evanse476f8a2010-01-16 09:53:50 -08001799}
1800
1801static void
1802arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1803 size_t oldsize, size_t newsize, bool dirty)
1804{
Jason Evans0c5dd032014-09-29 01:31:39 -07001805 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1806 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07001807 size_t head_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001808 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans1f27abc2015-08-11 12:42:33 -07001809 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
1810 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
1811 CHUNK_MAP_UNZEROED : 0;
Jason Evans0c5dd032014-09-29 01:31:39 -07001812 arena_chunk_map_misc_t *tail_miscelm;
1813 arena_run_t *tail_run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001814
1815 assert(oldsize > newsize);
1816
1817 /*
1818 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07001819 * trailing run as separately allocated. Set the last element of each
1820 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08001821 */
Jason Evans203484e2012-05-02 00:30:36 -07001822 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evans1f27abc2015-08-11 12:42:33 -07001823 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
1824 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
1825 pageind+head_npages-1)));
1826 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
1827 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
Jason Evans940a2e02010-10-17 17:51:37 -07001828
Jason Evans203484e2012-05-02 00:30:36 -07001829 if (config_debug) {
1830 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
1831 assert(arena_mapbits_large_size_get(chunk,
1832 pageind+head_npages+tail_npages-1) == 0);
1833 assert(arena_mapbits_dirty_get(chunk,
1834 pageind+head_npages+tail_npages-1) == flag_dirty);
1835 }
1836 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
Jason Evans1f27abc2015-08-11 12:42:33 -07001837 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
1838 pageind+head_npages)));
Jason Evanse476f8a2010-01-16 09:53:50 -08001839
Jason Evans0c5dd032014-09-29 01:31:39 -07001840 tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
1841 tail_run = &tail_miscelm->run;
Jason Evans1f27abc2015-08-11 12:42:33 -07001842 arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
1843 0));
Jason Evanse476f8a2010-01-16 09:53:50 -08001844}
1845
1846static arena_run_t *
Jason Evanse7a10582012-02-13 17:36:52 -08001847arena_bin_runs_first(arena_bin_t *bin)
1848{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001849 arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
Jason Evans0c5dd032014-09-29 01:31:39 -07001850 if (miscelm != NULL)
1851 return (&miscelm->run);
Jason Evanse7a10582012-02-13 17:36:52 -08001852
1853 return (NULL);
1854}
1855
1856static void
1857arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
1858{
Jason Evans0c5dd032014-09-29 01:31:39 -07001859 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08001860
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001861 assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
Jason Evanse7a10582012-02-13 17:36:52 -08001862
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001863 arena_run_tree_insert(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08001864}
1865
1866static void
1867arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
1868{
Jason Evans0c5dd032014-09-29 01:31:39 -07001869 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08001870
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001871 assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
Jason Evanse7a10582012-02-13 17:36:52 -08001872
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001873 arena_run_tree_remove(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08001874}
1875
1876static arena_run_t *
1877arena_bin_nonfull_run_tryget(arena_bin_t *bin)
1878{
1879 arena_run_t *run = arena_bin_runs_first(bin);
1880 if (run != NULL) {
1881 arena_bin_runs_remove(bin, run);
1882 if (config_stats)
1883 bin->stats.reruns++;
1884 }
1885 return (run);
1886}
1887
1888static arena_run_t *
Jason Evanse476f8a2010-01-16 09:53:50 -08001889arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1890{
Jason Evanse476f8a2010-01-16 09:53:50 -08001891 arena_run_t *run;
Jason Evansd01fd192015-08-19 15:21:32 -07001892 szind_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001893 arena_bin_info_t *bin_info;
Jason Evanse476f8a2010-01-16 09:53:50 -08001894
1895 /* Look for a usable run. */
Jason Evanse7a10582012-02-13 17:36:52 -08001896 run = arena_bin_nonfull_run_tryget(bin);
1897 if (run != NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001898 return (run);
Jason Evanse476f8a2010-01-16 09:53:50 -08001899 /* No existing runs have any space available. */
1900
Jason Evans49f7e8f2011-03-15 13:59:15 -07001901 binind = arena_bin_index(arena, bin);
1902 bin_info = &arena_bin_info[binind];
1903
Jason Evanse476f8a2010-01-16 09:53:50 -08001904 /* Allocate a new run. */
Jason Evanse00572b2010-03-14 19:43:56 -07001905 malloc_mutex_unlock(&bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07001906 /******************************/
Jason Evans86815df2010-03-13 20:32:56 -08001907 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001908 run = arena_run_alloc_small(arena, bin_info->run_size, binind);
Jason Evanse00572b2010-03-14 19:43:56 -07001909 if (run != NULL) {
1910 /* Initialize run internals. */
Jason Evans381c23d2014-10-10 23:01:03 -07001911 run->binind = binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001912 run->nfree = bin_info->nregs;
Jason Evans0c5dd032014-09-29 01:31:39 -07001913 bitmap_init(run->bitmap, &bin_info->bitmap_info);
Jason Evanse69bee02010-03-15 22:25:23 -07001914 }
1915 malloc_mutex_unlock(&arena->lock);
1916 /********************************/
1917 malloc_mutex_lock(&bin->lock);
1918 if (run != NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001919 if (config_stats) {
1920 bin->stats.nruns++;
1921 bin->stats.curruns++;
Jason Evans7372b152012-02-10 20:22:09 -08001922 }
Jason Evanse00572b2010-03-14 19:43:56 -07001923 return (run);
1924 }
1925
1926 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08001927 * arena_run_alloc_small() failed, but another thread may have made
Jason Evans940a2e02010-10-17 17:51:37 -07001928 * sufficient memory available while this one dropped bin->lock above,
Jason Evanse00572b2010-03-14 19:43:56 -07001929 * so search one more time.
1930 */
Jason Evanse7a10582012-02-13 17:36:52 -08001931 run = arena_bin_nonfull_run_tryget(bin);
1932 if (run != NULL)
Jason Evanse00572b2010-03-14 19:43:56 -07001933 return (run);
Jason Evanse00572b2010-03-14 19:43:56 -07001934
1935 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08001936}
1937
Jason Evans1e0a6362010-03-13 13:41:58 -08001938/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08001939static void *
1940arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1941{
Jason Evansd01fd192015-08-19 15:21:32 -07001942 szind_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001943 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07001944 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001945
Jason Evans49f7e8f2011-03-15 13:59:15 -07001946 binind = arena_bin_index(arena, bin);
1947 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07001948 bin->runcur = NULL;
1949 run = arena_bin_nonfull_run_get(arena, bin);
1950 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1951 /*
1952 * Another thread updated runcur while this one ran without the
1953 * bin lock in arena_bin_nonfull_run_get().
1954 */
Dmitry-Mea306a602015-09-04 13:15:28 +03001955 void *ret;
Jason Evanse00572b2010-03-14 19:43:56 -07001956 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001957 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07001958 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07001959 arena_chunk_t *chunk;
1960
1961 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08001962 * arena_run_alloc_small() may have allocated run, or
1963 * it may have pulled run from the bin's run tree.
1964 * Therefore it is unsafe to make any assumptions about
1965 * how run has previously been used, and
1966 * arena_bin_lower_run() must be called, as if a region
1967 * were just deallocated from the run.
Jason Evans940a2e02010-10-17 17:51:37 -07001968 */
1969 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001970 if (run->nfree == bin_info->nregs)
Jason Evans8de6a022010-10-17 20:57:30 -07001971 arena_dalloc_bin_run(arena, chunk, run, bin);
1972 else
1973 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07001974 }
1975 return (ret);
1976 }
1977
1978 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001979 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07001980
1981 bin->runcur = run;
1982
Jason Evanse476f8a2010-01-16 09:53:50 -08001983 assert(bin->runcur->nfree > 0);
1984
Jason Evans49f7e8f2011-03-15 13:59:15 -07001985 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08001986}
1987
Jason Evans86815df2010-03-13 20:32:56 -08001988void
Jason Evansd01fd192015-08-19 15:21:32 -07001989arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
Jason Evans7372b152012-02-10 20:22:09 -08001990 uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08001991{
1992 unsigned i, nfill;
1993 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08001994
1995 assert(tbin->ncached == 0);
1996
Jason Evans88c222c2013-02-06 11:59:30 -08001997 if (config_prof && arena_prof_accum(arena, prof_accumbytes))
1998 prof_idump();
Jason Evanse69bee02010-03-15 22:25:23 -07001999 bin = &arena->bins[binind];
2000 malloc_mutex_lock(&bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07002001 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2002 tbin->lg_fill_div); i < nfill; i++) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002003 arena_run_t *run;
2004 void *ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002005 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002006 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002007 else
2008 ptr = arena_bin_malloc_hard(arena, bin);
Jason Evansf11a6772014-10-05 13:05:10 -07002009 if (ptr == NULL) {
2010 /*
2011 * OOM. tbin->avail isn't yet filled down to its first
2012 * element, so the successful allocations (if any) must
2013 * be moved to the base of tbin->avail before bailing
2014 * out.
2015 */
2016 if (i > 0) {
2017 memmove(tbin->avail, &tbin->avail[nfill - i],
2018 i * sizeof(void *));
2019 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002020 break;
Jason Evansf11a6772014-10-05 13:05:10 -07002021 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002022 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002023 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2024 true);
2025 }
Jason Evans9c43c132011-03-18 10:53:15 -07002026 /* Insert such that low regions get used first. */
2027 tbin->avail[nfill - 1 - i] = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002028 }
Jason Evans7372b152012-02-10 20:22:09 -08002029 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002030 bin->stats.nmalloc += i;
2031 bin->stats.nrequests += tbin->tstats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002032 bin->stats.curregs += i;
Jason Evans7372b152012-02-10 20:22:09 -08002033 bin->stats.nfills++;
2034 tbin->tstats.nrequests = 0;
2035 }
Jason Evans86815df2010-03-13 20:32:56 -08002036 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002037 tbin->ncached = i;
Jason Evanse476f8a2010-01-16 09:53:50 -08002038}
Jason Evanse476f8a2010-01-16 09:53:50 -08002039
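/*
 * A standalone sketch of the fill count and insertion order used above; the
 * helper name fill_order() and the example sizes are illustrative, not part
 * of jemalloc.  E.g. with ncached_max == 200 and lg_fill_div == 1, a fill
 * batch is 100 objects, and the i-th object taken from the arena lands at
 * avail[nfill - 1 - i] so that low-address regions are handed out first.
 */
#if 0
#include <stddef.h>

static void
fill_order(void **avail, unsigned ncached_max, unsigned lg_fill_div,
    void **batch)
{
	unsigned i, nfill = ncached_max >> lg_fill_div;

	for (i = 0; i < nfill; i++) {
		/* batch[i] stands in for arena_run_reg_alloc()'s result. */
		avail[nfill - 1 - i] = batch[i];
	}
}
#endif
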
Jason Evans122449b2012-04-06 00:35:09 -07002040void
2041arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2042{
2043
2044 if (zero) {
2045 size_t redzone_size = bin_info->redzone_size;
2046 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
2047 redzone_size);
2048 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
2049 redzone_size);
2050 } else {
2051 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
2052 bin_info->reg_interval);
2053 }
2054}
2055
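/*
 * Region layout assumed by the redzone code above and by
 * arena_dalloc_junk_small()/arena_redzones_validate() below (redzone_size may
 * be zero when redzones are disabled):
 *
 *   ptr - redzone_size           ptr            ptr + reg_size
 *         |<--- redzone --->|<---- region ---->|<--- redzone --->|
 *
 * Allocation-time junk is 0xa5, deallocation-time junk is 0x5a, and
 * arena_redzones_validate() flags any redzone byte that is no longer 0xa5.
 */
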
Jason Evans0d6c5d82013-12-17 15:14:36 -08002056#ifdef JEMALLOC_JET
2057#undef arena_redzone_corruption
2058#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
2059#endif
2060static void
2061arena_redzone_corruption(void *ptr, size_t usize, bool after,
2062 size_t offset, uint8_t byte)
2063{
2064
Jason Evans5fae7dc2015-07-23 13:56:25 -07002065 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2066 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
Jason Evans0d6c5d82013-12-17 15:14:36 -08002067 after ? "after" : "before", ptr, usize, byte);
2068}
2069#ifdef JEMALLOC_JET
Jason Evans0d6c5d82013-12-17 15:14:36 -08002070#undef arena_redzone_corruption
Jason Evans6b694c42014-01-07 16:47:56 -08002071#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2072arena_redzone_corruption_t *arena_redzone_corruption =
2073 JEMALLOC_N(arena_redzone_corruption_impl);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002074#endif
2075
2076static void
2077arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07002078{
Jason Evans122449b2012-04-06 00:35:09 -07002079 bool error = false;
2080
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002081 if (opt_junk_alloc) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002082 size_t size = bin_info->reg_size;
2083 size_t redzone_size = bin_info->redzone_size;
2084 size_t i;
2085
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002086 for (i = 1; i <= redzone_size; i++) {
2087 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
2088 if (*byte != 0xa5) {
2089 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002090 arena_redzone_corruption(ptr, size, false, i,
2091 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002092 if (reset)
2093 *byte = 0xa5;
2094 }
2095 }
2096 for (i = 0; i < redzone_size; i++) {
2097 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
2098 if (*byte != 0xa5) {
2099 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002100 arena_redzone_corruption(ptr, size, true, i,
2101 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002102 if (reset)
2103 *byte = 0xa5;
2104 }
Jason Evans122449b2012-04-06 00:35:09 -07002105 }
2106 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002107
Jason Evans122449b2012-04-06 00:35:09 -07002108 if (opt_abort && error)
2109 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08002110}
Jason Evans122449b2012-04-06 00:35:09 -07002111
Jason Evans6b694c42014-01-07 16:47:56 -08002112#ifdef JEMALLOC_JET
2113#undef arena_dalloc_junk_small
2114#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
2115#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08002116void
2117arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2118{
2119 size_t redzone_size = bin_info->redzone_size;
2120
2121 arena_redzones_validate(ptr, bin_info, false);
Jason Evans122449b2012-04-06 00:35:09 -07002122 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
2123 bin_info->reg_interval);
2124}
Jason Evans6b694c42014-01-07 16:47:56 -08002125#ifdef JEMALLOC_JET
2126#undef arena_dalloc_junk_small
2127#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2128arena_dalloc_junk_small_t *arena_dalloc_junk_small =
2129 JEMALLOC_N(arena_dalloc_junk_small_impl);
2130#endif
Jason Evans122449b2012-04-06 00:35:09 -07002131
Jason Evans0d6c5d82013-12-17 15:14:36 -08002132void
2133arena_quarantine_junk_small(void *ptr, size_t usize)
2134{
Jason Evansd01fd192015-08-19 15:21:32 -07002135 szind_t binind;
Jason Evans0d6c5d82013-12-17 15:14:36 -08002136 arena_bin_info_t *bin_info;
2137 cassert(config_fill);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002138 assert(opt_junk_free);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002139 assert(opt_quarantine);
2140 assert(usize <= SMALL_MAXCLASS);
2141
Jason Evans155bfa72014-10-05 17:54:10 -07002142 binind = size2index(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002143 bin_info = &arena_bin_info[binind];
2144 arena_redzones_validate(ptr, bin_info, true);
2145}
2146
Jason Evanse476f8a2010-01-16 09:53:50 -08002147void *
2148arena_malloc_small(arena_t *arena, size_t size, bool zero)
2149{
2150 void *ret;
2151 arena_bin_t *bin;
2152 arena_run_t *run;
Jason Evansd01fd192015-08-19 15:21:32 -07002153 szind_t binind;
Jason Evanse476f8a2010-01-16 09:53:50 -08002154
Jason Evans155bfa72014-10-05 17:54:10 -07002155 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08002156 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08002157 bin = &arena->bins[binind];
Jason Evans155bfa72014-10-05 17:54:10 -07002158 size = index2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08002159
Jason Evans86815df2010-03-13 20:32:56 -08002160 malloc_mutex_lock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002161 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002162 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002163 else
2164 ret = arena_bin_malloc_hard(arena, bin);
2165
2166 if (ret == NULL) {
Jason Evans86815df2010-03-13 20:32:56 -08002167 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002168 return (NULL);
2169 }
2170
Jason Evans7372b152012-02-10 20:22:09 -08002171 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002172 bin->stats.nmalloc++;
2173 bin->stats.nrequests++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002174 bin->stats.curregs++;
Jason Evans7372b152012-02-10 20:22:09 -08002175 }
Jason Evans86815df2010-03-13 20:32:56 -08002176 malloc_mutex_unlock(&bin->lock);
Jason Evans551ebc42014-10-03 10:16:09 -07002177 if (config_prof && !isthreaded && arena_prof_accum(arena, size))
Jason Evans88c222c2013-02-06 11:59:30 -08002178 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08002179
Jason Evans551ebc42014-10-03 10:16:09 -07002180 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002181 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002182 if (unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002183 arena_alloc_junk_small(ret,
2184 &arena_bin_info[binind], false);
Jason Evans9c640bf2014-09-11 16:20:44 -07002185 } else if (unlikely(opt_zero))
Jason Evans7372b152012-02-10 20:22:09 -08002186 memset(ret, 0, size);
2187 }
Jason Evansbd87b012014-04-15 16:35:08 -07002188 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evans122449b2012-04-06 00:35:09 -07002189 } else {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002190 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002191 arena_alloc_junk_small(ret, &arena_bin_info[binind],
2192 true);
2193 }
Jason Evansbd87b012014-04-15 16:35:08 -07002194 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002195 memset(ret, 0, size);
Jason Evans122449b2012-04-06 00:35:09 -07002196 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002197
2198 return (ret);
2199}
2200
2201void *
Jason Evanse476f8a2010-01-16 09:53:50 -08002202arena_malloc_large(arena_t *arena, size_t size, bool zero)
2203{
2204 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07002205 size_t usize;
Jason Evans8a03cf02015-05-04 09:58:36 -07002206 uintptr_t random_offset;
Jason Evans0c5dd032014-09-29 01:31:39 -07002207 arena_run_t *run;
2208 arena_chunk_map_misc_t *miscelm;
Jason Evans88c222c2013-02-06 11:59:30 -08002209 UNUSED bool idump;
Jason Evanse476f8a2010-01-16 09:53:50 -08002210
2211 /* Large allocation. */
Jason Evans155bfa72014-10-05 17:54:10 -07002212 usize = s2u(size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002213 malloc_mutex_lock(&arena->lock);
Jason Evans8a03cf02015-05-04 09:58:36 -07002214 if (config_cache_oblivious) {
Jason Evansbce61d62015-07-07 09:32:05 -07002215 uint64_t r;
2216
Jason Evans8a03cf02015-05-04 09:58:36 -07002217 /*
2218 * Compute a uniformly distributed offset within the first page
 2219		 * that is a multiple of the cacheline size, e.g. [0 .. 64) * 64
2220 * for 4 KiB pages and 64-byte cachelines.
2221 */
2222 prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state,
Jason Evans0a9f9a42015-06-22 18:50:32 -07002223 UINT64_C(6364136223846793009),
2224 UINT64_C(1442695040888963409));
Jason Evans8a03cf02015-05-04 09:58:36 -07002225 random_offset = ((uintptr_t)r) << LG_CACHELINE;
2226 } else
2227 random_offset = 0;
2228 run = arena_run_alloc_large(arena, usize + large_pad, zero);
Jason Evans0c5dd032014-09-29 01:31:39 -07002229 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002230 malloc_mutex_unlock(&arena->lock);
2231 return (NULL);
2232 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002233 miscelm = arena_run_to_miscelm(run);
Jason Evans8a03cf02015-05-04 09:58:36 -07002234 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2235 random_offset);
Jason Evans7372b152012-02-10 20:22:09 -08002236 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002237 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002238
Jason Evans7372b152012-02-10 20:22:09 -08002239 arena->stats.nmalloc_large++;
2240 arena->stats.nrequests_large++;
Jason Evans155bfa72014-10-05 17:54:10 -07002241 arena->stats.allocated_large += usize;
2242 arena->stats.lstats[index].nmalloc++;
2243 arena->stats.lstats[index].nrequests++;
2244 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002245 }
Jason Evans7372b152012-02-10 20:22:09 -08002246 if (config_prof)
Jason Evans155bfa72014-10-05 17:54:10 -07002247 idump = arena_prof_accum_locked(arena, usize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002248 malloc_mutex_unlock(&arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08002249 if (config_prof && idump)
2250 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08002251
Jason Evans551ebc42014-10-03 10:16:09 -07002252 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002253 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002254 if (unlikely(opt_junk_alloc))
Jason Evans155bfa72014-10-05 17:54:10 -07002255 memset(ret, 0xa5, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002256 else if (unlikely(opt_zero))
Jason Evans155bfa72014-10-05 17:54:10 -07002257 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002258 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002259 }
2260
2261 return (ret);
2262}
2263
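/*
 * A standalone sketch of the cache-oblivious offset computed above.  The
 * helper random_large_offset() is illustrative; the constants are the ones
 * used above, and the "top bits of an LCG step" shape mirrors what prng64()
 * is believed to do in this version.
 */
#if 0
#include <stdint.h>

#define	EX_LG_PAGE	12	/* 4 KiB pages. */
#define	EX_LG_CACHELINE	6	/* 64-byte cache lines. */

static uintptr_t
random_large_offset(uint64_t *state)
{
	uint64_t r;

	*state = (*state * UINT64_C(6364136223846793009)) +
	    UINT64_C(1442695040888963409);
	r = *state >> (64 - (EX_LG_PAGE - EX_LG_CACHELINE));
	/* r is uniform in [0 .. 64); offsets are 0, 64, ..., 4032 bytes. */
	return ((uintptr_t)r << EX_LG_CACHELINE);
}
#endif
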
Jason Evanse476f8a2010-01-16 09:53:50 -08002264/* Only handles large allocations that require more than page alignment. */
Jason Evans88fef7c2015-02-12 14:06:37 -08002265static void *
Jason Evans50883de2015-07-23 17:13:18 -07002266arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002267 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002268{
2269 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07002270 size_t alloc_size, leadsize, trailsize;
2271 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002272 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002273 arena_chunk_map_misc_t *miscelm;
2274 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002275
Jason Evans50883de2015-07-23 17:13:18 -07002276 assert(usize == PAGE_CEILING(usize));
Jason Evans93443682010-10-20 17:39:18 -07002277
Jason Evans88fef7c2015-02-12 14:06:37 -08002278 arena = arena_choose(tsd, arena);
2279 if (unlikely(arena == NULL))
2280 return (NULL);
2281
Jason Evans93443682010-10-20 17:39:18 -07002282 alignment = PAGE_CEILING(alignment);
Jason Evans50883de2015-07-23 17:13:18 -07002283 alloc_size = usize + large_pad + alignment - PAGE;
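	/*
	 * Illustrative note (assuming 4 KiB pages): the run allocated below
	 * is page-aligned, so the head trim needed to reach the requested
	 * alignment (leadsize) is at most alignment - PAGE; over-allocating
	 * by that amount guarantees that usize + large_pad bytes remain
	 * after trimming, e.g. a 16 KiB alignment needs a head trim of 0, 4,
	 * 8 or 12 KiB.
	 */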
Jason Evanse476f8a2010-01-16 09:53:50 -08002284
2285 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08002286 run = arena_run_alloc_large(arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07002287 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002288 malloc_mutex_unlock(&arena->lock);
2289 return (NULL);
2290 }
Jason Evans5ff709c2012-04-11 18:13:45 -07002291 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002292 miscelm = arena_run_to_miscelm(run);
2293 rpages = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002294
Jason Evans0c5dd032014-09-29 01:31:39 -07002295 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2296 (uintptr_t)rpages;
Jason Evans50883de2015-07-23 17:13:18 -07002297 assert(alloc_size >= leadsize + usize);
2298 trailsize = alloc_size - leadsize - usize - large_pad;
Jason Evans5ff709c2012-04-11 18:13:45 -07002299 if (leadsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07002300 arena_chunk_map_misc_t *head_miscelm = miscelm;
2301 arena_run_t *head_run = run;
2302
2303 miscelm = arena_miscelm_get(chunk,
2304 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2305 LG_PAGE));
2306 run = &miscelm->run;
2307
2308 arena_run_trim_head(arena, chunk, head_run, alloc_size,
2309 alloc_size - leadsize);
Jason Evans5ff709c2012-04-11 18:13:45 -07002310 }
2311 if (trailsize != 0) {
Jason Evans50883de2015-07-23 17:13:18 -07002312 arena_run_trim_tail(arena, chunk, run, usize + large_pad +
2313 trailsize, usize + large_pad, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002314 }
Jason Evans8fadb1a2015-08-04 10:49:46 -07002315 if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2316 size_t run_ind =
2317 arena_miscelm_to_pageind(arena_run_to_miscelm(run));
Jason Evansde249c82015-08-09 16:47:27 -07002318 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2319 bool decommitted = (arena_mapbits_decommitted_get(chunk,
2320 run_ind) != 0);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002321
Jason Evansde249c82015-08-09 16:47:27 -07002322 assert(decommitted); /* Cause of OOM. */
2323 arena_run_dalloc(arena, run, dirty, false, decommitted);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002324 malloc_mutex_unlock(&arena->lock);
2325 return (NULL);
2326 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002327 ret = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002328
Jason Evans7372b152012-02-10 20:22:09 -08002329 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002330 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002331
Jason Evans7372b152012-02-10 20:22:09 -08002332 arena->stats.nmalloc_large++;
2333 arena->stats.nrequests_large++;
Jason Evans50883de2015-07-23 17:13:18 -07002334 arena->stats.allocated_large += usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002335 arena->stats.lstats[index].nmalloc++;
2336 arena->stats.lstats[index].nrequests++;
2337 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002338 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002339 malloc_mutex_unlock(&arena->lock);
2340
Jason Evans551ebc42014-10-03 10:16:09 -07002341 if (config_fill && !zero) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002342 if (unlikely(opt_junk_alloc))
Jason Evans50883de2015-07-23 17:13:18 -07002343 memset(ret, 0xa5, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002344 else if (unlikely(opt_zero))
Jason Evans50883de2015-07-23 17:13:18 -07002345 memset(ret, 0, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002346 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002347 return (ret);
2348}
2349
Jason Evans88fef7c2015-02-12 14:06:37 -08002350void *
2351arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
2352 bool zero, tcache_t *tcache)
2353{
2354 void *ret;
2355
Jason Evans8a03cf02015-05-04 09:58:36 -07002356 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
Jason Evans51541752015-05-19 17:42:31 -07002357 && (usize & PAGE_MASK) == 0))) {
2358 /* Small; alignment doesn't require special run placement. */
Jason Evans88fef7c2015-02-12 14:06:37 -08002359 ret = arena_malloc(tsd, arena, usize, zero, tcache);
Jason Evans676df882015-09-11 20:50:20 -07002360 } else if (usize <= large_maxclass && alignment <= PAGE) {
Jason Evans51541752015-05-19 17:42:31 -07002361 /*
2362 * Large; alignment doesn't require special run placement.
2363 * However, the cached pointer may be at a random offset from
2364 * the base of the run, so do some bit manipulation to retrieve
2365 * the base.
2366 */
2367 ret = arena_malloc(tsd, arena, usize, zero, tcache);
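		/*
		 * For example, with 4 KiB pages PAGE_MASK is 0xfff, so the
		 * mask below rounds the pointer down to its page boundary,
		 * which is the run base because the random offset stays
		 * within the first page of the run.
		 */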
2368 if (config_cache_oblivious)
2369 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2370 } else {
Jason Evans676df882015-09-11 20:50:20 -07002371 if (likely(usize <= large_maxclass)) {
Jason Evans88fef7c2015-02-12 14:06:37 -08002372 ret = arena_palloc_large(tsd, arena, usize, alignment,
2373 zero);
2374 } else if (likely(alignment <= chunksize))
2375 ret = huge_malloc(tsd, arena, usize, zero, tcache);
2376 else {
2377 ret = huge_palloc(tsd, arena, usize, alignment, zero,
2378 tcache);
2379 }
2380 }
2381 return (ret);
2382}
2383
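/*
 * Used by heap profiling: ptr is backed by a LARGE_MINCLASS run, and recording
 * its small size class in the chunk map makes demoting size queries
 * (isalloc(ptr, true)) report the requested small size, as the asserts below
 * verify.
 */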
Jason Evans0b270a92010-03-31 16:45:04 -07002384void
2385arena_prof_promoted(const void *ptr, size_t size)
2386{
2387 arena_chunk_t *chunk;
Jason Evans155bfa72014-10-05 17:54:10 -07002388 size_t pageind;
Jason Evansd01fd192015-08-19 15:21:32 -07002389 szind_t binind;
Jason Evans0b270a92010-03-31 16:45:04 -07002390
Jason Evans78f73522012-04-18 13:38:40 -07002391 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07002392 assert(ptr != NULL);
2393 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evans155bfa72014-10-05 17:54:10 -07002394 assert(isalloc(ptr, false) == LARGE_MINCLASS);
2395 assert(isalloc(ptr, true) == LARGE_MINCLASS);
Jason Evansb1726102012-02-28 16:50:47 -08002396 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07002397
2398 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07002399 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans155bfa72014-10-05 17:54:10 -07002400 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08002401 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07002402 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07002403
Jason Evans155bfa72014-10-05 17:54:10 -07002404 assert(isalloc(ptr, false) == LARGE_MINCLASS);
Jason Evans122449b2012-04-06 00:35:09 -07002405 assert(isalloc(ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07002406}
Jason Evans6109fe02010-02-10 10:37:56 -08002407
Jason Evanse476f8a2010-01-16 09:53:50 -08002408static void
Jason Evans088e6a02010-10-18 00:04:44 -07002409arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08002410 arena_bin_t *bin)
2411{
Jason Evanse476f8a2010-01-16 09:53:50 -08002412
Jason Evans19b3d612010-03-18 20:36:40 -07002413 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002414 if (run == bin->runcur)
2415 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002416 else {
Jason Evansd01fd192015-08-19 15:21:32 -07002417 szind_t binind = arena_bin_index(extent_node_arena_get(
Jason Evansee41ad42015-02-15 18:04:46 -08002418 &chunk->node), bin);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002419 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2420
2421 if (bin_info->nregs != 1) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07002422 /*
2423 * This block's conditional is necessary because if the
2424 * run only contains one region, then it never gets
2425 * inserted into the non-full runs tree.
2426 */
Jason Evanse7a10582012-02-13 17:36:52 -08002427 arena_bin_runs_remove(bin, run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002428 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002429 }
Jason Evans088e6a02010-10-18 00:04:44 -07002430}
2431
2432static void
2433arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2434 arena_bin_t *bin)
2435{
Jason Evans088e6a02010-10-18 00:04:44 -07002436
2437 assert(run != bin->runcur);
Jason Evans0c5dd032014-09-29 01:31:39 -07002438 assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
2439 NULL);
Jason Evans86815df2010-03-13 20:32:56 -08002440
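	/*
	 * The bin lock is dropped across the arena-lock critical section
	 * below; the banner comments mark the window during which no bin
	 * state is touched.
	 */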
Jason Evanse00572b2010-03-14 19:43:56 -07002441 malloc_mutex_unlock(&bin->lock);
2442 /******************************/
Jason Evans86815df2010-03-13 20:32:56 -08002443 malloc_mutex_lock(&arena->lock);
Jason Evansde249c82015-08-09 16:47:27 -07002444 arena_run_dalloc_decommit(arena, chunk, run);
Jason Evans86815df2010-03-13 20:32:56 -08002445 malloc_mutex_unlock(&arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002446 /****************************/
2447 malloc_mutex_lock(&bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08002448 if (config_stats)
2449 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08002450}
2451
Jason Evans940a2e02010-10-17 17:51:37 -07002452static void
2453arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2454 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002455{
Jason Evanse476f8a2010-01-16 09:53:50 -08002456
Jason Evans8de6a022010-10-17 20:57:30 -07002457 /*
Jason Evanse7a10582012-02-13 17:36:52 -08002458 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
2459 * non-full run. It is okay to NULL runcur out rather than proactively
2460 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07002461 */
Jason Evanse7a10582012-02-13 17:36:52 -08002462 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07002463 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08002464 if (bin->runcur->nfree > 0)
2465 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07002466 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08002467 if (config_stats)
2468 bin->stats.reruns++;
2469 } else
2470 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07002471}
2472
Jason Evansfc0b3b72014-10-09 17:54:06 -07002473static void
2474arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2475 arena_chunk_map_bits_t *bitselm, bool junked)
Jason Evans940a2e02010-10-17 17:51:37 -07002476{
Jason Evans0c5dd032014-09-29 01:31:39 -07002477 size_t pageind, rpages_ind;
Jason Evans940a2e02010-10-17 17:51:37 -07002478 arena_run_t *run;
2479 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02002480 arena_bin_info_t *bin_info;
Jason Evansd01fd192015-08-19 15:21:32 -07002481 szind_t binind;
Jason Evans940a2e02010-10-17 17:51:37 -07002482
Jason Evansae4c7b42012-04-02 07:04:34 -07002483 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07002484 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2485 run = &arena_miscelm_get(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002486 binind = run->binind;
2487 bin = &arena->bins[binind];
Mike Hommey8b499712012-04-24 23:22:02 +02002488 bin_info = &arena_bin_info[binind];
Jason Evans940a2e02010-10-17 17:51:37 -07002489
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002490 if (!junked && config_fill && unlikely(opt_junk_free))
Jason Evans122449b2012-04-06 00:35:09 -07002491 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07002492
2493 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002494 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07002495 arena_dissociate_bin_run(chunk, run, bin);
Jason Evans8de6a022010-10-17 20:57:30 -07002496 arena_dalloc_bin_run(arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07002497 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07002498 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002499
Jason Evans7372b152012-02-10 20:22:09 -08002500 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002501 bin->stats.ndalloc++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002502 bin->stats.curregs--;
Jason Evans7372b152012-02-10 20:22:09 -08002503 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002504}
2505
Jason Evanse476f8a2010-01-16 09:53:50 -08002506void
Jason Evansfc0b3b72014-10-09 17:54:06 -07002507arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2508 arena_chunk_map_bits_t *bitselm)
2509{
2510
2511 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
2512}
2513
2514void
Jason Evans203484e2012-05-02 00:30:36 -07002515arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002516 size_t pageind, arena_chunk_map_bits_t *bitselm)
Jason Evans203484e2012-05-02 00:30:36 -07002517{
2518 arena_run_t *run;
2519 arena_bin_t *bin;
Jason Evans0c5dd032014-09-29 01:31:39 -07002520 size_t rpages_ind;
Jason Evans203484e2012-05-02 00:30:36 -07002521
Jason Evans0c5dd032014-09-29 01:31:39 -07002522 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2523 run = &arena_miscelm_get(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002524 bin = &arena->bins[run->binind];
Jason Evans203484e2012-05-02 00:30:36 -07002525 malloc_mutex_lock(&bin->lock);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002526 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
Jason Evans203484e2012-05-02 00:30:36 -07002527 malloc_mutex_unlock(&bin->lock);
2528}
2529
2530void
2531arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2532 size_t pageind)
2533{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002534 arena_chunk_map_bits_t *bitselm;
Jason Evans203484e2012-05-02 00:30:36 -07002535
2536 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07002537 /* arena_ptr_small_binind_get() does extra sanity checking. */
2538 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2539 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07002540 }
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002541 bitselm = arena_bitselm_get(chunk, pageind);
2542 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
Jason Evans203484e2012-05-02 00:30:36 -07002543}
Jason Evanse476f8a2010-01-16 09:53:50 -08002544
Jason Evans6b694c42014-01-07 16:47:56 -08002545#ifdef JEMALLOC_JET
2546#undef arena_dalloc_junk_large
2547#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
2548#endif
Jason Evansfc0b3b72014-10-09 17:54:06 -07002549void
Jason Evans6b694c42014-01-07 16:47:56 -08002550arena_dalloc_junk_large(void *ptr, size_t usize)
2551{
2552
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002553 if (config_fill && unlikely(opt_junk_free))
Jason Evans6b694c42014-01-07 16:47:56 -08002554 memset(ptr, 0x5a, usize);
2555}
2556#ifdef JEMALLOC_JET
2557#undef arena_dalloc_junk_large
2558#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2559arena_dalloc_junk_large_t *arena_dalloc_junk_large =
2560 JEMALLOC_N(arena_dalloc_junk_large_impl);
2561#endif
2562
Jason Evanse476f8a2010-01-16 09:53:50 -08002563void
Jason Evansfc0b3b72014-10-09 17:54:06 -07002564arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
2565 void *ptr, bool junked)
Jason Evanse476f8a2010-01-16 09:53:50 -08002566{
Jason Evans0c5dd032014-09-29 01:31:39 -07002567 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2568 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
2569 arena_run_t *run = &miscelm->run;
Jason Evans13668262010-01-31 03:57:29 -08002570
Jason Evans7372b152012-02-10 20:22:09 -08002571 if (config_fill || config_stats) {
Jason Evans8a03cf02015-05-04 09:58:36 -07002572 size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
2573 large_pad;
Jason Evanse476f8a2010-01-16 09:53:50 -08002574
Jason Evansfc0b3b72014-10-09 17:54:06 -07002575 if (!junked)
2576 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002577 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002578 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002579
Jason Evans7372b152012-02-10 20:22:09 -08002580 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08002581 arena->stats.allocated_large -= usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002582 arena->stats.lstats[index].ndalloc++;
2583 arena->stats.lstats[index].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08002584 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002585 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002586
Jason Evansde249c82015-08-09 16:47:27 -07002587 arena_run_dalloc_decommit(arena, chunk, run);
Jason Evanse476f8a2010-01-16 09:53:50 -08002588}
2589
Jason Evans203484e2012-05-02 00:30:36 -07002590void
Jason Evansfc0b3b72014-10-09 17:54:06 -07002591arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
2592 void *ptr)
2593{
2594
2595 arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
2596}
2597
2598void
Jason Evans203484e2012-05-02 00:30:36 -07002599arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
2600{
2601
2602 malloc_mutex_lock(&arena->lock);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002603 arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
Jason Evans203484e2012-05-02 00:30:36 -07002604 malloc_mutex_unlock(&arena->lock);
2605}
2606
Jason Evanse476f8a2010-01-16 09:53:50 -08002607static void
2608arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07002609 size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08002610{
Jason Evans0c5dd032014-09-29 01:31:39 -07002611 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2612 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
2613 arena_run_t *run = &miscelm->run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002614
2615 assert(size < oldsize);
2616
2617 /*
2618 * Shrink the run, and make trailing pages available for other
2619 * allocations.
2620 */
2621 malloc_mutex_lock(&arena->lock);
Jason Evans8a03cf02015-05-04 09:58:36 -07002622 arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
2623 large_pad, true);
Jason Evans7372b152012-02-10 20:22:09 -08002624 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002625 szind_t oldindex = size2index(oldsize) - NBINS;
2626 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002627
Jason Evans7372b152012-02-10 20:22:09 -08002628 arena->stats.ndalloc_large++;
2629 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002630 arena->stats.lstats[oldindex].ndalloc++;
2631 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08002632
Jason Evans7372b152012-02-10 20:22:09 -08002633 arena->stats.nmalloc_large++;
2634 arena->stats.nrequests_large++;
2635 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07002636 arena->stats.lstats[index].nmalloc++;
2637 arena->stats.lstats[index].nrequests++;
2638 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002639 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002640 malloc_mutex_unlock(&arena->lock);
2641}
2642
2643static bool
2644arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans560a4e12015-09-11 16:18:53 -07002645 size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002646{
Jason Evansae4c7b42012-04-02 07:04:34 -07002647 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans5716d972015-08-06 23:34:12 -07002648 size_t npages = (oldsize + large_pad) >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002649 size_t followsize;
Jason Evanse476f8a2010-01-16 09:53:50 -08002650
Jason Evans8a03cf02015-05-04 09:58:36 -07002651 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
2652 large_pad);
Jason Evanse476f8a2010-01-16 09:53:50 -08002653
2654 /* Try to extend the run. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002655 malloc_mutex_lock(&arena->lock);
Jason Evans560a4e12015-09-11 16:18:53 -07002656 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
2657 pageind+npages) != 0)
2658 goto label_fail;
2659 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
2660 if (oldsize + followsize >= usize_min) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002661 /*
2662 * The next run is available and sufficiently large. Split the
2663 * following run, then merge the first part with the existing
2664 * allocation.
2665 */
Guilherme Goncalves9c6a8d32014-12-17 14:46:35 -02002666 arena_run_t *run;
Jason Evans560a4e12015-09-11 16:18:53 -07002667 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
Jason Evans155bfa72014-10-05 17:54:10 -07002668
Jason Evans560a4e12015-09-11 16:18:53 -07002669 usize = usize_max;
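		/*
		 * Step usize down one size class at a time until the trailing
		 * free run can accommodate the extension; the enclosing check
		 * guarantees this terminates at or above usize_min.
		 */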
Jason Evans155bfa72014-10-05 17:54:10 -07002670 while (oldsize + followsize < usize)
2671 usize = index2size(size2index(usize)-1);
2672 assert(usize >= usize_min);
Jason Evans560a4e12015-09-11 16:18:53 -07002673 assert(usize >= oldsize);
Jason Evans5716d972015-08-06 23:34:12 -07002674 splitsize = usize - oldsize;
Jason Evans560a4e12015-09-11 16:18:53 -07002675 if (splitsize == 0)
2676 goto label_fail;
Jason Evans155bfa72014-10-05 17:54:10 -07002677
Guilherme Goncalves9c6a8d32014-12-17 14:46:35 -02002678 run = &arena_miscelm_get(chunk, pageind+npages)->run;
Jason Evans560a4e12015-09-11 16:18:53 -07002679 if (arena_run_split_large(arena, run, splitsize, zero))
2680 goto label_fail;
Jason Evanse476f8a2010-01-16 09:53:50 -08002681
Jason Evans088e6a02010-10-18 00:04:44 -07002682 size = oldsize + splitsize;
Jason Evans5716d972015-08-06 23:34:12 -07002683 npages = (size + large_pad) >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07002684
2685 /*
2686 * Mark the extended run as dirty if either portion of the run
2687 * was dirty before allocation. This is rather pedantic,
2688 * because there's not actually any sequence of events that
2689 * could cause the resulting run to be passed to
2690 * arena_run_dalloc() with the dirty argument set to false
2691 * (which is when dirty flag consistency would really matter).
2692 */
Jason Evans203484e2012-05-02 00:30:36 -07002693 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
2694 arena_mapbits_dirty_get(chunk, pageind+npages-1);
Jason Evans1f27abc2015-08-11 12:42:33 -07002695 flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
Jason Evans5716d972015-08-06 23:34:12 -07002696 arena_mapbits_large_set(chunk, pageind, size + large_pad,
Jason Evans1f27abc2015-08-11 12:42:33 -07002697 flag_dirty | (flag_unzeroed_mask &
2698 arena_mapbits_unzeroed_get(chunk, pageind)));
2699 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
2700 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2701 pageind+npages-1)));
Jason Evanse476f8a2010-01-16 09:53:50 -08002702
Jason Evans7372b152012-02-10 20:22:09 -08002703 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002704 szind_t oldindex = size2index(oldsize) - NBINS;
2705 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002706
Jason Evans7372b152012-02-10 20:22:09 -08002707 arena->stats.ndalloc_large++;
2708 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002709 arena->stats.lstats[oldindex].ndalloc++;
2710 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08002711
Jason Evans7372b152012-02-10 20:22:09 -08002712 arena->stats.nmalloc_large++;
2713 arena->stats.nrequests_large++;
2714 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07002715 arena->stats.lstats[index].nmalloc++;
2716 arena->stats.lstats[index].nrequests++;
2717 arena->stats.lstats[index].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07002718 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002719 malloc_mutex_unlock(&arena->lock);
2720 return (false);
2721 }
Jason Evans560a4e12015-09-11 16:18:53 -07002722label_fail:
Jason Evanse476f8a2010-01-16 09:53:50 -08002723 malloc_mutex_unlock(&arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002724 return (true);
2725}
2726
Jason Evans6b694c42014-01-07 16:47:56 -08002727#ifdef JEMALLOC_JET
2728#undef arena_ralloc_junk_large
2729#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
2730#endif
2731static void
2732arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
2733{
2734
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002735 if (config_fill && unlikely(opt_junk_free)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002736 memset((void *)((uintptr_t)ptr + usize), 0x5a,
2737 old_usize - usize);
2738 }
2739}
2740#ifdef JEMALLOC_JET
2741#undef arena_ralloc_junk_large
2742#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
2743arena_ralloc_junk_large_t *arena_ralloc_junk_large =
2744 JEMALLOC_N(arena_ralloc_junk_large_impl);
2745#endif
2746
Jason Evanse476f8a2010-01-16 09:53:50 -08002747/*
2748 * Try to resize a large allocation, in order to avoid copying. Growing
2749 * always fails if the following run is already in use.
2750 */
2751static bool
Jason Evans560a4e12015-09-11 16:18:53 -07002752arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
2753 size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002754{
Jason Evans560a4e12015-09-11 16:18:53 -07002755 arena_chunk_t *chunk;
2756 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08002757
Jason Evans560a4e12015-09-11 16:18:53 -07002758 if (oldsize == usize_max) {
2759 /* Current size class is compatible and maximal. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002760 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002761 }
Jason Evans560a4e12015-09-11 16:18:53 -07002762
2763 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2764 arena = extent_node_arena_get(&chunk->node);
2765
2766 if (oldsize < usize_max) {
2767 bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
2768 usize_min, usize_max, zero);
2769 if (config_fill && !ret && !zero) {
2770 if (unlikely(opt_junk_alloc)) {
2771 memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
2772 isalloc(ptr, config_prof) - oldsize);
2773 } else if (unlikely(opt_zero)) {
2774 memset((void *)((uintptr_t)ptr + oldsize), 0,
2775 isalloc(ptr, config_prof) - oldsize);
2776 }
2777 }
2778 return (ret);
2779 }
2780
2781 assert(oldsize > usize_max);
2782 /* Fill before shrinking in order to avoid a race. */
2783 arena_ralloc_junk_large(ptr, oldsize, usize_max);
2784 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
2785 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002786}
2787
Jason Evansb2c31662014-01-12 15:05:44 -08002788bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07002789arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
2790 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002791{
Jason Evans560a4e12015-09-11 16:18:53 -07002792 size_t usize_min, usize_max;
Jason Evanse476f8a2010-01-16 09:53:50 -08002793
Jason Evans560a4e12015-09-11 16:18:53 -07002794 /* Check for size overflow. */
2795 if (unlikely(size > HUGE_MAXCLASS))
2796 return (true);
2797 usize_min = s2u(size);
2798 /* Clamp extra if necessary to avoid (size + extra) overflow. */
2799 if (unlikely(size + extra > HUGE_MAXCLASS))
2800 extra = HUGE_MAXCLASS - size;
2801 usize_max = s2u(size + extra);
2802
Jason Evans676df882015-09-11 20:50:20 -07002803 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
Jason Evans88fef7c2015-02-12 14:06:37 -08002804 /*
2805 * Avoid moving the allocation if the size class can be left the
2806 * same.
2807 */
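		/*
		 * For example, under the default size classes (an assumption
		 * here), an allocation whose old usize is 48 can absorb any
		 * new request that still maps to the 48-byte class without
		 * moving.
		 */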
Jason Evans560a4e12015-09-11 16:18:53 -07002808 if (oldsize <= SMALL_MAXCLASS) {
2809 assert(arena_bin_info[size2index(oldsize)].reg_size ==
2810 oldsize);
2811 if ((usize_max <= SMALL_MAXCLASS &&
2812 size2index(usize_max) == size2index(oldsize)) ||
2813 (size <= oldsize && usize_max >= oldsize))
2814 return (false);
2815 } else {
2816 if (usize_max > SMALL_MAXCLASS) {
2817 if (!arena_ralloc_large(ptr, oldsize, usize_min,
2818 usize_max, zero))
Jason Evansb2c31662014-01-12 15:05:44 -08002819 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002820 }
2821 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002822
Jason Evans88fef7c2015-02-12 14:06:37 -08002823 /* Reallocation would require a move. */
2824 return (true);
Jason Evans560a4e12015-09-11 16:18:53 -07002825 } else {
2826 return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
2827 zero));
2828 }
2829}
2830
2831static void *
2832arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
2833 size_t alignment, bool zero, tcache_t *tcache)
2834{
2835
2836 if (alignment == 0)
2837 return (arena_malloc(tsd, arena, usize, zero, tcache));
2838 usize = sa2u(usize, alignment);
2839 if (usize == 0)
2840 return (NULL);
2841 return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
Jason Evans8e3c3c62010-09-17 15:46:18 -07002842}
Jason Evanse476f8a2010-01-16 09:53:50 -08002843
Jason Evans8e3c3c62010-09-17 15:46:18 -07002844void *
Jason Evans5460aa62014-09-22 21:09:23 -07002845arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
Jason Evans560a4e12015-09-11 16:18:53 -07002846 size_t alignment, bool zero, tcache_t *tcache)
Jason Evans8e3c3c62010-09-17 15:46:18 -07002847{
2848 void *ret;
Jason Evans560a4e12015-09-11 16:18:53 -07002849 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002850
Jason Evans560a4e12015-09-11 16:18:53 -07002851 usize = s2u(size);
2852 if (usize == 0)
2853 return (NULL);
2854
Jason Evans676df882015-09-11 20:50:20 -07002855 if (likely(usize <= large_maxclass)) {
Jason Evans88fef7c2015-02-12 14:06:37 -08002856 size_t copysize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002857
Jason Evans88fef7c2015-02-12 14:06:37 -08002858 /* Try to avoid moving the allocation. */
Jason Evans560a4e12015-09-11 16:18:53 -07002859 if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
Jason Evans88fef7c2015-02-12 14:06:37 -08002860 return (ptr);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002861
Jason Evans88fef7c2015-02-12 14:06:37 -08002862 /*
2863 * size and oldsize are different enough that we need to move
2864 * the object. In that case, fall back to allocating new space
2865 * and copying.
2866 */
Jason Evans560a4e12015-09-11 16:18:53 -07002867 ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
2868 zero, tcache);
2869 if (ret == NULL)
2870 return (NULL);
Jason Evans88fef7c2015-02-12 14:06:37 -08002871
2872 /*
2873 * Junk/zero-filling were already done by
2874 * ipalloc()/arena_malloc().
2875 */
2876
Jason Evans560a4e12015-09-11 16:18:53 -07002877 copysize = (usize < oldsize) ? usize : oldsize;
Jason Evans88fef7c2015-02-12 14:06:37 -08002878 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
2879 memcpy(ret, ptr, copysize);
2880 isqalloc(tsd, ptr, oldsize, tcache);
2881 } else {
Jason Evans560a4e12015-09-11 16:18:53 -07002882 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
2883 zero, tcache);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002884 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002885 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08002886}
2887
Jason Evans609ae592012-10-11 13:53:15 -07002888dss_prec_t
2889arena_dss_prec_get(arena_t *arena)
2890{
2891 dss_prec_t ret;
2892
2893 malloc_mutex_lock(&arena->lock);
2894 ret = arena->dss_prec;
2895 malloc_mutex_unlock(&arena->lock);
2896 return (ret);
2897}
2898
Jason Evans4d434ad2014-04-15 12:09:48 -07002899bool
Jason Evans609ae592012-10-11 13:53:15 -07002900arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2901{
2902
Jason Evans551ebc42014-10-03 10:16:09 -07002903 if (!have_dss)
Jason Evans4d434ad2014-04-15 12:09:48 -07002904 return (dss_prec != dss_prec_disabled);
Jason Evans609ae592012-10-11 13:53:15 -07002905 malloc_mutex_lock(&arena->lock);
2906 arena->dss_prec = dss_prec;
2907 malloc_mutex_unlock(&arena->lock);
Jason Evans4d434ad2014-04-15 12:09:48 -07002908 return (false);
Jason Evans609ae592012-10-11 13:53:15 -07002909}
2910
Jason Evans8d6a3e82015-03-18 18:55:33 -07002911ssize_t
2912arena_lg_dirty_mult_default_get(void)
2913{
2914
2915 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
2916}
2917
2918bool
2919arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
2920{
2921
2922 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
2923 return (true);
2924 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
2925 return (false);
2926}
2927
Jason Evans609ae592012-10-11 13:53:15 -07002928void
Jason Evans562d2662015-03-24 16:36:12 -07002929arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
2930 size_t *nactive, size_t *ndirty, arena_stats_t *astats,
2931 malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
2932 malloc_huge_stats_t *hstats)
Jason Evans609ae592012-10-11 13:53:15 -07002933{
2934 unsigned i;
2935
2936 malloc_mutex_lock(&arena->lock);
2937 *dss = dss_prec_names[arena->dss_prec];
Jason Evans562d2662015-03-24 16:36:12 -07002938 *lg_dirty_mult = arena->lg_dirty_mult;
Jason Evans609ae592012-10-11 13:53:15 -07002939 *nactive += arena->nactive;
2940 *ndirty += arena->ndirty;
2941
2942 astats->mapped += arena->stats.mapped;
2943 astats->npurge += arena->stats.npurge;
2944 astats->nmadvise += arena->stats.nmadvise;
2945 astats->purged += arena->stats.purged;
Jason Evans4581b972014-11-27 17:22:36 -02002946 astats->metadata_mapped += arena->stats.metadata_mapped;
2947 astats->metadata_allocated += arena_metadata_allocated_get(arena);
Jason Evans609ae592012-10-11 13:53:15 -07002948 astats->allocated_large += arena->stats.allocated_large;
2949 astats->nmalloc_large += arena->stats.nmalloc_large;
2950 astats->ndalloc_large += arena->stats.ndalloc_large;
2951 astats->nrequests_large += arena->stats.nrequests_large;
Jason Evanse2deab72014-05-15 22:22:27 -07002952 astats->allocated_huge += arena->stats.allocated_huge;
2953 astats->nmalloc_huge += arena->stats.nmalloc_huge;
2954 astats->ndalloc_huge += arena->stats.ndalloc_huge;
Jason Evans609ae592012-10-11 13:53:15 -07002955
2956 for (i = 0; i < nlclasses; i++) {
2957 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
2958 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
2959 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
2960 lstats[i].curruns += arena->stats.lstats[i].curruns;
2961 }
Jason Evans3c4d92e2014-10-12 22:53:59 -07002962
2963 for (i = 0; i < nhclasses; i++) {
2964 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
2965 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
2966 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
2967 }
Jason Evans609ae592012-10-11 13:53:15 -07002968 malloc_mutex_unlock(&arena->lock);
2969
2970 for (i = 0; i < NBINS; i++) {
2971 arena_bin_t *bin = &arena->bins[i];
2972
2973 malloc_mutex_lock(&bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07002974 bstats[i].nmalloc += bin->stats.nmalloc;
2975 bstats[i].ndalloc += bin->stats.ndalloc;
2976 bstats[i].nrequests += bin->stats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002977 bstats[i].curregs += bin->stats.curregs;
Jason Evans609ae592012-10-11 13:53:15 -07002978 if (config_tcache) {
2979 bstats[i].nfills += bin->stats.nfills;
2980 bstats[i].nflushes += bin->stats.nflushes;
2981 }
2982 bstats[i].nruns += bin->stats.nruns;
2983 bstats[i].reruns += bin->stats.reruns;
2984 bstats[i].curruns += bin->stats.curruns;
2985 malloc_mutex_unlock(&bin->lock);
2986 }
2987}
2988
Jason Evans8bb31982014-10-07 23:14:57 -07002989arena_t *
2990arena_new(unsigned ind)
Jason Evanse476f8a2010-01-16 09:53:50 -08002991{
Jason Evans8bb31982014-10-07 23:14:57 -07002992 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08002993 unsigned i;
2994 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002995
Jason Evans8bb31982014-10-07 23:14:57 -07002996 /*
Jason Evans3c4d92e2014-10-12 22:53:59 -07002997 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
2998 * because there is no way to clean up if base_alloc() OOMs.
Jason Evans8bb31982014-10-07 23:14:57 -07002999 */
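	/*
	 * Intended layout (mirroring the pointer arithmetic below):
	 *   [arena_t][pad to CACHELINE][lstats[nlclasses]][pad to QUANTUM]
	 *   [hstats[nhclasses]]
	 */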
3000 if (config_stats) {
3001 arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
Jason Evans3c4d92e2014-10-12 22:53:59 -07003002 + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)) +
3003 nhclasses * sizeof(malloc_huge_stats_t));
Jason Evans8bb31982014-10-07 23:14:57 -07003004 } else
3005 arena = (arena_t *)base_alloc(sizeof(arena_t));
3006 if (arena == NULL)
3007 return (NULL);
3008
Jason Evans6109fe02010-02-10 10:37:56 -08003009 arena->ind = ind;
Jason Evans597632b2011-03-18 13:41:33 -07003010 arena->nthreads = 0;
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003011 if (malloc_mutex_init(&arena->lock))
3012 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003013
Jason Evans7372b152012-02-10 20:22:09 -08003014 if (config_stats) {
3015 memset(&arena->stats, 0, sizeof(arena_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003016 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
3017 + CACHELINE_CEILING(sizeof(arena_t)));
Jason Evans7372b152012-02-10 20:22:09 -08003018 memset(arena->stats.lstats, 0, nlclasses *
3019 sizeof(malloc_large_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003020 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
3021 + CACHELINE_CEILING(sizeof(arena_t)) +
Jason Evans3c4d92e2014-10-12 22:53:59 -07003022 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
3023 memset(arena->stats.hstats, 0, nhclasses *
3024 sizeof(malloc_huge_stats_t));
Jason Evans7372b152012-02-10 20:22:09 -08003025 if (config_tcache)
3026 ql_new(&arena->tcache_ql);
3027 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003028
Jason Evans7372b152012-02-10 20:22:09 -08003029 if (config_prof)
3030 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08003031
Jason Evans8a03cf02015-05-04 09:58:36 -07003032 if (config_cache_oblivious) {
3033 /*
3034 * A nondeterministic seed based on the address of arena reduces
3035 * the likelihood of lockstep non-uniform cache index
3036 * utilization among identical concurrent processes, but at the
3037 * cost of test repeatability. For debug builds, instead use a
3038 * deterministic seed.
3039 */
3040 arena->offset_state = config_debug ? ind :
3041 (uint64_t)(uintptr_t)arena;
3042 }
3043
Jason Evans609ae592012-10-11 13:53:15 -07003044 arena->dss_prec = chunk_dss_prec_get();
3045
Jason Evanse476f8a2010-01-16 09:53:50 -08003046 arena->spare = NULL;
3047
Jason Evans8d6a3e82015-03-18 18:55:33 -07003048 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
Jason Evans0a9f9a42015-06-22 18:50:32 -07003049 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08003050 arena->nactive = 0;
3051 arena->ndirty = 0;
3052
Jason Evanse3d13062012-10-30 15:42:37 -07003053 arena_avail_tree_new(&arena->runs_avail);
Jason Evansee41ad42015-02-15 18:04:46 -08003054 qr_new(&arena->runs_dirty, rd_link);
Jason Evans738e0892015-02-18 01:15:50 -08003055 qr_new(&arena->chunks_cache, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08003056
3057 ql_new(&arena->huge);
3058 if (malloc_mutex_init(&arena->huge_mtx))
3059 return (NULL);
3060
Jason Evansb49a3342015-07-28 11:28:19 -04003061 extent_tree_szad_new(&arena->chunks_szad_cached);
3062 extent_tree_ad_new(&arena->chunks_ad_cached);
3063 extent_tree_szad_new(&arena->chunks_szad_retained);
3064 extent_tree_ad_new(&arena->chunks_ad_retained);
Jason Evansee41ad42015-02-15 18:04:46 -08003065 if (malloc_mutex_init(&arena->chunks_mtx))
3066 return (NULL);
3067 ql_new(&arena->node_cache);
3068 if (malloc_mutex_init(&arena->node_cache_mtx))
3069 return (NULL);
3070
Jason Evansb49a3342015-07-28 11:28:19 -04003071 arena->chunk_hooks = chunk_hooks_default;
Jason Evanse476f8a2010-01-16 09:53:50 -08003072
3073 /* Initialize bins. */
Jason Evansb1726102012-02-28 16:50:47 -08003074 for (i = 0; i < NBINS; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08003075 bin = &arena->bins[i];
Jason Evans86815df2010-03-13 20:32:56 -08003076 if (malloc_mutex_init(&bin->lock))
Jason Evans8bb31982014-10-07 23:14:57 -07003077 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003078 bin->runcur = NULL;
3079 arena_run_tree_new(&bin->runs);
Jason Evans7372b152012-02-10 20:22:09 -08003080 if (config_stats)
3081 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
Jason Evanse476f8a2010-01-16 09:53:50 -08003082 }
3083
Jason Evans8bb31982014-10-07 23:14:57 -07003084 return (arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08003085}
3086
Jason Evans49f7e8f2011-03-15 13:59:15 -07003087/*
3088 * Calculate bin_info->run_size such that it meets the following constraints:
3089 *
Jason Evans155bfa72014-10-05 17:54:10 -07003090 * *) bin_info->run_size <= arena_maxrun
Jason Evans47e57f92011-03-22 09:00:56 -07003091 * *) bin_info->nregs <= RUN_MAXREGS
Jason Evans49f7e8f2011-03-15 13:59:15 -07003092 *
Jason Evans0c5dd032014-09-29 01:31:39 -07003093 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
3094 * these settings are all interdependent.
Jason Evans49f7e8f2011-03-15 13:59:15 -07003095 */
Jason Evans0c5dd032014-09-29 01:31:39 -07003096static void
3097bin_info_run_size_calc(arena_bin_info_t *bin_info)
Jason Evans49f7e8f2011-03-15 13:59:15 -07003098{
Jason Evans122449b2012-04-06 00:35:09 -07003099 size_t pad_size;
Jason Evans0c5dd032014-09-29 01:31:39 -07003100 size_t try_run_size, perfect_run_size, actual_run_size;
3101 uint32_t try_nregs, perfect_nregs, actual_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003102
3103 /*
Jason Evans122449b2012-04-06 00:35:09 -07003104 * Determine redzone size based on minimum alignment and minimum
3105 * redzone size. Add padding to the end of the run if it is needed to
3106 * align the regions. The padding allows each redzone to be half the
3107 * minimum alignment; without the padding, each redzone would have to
3108 * be twice as large in order to maintain alignment.
3109 */
Jason Evans9c640bf2014-09-11 16:20:44 -07003110 if (config_fill && unlikely(opt_redzone)) {
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003111 size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
3112 1);
Jason Evans122449b2012-04-06 00:35:09 -07003113 if (align_min <= REDZONE_MINSIZE) {
3114 bin_info->redzone_size = REDZONE_MINSIZE;
3115 pad_size = 0;
3116 } else {
3117 bin_info->redzone_size = align_min >> 1;
3118 pad_size = bin_info->redzone_size;
3119 }
3120 } else {
3121 bin_info->redzone_size = 0;
3122 pad_size = 0;
3123 }
3124 bin_info->reg_interval = bin_info->reg_size +
3125 (bin_info->redzone_size << 1);
3126
3127 /*
Jason Evans0c5dd032014-09-29 01:31:39 -07003128 * Compute run size under ideal conditions (no redzones, no limit on run
3129 * size).
Jason Evans49f7e8f2011-03-15 13:59:15 -07003130 */
Jason Evans0c5dd032014-09-29 01:31:39 -07003131 try_run_size = PAGE;
3132 try_nregs = try_run_size / bin_info->reg_size;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003133 do {
Jason Evans0c5dd032014-09-29 01:31:39 -07003134 perfect_run_size = try_run_size;
3135 perfect_nregs = try_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003136
Jason Evansae4c7b42012-04-02 07:04:34 -07003137 try_run_size += PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07003138 try_nregs = try_run_size / bin_info->reg_size;
3139 } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
3140 assert(perfect_nregs <= RUN_MAXREGS);
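	/*
	 * Worked example (assuming 4 KiB pages): for reg_size == 96 the loop
	 * tries 4096 (42*96 == 4032), then 8192 (85*96 == 8160), and stops
	 * at 12288 (128*96 == 12288), so perfect_run_size == 12288 and
	 * perfect_nregs == 128.
	 */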
Jason Evans49f7e8f2011-03-15 13:59:15 -07003141
Jason Evans0c5dd032014-09-29 01:31:39 -07003142 actual_run_size = perfect_run_size;
3143 actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
3144
3145 /*
3146 * Redzones can require enough padding that not even a single region can
3147 * fit within the number of pages that would normally be dedicated to a
3148 * run for this size class. Increase the run size until at least one
3149 * region fits.
3150 */
3151 while (actual_nregs == 0) {
3152 assert(config_fill && unlikely(opt_redzone));
3153
3154 actual_run_size += PAGE;
3155 actual_nregs = (actual_run_size - pad_size) /
3156 bin_info->reg_interval;
3157 }
3158
3159 /*
3160 * Make sure that the run will fit within an arena chunk.
3161 */
Jason Evans155bfa72014-10-05 17:54:10 -07003162 while (actual_run_size > arena_maxrun) {
Jason Evans0c5dd032014-09-29 01:31:39 -07003163 actual_run_size -= PAGE;
3164 actual_nregs = (actual_run_size - pad_size) /
3165 bin_info->reg_interval;
3166 }
3167 assert(actual_nregs > 0);
Jason Evans5707d6f2015-03-06 17:14:05 -08003168 assert(actual_run_size == s2u(actual_run_size));
Jason Evans49f7e8f2011-03-15 13:59:15 -07003169
3170 /* Copy final settings. */
Jason Evans0c5dd032014-09-29 01:31:39 -07003171 bin_info->run_size = actual_run_size;
3172 bin_info->nregs = actual_nregs;
3173 bin_info->reg0_offset = actual_run_size - (actual_nregs *
3174 bin_info->reg_interval) - pad_size + bin_info->redzone_size;
Jason Evans122449b2012-04-06 00:35:09 -07003175
Jason Evans8a03cf02015-05-04 09:58:36 -07003176 if (actual_run_size > small_maxrun)
3177 small_maxrun = actual_run_size;
3178
Jason Evans122449b2012-04-06 00:35:09 -07003179 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
3180 * bin_info->reg_interval) + pad_size == bin_info->run_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003181}
3182
Jason Evansb1726102012-02-28 16:50:47 -08003183static void
Jason Evans49f7e8f2011-03-15 13:59:15 -07003184bin_info_init(void)
3185{
3186 arena_bin_info_t *bin_info;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003187
Jason Evans8a03cf02015-05-04 09:58:36 -07003188#define BIN_INFO_INIT_bin_yes(index, size) \
Jason Evansd04047c2014-05-28 16:11:55 -07003189 bin_info = &arena_bin_info[index]; \
Jason Evansb1726102012-02-28 16:50:47 -08003190 bin_info->reg_size = size; \
Jason Evans0c5dd032014-09-29 01:31:39 -07003191 bin_info_run_size_calc(bin_info); \
Jason Evansb1726102012-02-28 16:50:47 -08003192 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
Jason Evansd04047c2014-05-28 16:11:55 -07003193#define BIN_INFO_INIT_bin_no(index, size)
3194#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
3195 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
Jason Evansb1726102012-02-28 16:50:47 -08003196 SIZE_CLASSES
Jason Evansd04047c2014-05-28 16:11:55 -07003197#undef BIN_INFO_INIT_bin_yes
3198#undef BIN_INFO_INIT_bin_no
3199#undef SC
Jason Evans49f7e8f2011-03-15 13:59:15 -07003200}
3201
Jason Evans8a03cf02015-05-04 09:58:36 -07003202static bool
3203small_run_size_init(void)
3204{
3205
3206 assert(small_maxrun != 0);
3207
3208 small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
3209 LG_PAGE));
3210 if (small_run_tab == NULL)
3211 return (true);
3212
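	/*
	 * Record every page count that is the run size of some small bin;
	 * small_run_tab entries left false correspond to page counts that no
	 * small size class uses.
	 */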
3213#define TAB_INIT_bin_yes(index, size) { \
3214 arena_bin_info_t *bin_info = &arena_bin_info[index]; \
3215 small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
3216 }
3217#define TAB_INIT_bin_no(index, size)
3218#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
3219 TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
3220 SIZE_CLASSES
3221#undef TAB_INIT_bin_yes
3222#undef TAB_INIT_bin_no
3223#undef SC
3224
3225 return (false);
3226}
3227
3228bool
Jason Evansa0bf2422010-01-29 14:30:41 -08003229arena_boot(void)
Jason Evanse476f8a2010-01-16 09:53:50 -08003230{
Jason Evans7393f442010-10-01 17:35:43 -07003231 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08003232
Jason Evans8d6a3e82015-03-18 18:55:33 -07003233 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
3234
Jason Evanse476f8a2010-01-16 09:53:50 -08003235 /*
3236 * Compute the header size such that it is large enough to contain the
Jason Evans7393f442010-10-01 17:35:43 -07003237 * page map. The page map is biased to omit entries for the header
3238 * itself, so some iteration is necessary to compute the map bias.
3239 *
3240 * 1) Compute safe header_size and map_bias values that include enough
3241 * space for an unbiased page map.
3242 * 2) Refine map_bias based on (1) to omit the header pages in the page
3243 * map. The resulting map_bias may be one too small.
3244 * 3) Refine map_bias based on (2). The result will be >= the result
3245 * from (2), and will always be correct.
Jason Evanse476f8a2010-01-16 09:53:50 -08003246 */
Jason Evans7393f442010-10-01 17:35:43 -07003247 map_bias = 0;
3248 for (i = 0; i < 3; i++) {
Dmitry-Mea306a602015-09-04 13:15:28 +03003249 size_t header_size = offsetof(arena_chunk_t, map_bits) +
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003250 ((sizeof(arena_chunk_map_bits_t) +
3251 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
Jason Evans0c5dd032014-09-29 01:31:39 -07003252 map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
Jason Evans7393f442010-10-01 17:35:43 -07003253 }
3254 assert(map_bias > 0);
3255
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003256 map_misc_offset = offsetof(arena_chunk_t, map_bits) +
3257 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
3258
Jason Evans155bfa72014-10-05 17:54:10 -07003259 arena_maxrun = chunksize - (map_bias << LG_PAGE);
Jason Evansfc0b3b72014-10-09 17:54:06 -07003260 assert(arena_maxrun > 0);
Jason Evans676df882015-09-11 20:50:20 -07003261 large_maxclass = index2size(size2index(chunksize)-1);
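	/*
	 * For example, with the default 2 MiB chunk size (an assumption
	 * here), large_maxclass is the largest size class below 2 MiB, i.e.
	 * the 1.75 MiB class, subject to the arena_maxrun clamp below.
	 */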
3262 if (large_maxclass > arena_maxrun) {
Jason Evans155bfa72014-10-05 17:54:10 -07003263 /*
3264 * For small chunk sizes it's possible for there to be fewer
3265 * non-header pages available than are necessary to serve the
3266 * size classes just below chunksize.
3267 */
Jason Evans676df882015-09-11 20:50:20 -07003268 large_maxclass = arena_maxrun;
Jason Evans155bfa72014-10-05 17:54:10 -07003269 }
Jason Evans676df882015-09-11 20:50:20 -07003270 assert(large_maxclass > 0);
3271 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
Jason Evans3c4d92e2014-10-12 22:53:59 -07003272 nhclasses = NSIZES - nlclasses - NBINS;
Jason Evansa0bf2422010-01-29 14:30:41 -08003273
Jason Evansb1726102012-02-28 16:50:47 -08003274 bin_info_init();
Jason Evans8a03cf02015-05-04 09:58:36 -07003275 return (small_run_size_init());
Jason Evanse476f8a2010-01-16 09:53:50 -08003276}
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003277
3278void
3279arena_prefork(arena_t *arena)
3280{
3281 unsigned i;
3282
3283 malloc_mutex_prefork(&arena->lock);
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003284 malloc_mutex_prefork(&arena->huge_mtx);
3285 malloc_mutex_prefork(&arena->chunks_mtx);
3286 malloc_mutex_prefork(&arena->node_cache_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003287 for (i = 0; i < NBINS; i++)
3288 malloc_mutex_prefork(&arena->bins[i].lock);
3289}
3290
3291void
3292arena_postfork_parent(arena_t *arena)
3293{
3294 unsigned i;
3295
3296 for (i = 0; i < NBINS; i++)
3297 malloc_mutex_postfork_parent(&arena->bins[i].lock);
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003298 malloc_mutex_postfork_parent(&arena->node_cache_mtx);
3299 malloc_mutex_postfork_parent(&arena->chunks_mtx);
3300 malloc_mutex_postfork_parent(&arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003301 malloc_mutex_postfork_parent(&arena->lock);
3302}
3303
3304void
3305arena_postfork_child(arena_t *arena)
3306{
3307 unsigned i;
3308
3309 for (i = 0; i < NBINS; i++)
3310 malloc_mutex_postfork_child(&arena->bins[i].lock);
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003311 malloc_mutex_postfork_child(&arena->node_cache_mtx);
3312 malloc_mutex_postfork_child(&arena->chunks_mtx);
3313 malloc_mutex_postfork_child(&arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003314 malloc_mutex_postfork_child(&arena->lock);
3315}