#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t arena_bin_info[NBINS];

size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t arena_maxclass; /* Max size class for arenas. */
unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk);
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

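/* Return the map bits for the page that the given miscelm describes. */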
JEMALLOC_INLINE_C size_t
arena_miscelm_to_bits(arena_chunk_map_misc_t *miscelm)
{
    arena_chunk_t *chunk = CHUNK_ADDR2BASE(miscelm);
    size_t pageind = arena_miscelm_to_pageind(miscelm);

    return (arena_mapbits_get(chunk, pageind));
}

JEMALLOC_INLINE_C int
arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
    uintptr_t a_miscelm = (uintptr_t)a;
    uintptr_t b_miscelm = (uintptr_t)b;

    assert(a != NULL);
    assert(b != NULL);

    return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
    rb_link, arena_run_comp)

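/*
 * Order available runs by size, then by miscelm address.  CHUNK_MAP_KEY-tagged
 * search keys encode a size and compare lower than any real run of that size,
 * so that nsearch() finds the lowest-addressed best fit.
 */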
JEMALLOC_INLINE_C int
arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
    int ret;
    size_t a_size;
    size_t b_size = arena_miscelm_to_bits(b) & ~PAGE_MASK;
    uintptr_t a_miscelm = (uintptr_t)a;
    uintptr_t b_miscelm = (uintptr_t)b;

    if (a_miscelm & CHUNK_MAP_KEY)
        a_size = a_miscelm & ~PAGE_MASK;
    else
        a_size = arena_miscelm_to_bits(a) & ~PAGE_MASK;

    ret = (a_size > b_size) - (a_size < b_size);
    if (ret == 0) {
        if (!(a_miscelm & CHUNK_MAP_KEY))
            ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
        else {
            /*
             * Treat keys as if they are lower than anything else.
             */
            ret = -1;
        }
    }

    return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
    arena_chunk_map_misc_t, rb_link, arena_avail_comp)

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
        pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
        pageind));
}

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
    arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
    assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
        CHUNK_MAP_DIRTY);

    qr_new(miscelm, rd_link);
    qr_meld(&arena->runs_dirty, miscelm, rd_link);
    arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
    arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
    assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
        CHUNK_MAP_DIRTY);

    qr_remove(miscelm, rd_link);
    assert(arena->ndirty >= npages);
    arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

    return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

    if (cache) {
        extent_node_dirty_linkage_init(node);
        extent_node_dirty_insert(node, &arena->runs_dirty,
            &arena->chunks_cache);
        arena->ndirty += arena_chunk_dirty_npages(node);
    }
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

    if (dirty) {
        extent_node_dirty_remove(node);
        assert(arena->ndirty >= arena_chunk_dirty_npages(node));
        arena->ndirty -= arena_chunk_dirty_npages(node);
    }
}

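/* Allocate a region from the given run, using the run's free-region bitmap. */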
JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
    void *ret;
    unsigned regind;
    arena_chunk_map_misc_t *miscelm;
    void *rpages;

    assert(run->nfree > 0);
    assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

    regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
    miscelm = arena_run_to_miscelm(run);
    rpages = arena_miscelm_to_rpages(miscelm);
    ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
        (uintptr_t)(bin_info->reg_interval * regind));
    run->nfree--;
    return (ret);
}

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    size_t mapbits = arena_mapbits_get(chunk, pageind);
    index_t binind = arena_ptr_small_binind_get(ptr, mapbits);
    arena_bin_info_t *bin_info = &arena_bin_info[binind];
    unsigned regind = arena_run_regind(run, bin_info, ptr);

    assert(run->nfree < bin_info->nregs);
    /* Freeing an interior pointer can cause assertion failure. */
    assert(((uintptr_t)ptr -
        ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
        (uintptr_t)bin_info->reg0_offset)) %
        (uintptr_t)bin_info->reg_interval == 0);
    assert((uintptr_t)ptr >=
        (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
        (uintptr_t)bin_info->reg0_offset);
    /* Freeing an unallocated pointer can cause assertion failure. */
    assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

    bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
    run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
        (run_ind << LG_PAGE)), (npages << LG_PAGE));
    memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
        (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

    JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
        << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
    size_t i;
    UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

    arena_run_page_mark_zeroed(chunk, run_ind);
    for (i = 0; i < PAGE / sizeof(size_t); i++)
        assert(p[i] == 0);
}

static void
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{

    if (config_stats) {
        ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
            - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
            LG_PAGE);
        if (cactive_diff != 0)
            stats_cactive_add(cactive_diff);
    }
}

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t need_pages)
{
    size_t total_pages, rem_pages;

    total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
        LG_PAGE;
    assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
        flag_dirty);
    assert(need_pages <= total_pages);
    rem_pages = total_pages - need_pages;

    arena_avail_remove(arena, chunk, run_ind, total_pages);
    if (flag_dirty != 0)
        arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
    arena_cactive_update(arena, need_pages, 0);
    arena->nactive += need_pages;

    /* Keep track of trailing unused pages for later use. */
    if (rem_pages > 0) {
        if (flag_dirty != 0) {
            arena_mapbits_unallocated_set(chunk,
                run_ind+need_pages, (rem_pages << LG_PAGE),
                flag_dirty);
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                flag_dirty);
            arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
                rem_pages);
        } else {
            arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
                (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+need_pages));
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+total_pages-1));
        }
        arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
    }
}

static void
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    size_t flag_dirty, run_ind, need_pages, i;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    run_ind = arena_miscelm_to_pageind(miscelm);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);

    if (remove) {
        arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
            need_pages);
    }

    if (zero) {
        if (flag_dirty == 0) {
            /*
             * The run is clean, so some pages may be zeroed (i.e.
             * never before touched).
             */
            for (i = 0; i < need_pages; i++) {
                if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
                    != 0)
                    arena_run_zero(chunk, run_ind+i, 1);
                else if (config_debug) {
                    arena_run_page_validate_zeroed(chunk,
                        run_ind+i);
                } else {
                    arena_run_page_mark_zeroed(chunk,
                        run_ind+i);
                }
            }
        } else {
            /* The run is dirty, so all pages must be zeroed. */
            arena_run_zero(chunk, run_ind, need_pages);
        }
    } else {
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
            (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
    }

    /*
     * Set the last element first, in case the run only contains one page
     * (i.e. both statements set the same element).
     */
    arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
    arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
}

static void
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

    arena_run_split_large_helper(arena, run, size, true, zero);
}

static void
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

    arena_run_split_large_helper(arena, run, size, false, zero);
}

static void
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    index_t binind)
{
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    size_t flag_dirty, run_ind, need_pages, i;

    assert(binind != BININD_INVALID);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    run_ind = arena_miscelm_to_pageind(miscelm);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);

    arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);

    for (i = 0; i < need_pages; i++) {
        arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
        if (config_debug && flag_dirty == 0 &&
            arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
            arena_run_page_validate_zeroed(chunk, run_ind+i);
    }
    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
        (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
    arena_chunk_t *chunk;

    assert(arena->spare != NULL);

    chunk = arena->spare;
    arena->spare = NULL;

    assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
        arena_maxrun);
    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
        arena_maxrun);
    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
        arena_mapbits_dirty_get(chunk, chunk_npages-1));

    return (chunk);
}

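/*
 * Allocate and register a new arena chunk via the arena's chunk_alloc hook.
 * arena->lock is dropped around the allocation call.
 */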
static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, bool *zero)
{
    arena_chunk_t *chunk;
    chunk_alloc_t *chunk_alloc;
    chunk_dalloc_t *chunk_dalloc;

    chunk_alloc = arena->chunk_alloc;
    chunk_dalloc = arena->chunk_dalloc;
    malloc_mutex_unlock(&arena->lock);
    chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
        arena->ind, NULL, chunksize, chunksize, zero);
    if (chunk != NULL) {
        extent_node_init(&chunk->node, arena, chunk, chunksize, *zero);
        extent_node_achunk_set(&chunk->node, true);
        if (chunk_register(chunk, &chunk->node)) {
            chunk_dalloc((void *)chunk, chunksize, arena->ind);
            chunk = NULL;
        }
    }
    malloc_mutex_lock(&arena->lock);
    if (config_stats && chunk != NULL) {
        arena->stats.mapped += chunksize;
        arena->stats.metadata_mapped += (map_bias << LG_PAGE);
    }

    return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
    arena_chunk_t *chunk;
    bool zero;
    size_t unzeroed, i;

    assert(arena->spare == NULL);

    zero = false;
    chunk = arena_chunk_alloc_internal(arena, &zero);
    if (chunk == NULL)
        return (NULL);

    /*
     * Initialize the map to contain one maximal free untouched run. Mark
     * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
     */
    unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
    arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, unzeroed);
    /*
     * There is no need to initialize the internal page map entries unless
     * the chunk is not zeroed.
     */
    if (!zero) {
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
            (void *)arena_bitselm_get(chunk, map_bias+1),
            (size_t)((uintptr_t) arena_bitselm_get(chunk,
            chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
            map_bias+1)));
        for (i = map_bias+1; i < chunk_npages-1; i++)
            arena_mapbits_unzeroed_set(chunk, i, unzeroed);
    } else {
        JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
            *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
            arena_bitselm_get(chunk, chunk_npages-1) -
            (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
        if (config_debug) {
            for (i = map_bias+1; i < chunk_npages-1; i++) {
                assert(arena_mapbits_unzeroed_get(chunk, i) ==
                    unzeroed);
            }
        }
    }
    arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
        unzeroed);

    return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
    arena_chunk_t *chunk;

    if (arena->spare != NULL)
        chunk = arena_chunk_init_spare(arena);
    else {
        chunk = arena_chunk_init_hard(arena);
        if (chunk == NULL)
            return (NULL);
    }

    /* Insert the run into the runs_avail tree. */
    arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

    return (chunk);
}

static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{

    assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
        arena_maxrun);
    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
        arena_maxrun);
    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
        arena_mapbits_dirty_get(chunk, chunk_npages-1));

    /*
     * Remove run from the runs_avail tree, so that the arena does not use
     * it.
     */
    arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

    if (arena->spare != NULL) {
        arena_chunk_t *spare = arena->spare;
        chunk_dalloc_t *chunk_dalloc;

        arena->spare = chunk;
        if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
            arena_run_dirty_remove(arena, spare, map_bias,
                chunk_npages-map_bias);
        }
        chunk_dalloc = arena->chunk_dalloc;
        malloc_mutex_unlock(&arena->lock);
        chunk_deregister(spare, &spare->node);
        chunk_dalloc((void *)spare, chunksize, arena->ind);
        malloc_mutex_lock(&arena->lock);
        if (config_stats) {
            arena->stats.mapped -= chunksize;
            arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
        }
    } else
        arena->spare = chunk;
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.nmalloc_huge++;
    arena->stats.allocated_huge += usize;
    arena->stats.hstats[index].nmalloc++;
    arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.nmalloc_huge--;
    arena->stats.allocated_huge -= usize;
    arena->stats.hstats[index].nmalloc--;
    arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.ndalloc_huge++;
    arena->stats.allocated_huge -= usize;
    arena->stats.hstats[index].ndalloc++;
    arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.ndalloc_huge--;
    arena->stats.allocated_huge += usize;
    arena->stats.hstats[index].ndalloc--;
    arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

    arena_huge_dalloc_stats_update(arena, oldsize);
    arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

    arena_huge_dalloc_stats_update_undo(arena, oldsize);
    arena_huge_malloc_stats_update_undo(arena, usize);
}

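/*
 * arena_node_alloc() and arena_node_dalloc() maintain a per-arena cache of
 * extent nodes, falling back to base_alloc() only when the cache is empty.
 */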
extent_node_t *
arena_node_alloc(arena_t *arena)
{
    extent_node_t *node;

    malloc_mutex_lock(&arena->node_cache_mtx);
    node = ql_last(&arena->node_cache, ql_link);
    if (node == NULL) {
        malloc_mutex_unlock(&arena->node_cache_mtx);
        return (base_alloc(sizeof(extent_node_t)));
    }
    ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
    malloc_mutex_unlock(&arena->node_cache_mtx);
    return (node);
}

void
arena_node_dalloc(arena_t *arena, extent_node_t *node)
{

    malloc_mutex_lock(&arena->node_cache_mtx);
    ql_elm_new(node, ql_link);
    ql_tail_insert(&arena->node_cache, node, ql_link);
    malloc_mutex_unlock(&arena->node_cache_mtx);
}

void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
    bool *zero)
{
    void *ret;
    chunk_alloc_t *chunk_alloc;
    chunk_dalloc_t *chunk_dalloc;
    size_t csize = CHUNK_CEILING(usize);

    malloc_mutex_lock(&arena->lock);
    chunk_alloc = arena->chunk_alloc;
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        /* Optimistically update stats prior to unlocking. */
        arena_huge_malloc_stats_update(arena, usize);
        arena->stats.mapped += usize;
    }
    arena->nactive += (usize >> LG_PAGE);
    malloc_mutex_unlock(&arena->lock);

    ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, NULL,
        csize, alignment, zero);
    if (ret == NULL) {
        /* Revert optimistic stats updates. */
        malloc_mutex_lock(&arena->lock);
        if (config_stats) {
            arena_huge_malloc_stats_update_undo(arena, usize);
            arena->stats.mapped -= usize;
        }
        arena->nactive -= (usize >> LG_PAGE);
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }

    if (config_stats)
        stats_cactive_add(usize);

    return (ret);
}

void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
    chunk_dalloc_t *chunk_dalloc;

    malloc_mutex_lock(&arena->lock);
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        arena_huge_dalloc_stats_update(arena, usize);
        arena->stats.mapped -= usize;
        stats_cactive_sub(usize);
    }
    arena->nactive -= (usize >> LG_PAGE);
    malloc_mutex_unlock(&arena->lock);
    chunk_dalloc(chunk, CHUNK_CEILING(usize), arena->ind);
}

void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{

    assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
    assert(oldsize != usize);

    malloc_mutex_lock(&arena->lock);
    if (config_stats)
        arena_huge_ralloc_stats_update(arena, oldsize, usize);
    if (oldsize < usize) {
        size_t udiff = usize - oldsize;
        arena->nactive += udiff >> LG_PAGE;
        if (config_stats)
            stats_cactive_add(udiff);
    } else {
        size_t udiff = oldsize - usize;
        arena->nactive -= udiff >> LG_PAGE;
        if (config_stats)
            stats_cactive_sub(udiff);
    }
    malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{
    chunk_dalloc_t *chunk_dalloc;
    size_t udiff = oldsize - usize;
    size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

    malloc_mutex_lock(&arena->lock);
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        arena_huge_ralloc_stats_update(arena, oldsize, usize);
        if (cdiff != 0) {
            arena->stats.mapped -= cdiff;
            stats_cactive_sub(udiff);
        }
    }
    arena->nactive -= udiff >> LG_PAGE;
    malloc_mutex_unlock(&arena->lock);
    if (cdiff != 0) {
        chunk_dalloc((void *)((uintptr_t)chunk + CHUNK_CEILING(usize)),
            cdiff, arena->ind);
    }
}

bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize, bool *zero)
{
    chunk_alloc_t *chunk_alloc;
    chunk_dalloc_t *chunk_dalloc;
    size_t udiff = usize - oldsize;
    size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

    malloc_mutex_lock(&arena->lock);
    chunk_alloc = arena->chunk_alloc;
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        /* Optimistically update stats prior to unlocking. */
        arena_huge_ralloc_stats_update(arena, oldsize, usize);
        arena->stats.mapped += cdiff;
    }
    arena->nactive += (udiff >> LG_PAGE);
    malloc_mutex_unlock(&arena->lock);

    if (chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
        (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)), cdiff,
        chunksize, zero) == NULL) {
        /* Revert optimistic stats updates. */
        malloc_mutex_lock(&arena->lock);
        if (config_stats) {
            arena_huge_ralloc_stats_update_undo(arena,
                oldsize, usize);
            arena->stats.mapped -= cdiff;
        }
        arena->nactive -= (udiff >> LG_PAGE);
        malloc_mutex_unlock(&arena->lock);
        return (true);
    }

    if (config_stats)
        stats_cactive_add(udiff);

    return (false);
}

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
    arena_chunk_map_misc_t *miscelm;
    arena_chunk_map_misc_t *key;

    key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
    miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
    if (miscelm != NULL) {
        arena_run_t *run = &miscelm->run;
        arena_run_split_large(arena, &miscelm->run, size, zero);
        return (run);
    }

    return (NULL);
}

static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxrun);
    assert((size & PAGE_MASK) == 0);

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_large_helper(arena, size, zero);
    if (run != NULL)
        return (run);

    /*
     * No usable runs. Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = &arena_miscelm_get(chunk, map_bias)->run;
        arena_run_split_large(arena, run, size, zero);
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
{
    arena_run_t *run;
    arena_chunk_map_misc_t *miscelm;
    arena_chunk_map_misc_t *key;

    key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
    miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
    if (miscelm != NULL) {
        run = &miscelm->run;
        arena_run_split_small(arena, run, size, binind);
        return (run);
    }

    return (NULL);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxrun);
    assert((size & PAGE_MASK) == 0);
    assert(binind != BININD_INVALID);

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_small_helper(arena, size, binind);
    if (run != NULL)
        return (run);

    /*
     * No usable runs. Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = &arena_miscelm_get(chunk, map_bias)->run;
        arena_run_split_small(arena, run, size, binind);
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    return (arena_run_alloc_small_helper(arena, size, binind));
}

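/*
 * Purge dirty pages if their count exceeds the threshold implied by
 * opt_lg_dirty_mult (nactive >> opt_lg_dirty_mult, but no less than
 * chunk_npages).
 */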
JEMALLOC_INLINE_C void
arena_maybe_purge(arena_t *arena)
{
    size_t threshold;

    /* Don't purge if the option is disabled. */
    if (opt_lg_dirty_mult < 0)
        return;
    threshold = (arena->nactive >> opt_lg_dirty_mult);
    threshold = threshold < chunk_npages ? chunk_npages : threshold;
    /*
     * Don't purge unless the number of purgeable pages exceeds the
     * threshold.
     */
    if (arena->ndirty <= threshold)
        return;

    arena_purge(arena, false);
}

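/*
 * Count dirty pages by walking the runs_dirty ring, which interleaves dirty
 * runs with cached chunks.  Used only to validate arena->ndirty (see
 * arena_purge()).
 */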
static size_t
arena_dirty_count(arena_t *arena)
{
    size_t ndirty = 0;
    arena_chunk_map_misc_t *runselm;
    extent_node_t *chunkselm;

    for (runselm = qr_next(&arena->runs_dirty, rd_link),
        chunkselm = qr_next(&arena->chunks_cache, cc_link);
        runselm != &arena->runs_dirty; runselm = qr_next(runselm,
        rd_link)) {
        size_t npages;

        if (runselm == &chunkselm->runs_dirty) {
            npages = extent_node_size_get(chunkselm) >> LG_PAGE;
            chunkselm = qr_next(chunkselm, cc_link);
        } else {
            arena_chunk_t *chunk = (arena_chunk_t
                *)CHUNK_ADDR2BASE(runselm);
            size_t pageind = arena_miscelm_to_pageind(runselm);
            assert(arena_mapbits_allocated_get(chunk, pageind) ==
                0);
            assert(arena_mapbits_large_get(chunk, pageind) == 0);
            assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
            npages = arena_mapbits_unallocated_size_get(chunk,
                pageind) >> LG_PAGE;
        }
        ndirty += npages;
    }

    return (ndirty);
}

static size_t
arena_compute_npurge(arena_t *arena, bool all)
{
    size_t npurge;

    /*
     * Compute the minimum number of pages that this thread should try to
     * purge.
     */
    if (!all) {
        size_t threshold = (arena->nactive >> opt_lg_dirty_mult);
        threshold = threshold < chunk_npages ? chunk_npages : threshold;

        npurge = arena->ndirty - threshold;
    } else
        npurge = arena->ndirty;

    return (npurge);
}

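/*
 * Move at least npurge dirty pages onto the purge_runs_sentinel and
 * purge_chunks_sentinel rings, temporarily allocating the underlying runs and
 * chunks so that they cannot be reused before they have been purged.
 */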
static size_t
arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
    arena_chunk_map_misc_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
    arena_chunk_map_misc_t *runselm, *runselm_next;
    extent_node_t *chunkselm;
    size_t nstashed = 0;

    /* Stash at least npurge pages. */
    for (runselm = qr_next(&arena->runs_dirty, rd_link),
        chunkselm = qr_next(&arena->chunks_cache, cc_link);
        runselm != &arena->runs_dirty; runselm = runselm_next) {
        size_t npages;
        runselm_next = qr_next(runselm, rd_link);

        if (runselm == &chunkselm->runs_dirty) {
            extent_node_t *chunkselm_next, *tnode;
            void *addr;
            size_t size;
            bool zeroed, zero;
            UNUSED void *chunk;

            chunkselm_next = qr_next(chunkselm, cc_link);
            /*
             * Cache contents of chunkselm prior to it being
             * destroyed as a side effect of allocating the chunk.
             */
            addr = extent_node_addr_get(chunkselm);
            size = extent_node_size_get(chunkselm);
            zeroed = extent_node_zeroed_get(chunkselm);
            /* Allocate. */
            zero = false;
            chunk = arena->chunk_alloc(addr, size, chunksize, &zero,
                arena->ind);
            assert(chunk == addr);
            assert(zero == zeroed);
            /*
             * Create a temporary node to link into the ring of
             * stashed allocations. OOM shouldn't be possible
             * because chunk allocation just cached a node.
             */
            tnode = arena_node_alloc(arena);
            assert(tnode != NULL);
            /* Stash. */
            extent_node_init(tnode, arena, addr, size, zeroed);
            extent_node_dirty_linkage_init(tnode);
            extent_node_dirty_insert(tnode, purge_runs_sentinel,
                purge_chunks_sentinel);
            npages = size >> LG_PAGE;
            chunkselm = chunkselm_next;
        } else {
            arena_chunk_t *chunk =
                (arena_chunk_t *)CHUNK_ADDR2BASE(runselm);
            size_t pageind = arena_miscelm_to_pageind(runselm);
            arena_run_t *run = &runselm->run;
            size_t run_size =
                arena_mapbits_unallocated_size_get(chunk, pageind);

            npages = run_size >> LG_PAGE;

            assert(pageind + npages <= chunk_npages);
            assert(arena_mapbits_dirty_get(chunk, pageind) ==
                arena_mapbits_dirty_get(chunk, pageind+npages-1));

            /*
             * If purging the spare chunk's run, make it available
             * prior to allocation.
             */
            if (chunk == arena->spare)
                arena_chunk_alloc(arena);

            /* Temporarily allocate the free dirty run. */
            arena_run_split_large(arena, run, run_size, false);
            /* Stash. */
            if (false)
                qr_new(runselm, rd_link); /* Redundant. */
            else {
                assert(qr_next(runselm, rd_link) == runselm);
                assert(qr_prev(runselm, rd_link) == runselm);
            }
            qr_meld(purge_runs_sentinel, runselm, rd_link);
        }

        nstashed += npages;
        if (!all && nstashed >= npurge)
            break;
    }

    return (nstashed);
}

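/*
 * Purge the stashed runs and chunks via pages_purge(), recording whether each
 * range is still zeroed.  arena->lock is dropped while purging.
 */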
static size_t
arena_purge_stashed(arena_t *arena, arena_chunk_map_misc_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
    size_t npurged, nmadvise;
    arena_chunk_map_misc_t *runselm;
    extent_node_t *chunkselm;

    if (config_stats)
        nmadvise = 0;
    npurged = 0;

    malloc_mutex_unlock(&arena->lock);
    for (runselm = qr_next(purge_runs_sentinel, rd_link),
        chunkselm = qr_next(purge_chunks_sentinel, cc_link);
        runselm != purge_runs_sentinel; runselm = qr_next(runselm,
        rd_link)) {
        size_t npages;

        if (runselm == &chunkselm->runs_dirty) {
            size_t size = extent_node_size_get(chunkselm);
            bool unzeroed;

            npages = size >> LG_PAGE;
            unzeroed = pages_purge(extent_node_addr_get(chunkselm),
                size);
            extent_node_zeroed_set(chunkselm, !unzeroed);
            chunkselm = qr_next(chunkselm, cc_link);
        } else {
            arena_chunk_t *chunk;
            size_t pageind, run_size, flag_unzeroed, i;
            bool unzeroed;

            chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(runselm);
            pageind = arena_miscelm_to_pageind(runselm);
            run_size = arena_mapbits_large_size_get(chunk, pageind);
            npages = run_size >> LG_PAGE;

            assert(pageind + npages <= chunk_npages);
            unzeroed = pages_purge((void *)((uintptr_t)chunk +
                (pageind << LG_PAGE)), run_size);
            flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;

            /*
             * Set the unzeroed flag for all pages, now that
             * pages_purge() has returned whether the pages were
             * zeroed as a side effect of purging. This chunk map
             * modification is safe even though the arena mutex
             * isn't currently owned by this thread, because the run
             * is marked as allocated, thus protecting it from being
             * modified by any other thread. As long as these
             * writes don't perturb the first and last elements'
             * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
             */
            for (i = 0; i < npages; i++) {
                arena_mapbits_unzeroed_set(chunk, pageind+i,
                    flag_unzeroed);
            }
        }

        npurged += npages;
        if (config_stats)
            nmadvise++;
    }
    malloc_mutex_lock(&arena->lock);

    if (config_stats) {
        arena->stats.nmadvise += nmadvise;
        arena->stats.purged += npurged;
    }

    return (npurged);
}

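/*
 * Return purged runs to the arena via arena_run_dalloc() and unmap purged
 * chunks, releasing their temporary tracking nodes.
 */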
static void
arena_unstash_purged(arena_t *arena,
    arena_chunk_map_misc_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
    arena_chunk_map_misc_t *runselm, *runselm_next;
    extent_node_t *chunkselm;

    /* Deallocate runs. */
    for (runselm = qr_next(purge_runs_sentinel, rd_link),
        chunkselm = qr_next(purge_chunks_sentinel, cc_link);
        runselm != purge_runs_sentinel; runselm = runselm_next) {
        runselm_next = qr_next(runselm, rd_link);
        if (runselm == &chunkselm->runs_dirty) {
            extent_node_t *chunkselm_next = qr_next(chunkselm,
                cc_link);
            void *addr = extent_node_addr_get(chunkselm);
            size_t size = extent_node_size_get(chunkselm);
            bool zeroed = extent_node_zeroed_get(chunkselm);
            extent_node_dirty_remove(chunkselm);
            arena_node_dalloc(arena, chunkselm);
            chunkselm = chunkselm_next;
            chunk_unmap(arena, addr, size, zeroed);
        } else {
            arena_run_t *run = &runselm->run;
            qr_remove(runselm, rd_link);
            arena_run_dalloc(arena, run, false, true);
        }
    }
}

void
arena_purge(arena_t *arena, bool all)
{
    size_t npurge, npurgeable, npurged;
    arena_chunk_map_misc_t purge_runs_sentinel;
    extent_node_t purge_chunks_sentinel;

    /*
     * Calls to arena_dirty_count() are disabled even for debug builds
     * because overhead grows nonlinearly as memory usage increases.
     */
    if (false && config_debug) {
        size_t ndirty = arena_dirty_count(arena);
        assert(ndirty == arena->ndirty);
    }
    assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty || all);

    if (config_stats)
        arena->stats.npurge++;

    npurge = arena_compute_npurge(arena, all);
    qr_new(&purge_runs_sentinel, rd_link);
    extent_node_dirty_linkage_init(&purge_chunks_sentinel);

    npurgeable = arena_stash_dirty(arena, all, npurge, &purge_runs_sentinel,
        &purge_chunks_sentinel);
    assert(npurgeable >= npurge);
    npurged = arena_purge_stashed(arena, &purge_runs_sentinel,
        &purge_chunks_sentinel);
    assert(npurged == npurgeable);
    arena_unstash_purged(arena, &purge_runs_sentinel,
        &purge_chunks_sentinel);
}

void
arena_purge_all(arena_t *arena)
{

    malloc_mutex_lock(&arena->lock);
    arena_purge(arena, true);
    malloc_mutex_unlock(&arena->lock);
}

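/*
 * Try to coalesce the run starting at *p_run_ind with its unallocated
 * neighbors of equal dirtiness, updating *p_size, *p_run_ind, and
 * *p_run_pages to describe the merged run.
 */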
static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
{
    size_t size = *p_size;
    size_t run_ind = *p_run_ind;
    size_t run_pages = *p_run_pages;

    /* Try to coalesce forward. */
    if (run_ind + run_pages < chunk_npages &&
        arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
        arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
        size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
            run_ind+run_pages);
        size_t nrun_pages = nrun_size >> LG_PAGE;

        /*
         * Remove successor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert(arena_mapbits_unallocated_size_get(chunk,
            run_ind+run_pages+nrun_pages-1) == nrun_size);
        assert(arena_mapbits_dirty_get(chunk,
            run_ind+run_pages+nrun_pages-1) == flag_dirty);
        arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);

        /*
         * If the successor is dirty, remove it from the set of dirty
         * pages.
         */
        if (flag_dirty != 0) {
            arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
                nrun_pages);
        }

        size += nrun_size;
        run_pages += nrun_pages;

        arena_mapbits_unallocated_size_set(chunk, run_ind, size);
        arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
            size);
    }

    /* Try to coalesce backward. */
    if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
        run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
        flag_dirty) {
        size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
            run_ind-1);
        size_t prun_pages = prun_size >> LG_PAGE;

        run_ind -= prun_pages;

        /*
         * Remove predecessor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
            prun_size);
        assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
        arena_avail_remove(arena, chunk, run_ind, prun_pages);

        /*
         * If the predecessor is dirty, remove it from the set of dirty
         * pages.
         */
        if (flag_dirty != 0) {
            arena_run_dirty_remove(arena, chunk, run_ind,
                prun_pages);
        }

        size += prun_size;
        run_pages += prun_pages;

        arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1300 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1301 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001302 }
1303
Jason Evansaa5113b2014-01-14 16:23:03 -08001304 *p_size = size;
1305 *p_run_ind = run_ind;
1306 *p_run_pages = run_pages;
1307}
1308
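/*
 * Return a run to the arena: mark its pages unallocated, coalesce with
 * adjacent unallocated runs, reinsert into runs_avail (and the dirty list if
 * applicable), and deallocate the chunk if it becomes completely unused.
 */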
1309static void
1310arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
1311{
1312 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07001313 arena_chunk_map_misc_t *miscelm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001314 size_t size, run_ind, run_pages, flag_dirty;
1315
1316 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07001317 miscelm = arena_run_to_miscelm(run);
1318 run_ind = arena_miscelm_to_pageind(miscelm);
Jason Evansaa5113b2014-01-14 16:23:03 -08001319 assert(run_ind >= map_bias);
1320 assert(run_ind < chunk_npages);
1321 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
1322 size = arena_mapbits_large_size_get(chunk, run_ind);
1323 assert(size == PAGE ||
1324 arena_mapbits_large_size_get(chunk,
1325 run_ind+(size>>LG_PAGE)-1) == 0);
1326 } else {
Jason Evans381c23d2014-10-10 23:01:03 -07001327 arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
Jason Evansaa5113b2014-01-14 16:23:03 -08001328 size = bin_info->run_size;
1329 }
1330 run_pages = (size >> LG_PAGE);
1331 arena_cactive_update(arena, 0, run_pages);
1332 arena->nactive -= run_pages;
1333
1334 /*
 1335	 * The run is dirty if the caller claims to have dirtied it, or if it was
 1336	 * already dirty before being allocated and the caller doesn't claim to
 1337	 * have cleaned it.
1338 */
1339 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1340 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans551ebc42014-10-03 10:16:09 -07001341 if (!cleaned && arena_mapbits_dirty_get(chunk, run_ind) != 0)
Jason Evansaa5113b2014-01-14 16:23:03 -08001342 dirty = true;
1343 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
1344
1345 /* Mark pages as unallocated in the chunk map. */
1346 if (dirty) {
1347 arena_mapbits_unallocated_set(chunk, run_ind, size,
1348 CHUNK_MAP_DIRTY);
1349 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1350 CHUNK_MAP_DIRTY);
1351 } else {
1352 arena_mapbits_unallocated_set(chunk, run_ind, size,
1353 arena_mapbits_unzeroed_get(chunk, run_ind));
1354 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1355 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
1356 }
1357
Jason Evans0c5dd032014-09-29 01:31:39 -07001358 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, flag_dirty);
Jason Evansaa5113b2014-01-14 16:23:03 -08001359
Jason Evanse476f8a2010-01-16 09:53:50 -08001360 /* Insert into runs_avail, now that coalescing is complete. */
Jason Evans203484e2012-05-02 00:30:36 -07001361 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1362 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
1363 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1364 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Qinfan Wu90737fc2014-07-21 19:39:20 -07001365 arena_avail_insert(arena, chunk, run_ind, run_pages);
Jason Evans8d4203c2010-04-13 20:53:21 -07001366
Jason Evans070b3c32014-08-14 14:45:58 -07001367 if (dirty)
Jason Evansee41ad42015-02-15 18:04:46 -08001368 arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07001369
Jason Evans203484e2012-05-02 00:30:36 -07001370 /* Deallocate chunk if it is now completely unused. */
Jason Evans155bfa72014-10-05 17:54:10 -07001371 if (size == arena_maxrun) {
Jason Evans203484e2012-05-02 00:30:36 -07001372 assert(run_ind == map_bias);
Jason Evans155bfa72014-10-05 17:54:10 -07001373 assert(run_pages == (arena_maxrun >> LG_PAGE));
Jason Evanse2deab72014-05-15 22:22:27 -07001374 arena_chunk_dalloc(arena, chunk);
Jason Evans203484e2012-05-02 00:30:36 -07001375 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001376
Jason Evans4fb7f512010-01-27 18:27:09 -08001377 /*
Jason Evans8d4203c2010-04-13 20:53:21 -07001378 * It is okay to do dirty page processing here even if the chunk was
Jason Evans4fb7f512010-01-27 18:27:09 -08001379 * deallocated above, since in that case it is the spare. Waiting
1380 * until after possible chunk deallocation to do dirty processing
1381 * allows for an old spare to be fully deallocated, thus decreasing the
1382 * chances of spuriously crossing the dirty page purging threshold.
1383 */
Jason Evans8d4203c2010-04-13 20:53:21 -07001384 if (dirty)
Jason Evans05b21be2010-03-14 17:36:10 -07001385 arena_maybe_purge(arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08001386}
1387
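/*
 * Shrink a large run from the front: the leading (oldsize - newsize) bytes
 * are split off and returned to the arena via arena_run_dalloc().
 */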
1388static void
1389arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1390 size_t oldsize, size_t newsize)
1391{
Jason Evans0c5dd032014-09-29 01:31:39 -07001392 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1393 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07001394 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001395 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001396
1397 assert(oldsize > newsize);
1398
1399 /*
1400 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07001401 * leading run as separately allocated. Set the last element of each
1402 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08001403 */
Jason Evans203484e2012-05-02 00:30:36 -07001404 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evansd8ceef62012-05-10 20:59:39 -07001405 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
1406 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001407
Jason Evans7372b152012-02-10 20:22:09 -08001408 if (config_debug) {
Jason Evansae4c7b42012-04-02 07:04:34 -07001409 UNUSED size_t tail_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001410 assert(arena_mapbits_large_size_get(chunk,
1411 pageind+head_npages+tail_npages-1) == 0);
1412 assert(arena_mapbits_dirty_get(chunk,
1413 pageind+head_npages+tail_npages-1) == flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001414 }
Jason Evansd8ceef62012-05-10 20:59:39 -07001415 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
1416 flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08001417
Jason Evanse3d13062012-10-30 15:42:37 -07001418 arena_run_dalloc(arena, run, false, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001419}
1420
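/*
 * Shrink a large run from the back: the trailing (oldsize - newsize) bytes
 * are split off and returned to the arena via arena_run_dalloc().
 */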
1421static void
1422arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1423 size_t oldsize, size_t newsize, bool dirty)
1424{
Jason Evans0c5dd032014-09-29 01:31:39 -07001425 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1426 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07001427 size_t head_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001428 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07001429 arena_chunk_map_misc_t *tail_miscelm;
1430 arena_run_t *tail_run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001431
1432 assert(oldsize > newsize);
1433
1434 /*
1435 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07001436 * trailing run as separately allocated. Set the last element of each
1437 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08001438 */
Jason Evans203484e2012-05-02 00:30:36 -07001439 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evansd8ceef62012-05-10 20:59:39 -07001440 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
1441 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001442
Jason Evans203484e2012-05-02 00:30:36 -07001443 if (config_debug) {
1444 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
1445 assert(arena_mapbits_large_size_get(chunk,
1446 pageind+head_npages+tail_npages-1) == 0);
1447 assert(arena_mapbits_dirty_get(chunk,
1448 pageind+head_npages+tail_npages-1) == flag_dirty);
1449 }
1450 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
Jason Evansd8ceef62012-05-10 20:59:39 -07001451 flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08001452
Jason Evans0c5dd032014-09-29 01:31:39 -07001453 tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
1454 tail_run = &tail_miscelm->run;
1455 arena_run_dalloc(arena, tail_run, dirty, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001456}
1457
1458static arena_run_t *
Jason Evanse7a10582012-02-13 17:36:52 -08001459arena_bin_runs_first(arena_bin_t *bin)
1460{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001461 arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
Jason Evans0c5dd032014-09-29 01:31:39 -07001462 if (miscelm != NULL)
1463 return (&miscelm->run);
Jason Evanse7a10582012-02-13 17:36:52 -08001464
1465 return (NULL);
1466}
1467
1468static void
1469arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
1470{
Jason Evans0c5dd032014-09-29 01:31:39 -07001471 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08001472
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001473 assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
Jason Evanse7a10582012-02-13 17:36:52 -08001474
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001475 arena_run_tree_insert(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08001476}
1477
1478static void
1479arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
1480{
Jason Evans0c5dd032014-09-29 01:31:39 -07001481 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08001482
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001483 assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
Jason Evanse7a10582012-02-13 17:36:52 -08001484
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001485 arena_run_tree_remove(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08001486}
1487
1488static arena_run_t *
1489arena_bin_nonfull_run_tryget(arena_bin_t *bin)
1490{
1491 arena_run_t *run = arena_bin_runs_first(bin);
1492 if (run != NULL) {
1493 arena_bin_runs_remove(bin, run);
1494 if (config_stats)
1495 bin->stats.reruns++;
1496 }
1497 return (run);
1498}
1499
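/*
 * Get a non-full run for the bin, allocating a new run from the arena if no
 * existing run has space. bin->lock is dropped while arena->lock is held.
 */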
1500static arena_run_t *
Jason Evanse476f8a2010-01-16 09:53:50 -08001501arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1502{
Jason Evanse476f8a2010-01-16 09:53:50 -08001503 arena_run_t *run;
Jason Evans155bfa72014-10-05 17:54:10 -07001504 index_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001505 arena_bin_info_t *bin_info;
Jason Evanse476f8a2010-01-16 09:53:50 -08001506
1507 /* Look for a usable run. */
Jason Evanse7a10582012-02-13 17:36:52 -08001508 run = arena_bin_nonfull_run_tryget(bin);
1509 if (run != NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001510 return (run);
Jason Evanse476f8a2010-01-16 09:53:50 -08001511 /* No existing runs have any space available. */
1512
Jason Evans49f7e8f2011-03-15 13:59:15 -07001513 binind = arena_bin_index(arena, bin);
1514 bin_info = &arena_bin_info[binind];
1515
Jason Evanse476f8a2010-01-16 09:53:50 -08001516 /* Allocate a new run. */
Jason Evanse00572b2010-03-14 19:43:56 -07001517 malloc_mutex_unlock(&bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07001518 /******************************/
Jason Evans86815df2010-03-13 20:32:56 -08001519 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001520 run = arena_run_alloc_small(arena, bin_info->run_size, binind);
Jason Evanse00572b2010-03-14 19:43:56 -07001521 if (run != NULL) {
1522 /* Initialize run internals. */
Jason Evans381c23d2014-10-10 23:01:03 -07001523 run->binind = binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001524 run->nfree = bin_info->nregs;
Jason Evans0c5dd032014-09-29 01:31:39 -07001525 bitmap_init(run->bitmap, &bin_info->bitmap_info);
Jason Evanse69bee02010-03-15 22:25:23 -07001526 }
1527 malloc_mutex_unlock(&arena->lock);
1528 /********************************/
1529 malloc_mutex_lock(&bin->lock);
1530 if (run != NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001531 if (config_stats) {
1532 bin->stats.nruns++;
1533 bin->stats.curruns++;
Jason Evans7372b152012-02-10 20:22:09 -08001534 }
Jason Evanse00572b2010-03-14 19:43:56 -07001535 return (run);
1536 }
1537
1538 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08001539 * arena_run_alloc_small() failed, but another thread may have made
Jason Evans940a2e02010-10-17 17:51:37 -07001540 * sufficient memory available while this one dropped bin->lock above,
Jason Evanse00572b2010-03-14 19:43:56 -07001541 * so search one more time.
1542 */
Jason Evanse7a10582012-02-13 17:36:52 -08001543 run = arena_bin_nonfull_run_tryget(bin);
1544 if (run != NULL)
Jason Evanse00572b2010-03-14 19:43:56 -07001545 return (run);
Jason Evanse00572b2010-03-14 19:43:56 -07001546
1547 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08001548}
1549
Jason Evans1e0a6362010-03-13 13:41:58 -08001550/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08001551static void *
1552arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1553{
Jason Evanse00572b2010-03-14 19:43:56 -07001554 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07001555 index_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001556 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07001557 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001558
Jason Evans49f7e8f2011-03-15 13:59:15 -07001559 binind = arena_bin_index(arena, bin);
1560 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07001561 bin->runcur = NULL;
1562 run = arena_bin_nonfull_run_get(arena, bin);
1563 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1564 /*
1565 * Another thread updated runcur while this one ran without the
1566 * bin lock in arena_bin_nonfull_run_get().
1567 */
Jason Evanse00572b2010-03-14 19:43:56 -07001568 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001569 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07001570 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07001571 arena_chunk_t *chunk;
1572
1573 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08001574 * arena_run_alloc_small() may have allocated run, or
1575 * it may have pulled run from the bin's run tree.
1576 * Therefore it is unsafe to make any assumptions about
1577 * how run has previously been used, and
1578 * arena_bin_lower_run() must be called, as if a region
1579 * were just deallocated from the run.
Jason Evans940a2e02010-10-17 17:51:37 -07001580 */
1581 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001582 if (run->nfree == bin_info->nregs)
Jason Evans8de6a022010-10-17 20:57:30 -07001583 arena_dalloc_bin_run(arena, chunk, run, bin);
1584 else
1585 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07001586 }
1587 return (ret);
1588 }
1589
1590 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001591 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07001592
1593 bin->runcur = run;
1594
Jason Evanse476f8a2010-01-16 09:53:50 -08001595 assert(bin->runcur->nfree > 0);
1596
Jason Evans49f7e8f2011-03-15 13:59:15 -07001597 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08001598}
1599
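/*
 * Fill a tcache bin with small regions, inserting them so that the lowest
 * regions are used first. On OOM, any regions already obtained are kept.
 */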
Jason Evans86815df2010-03-13 20:32:56 -08001600void
Jason Evans155bfa72014-10-05 17:54:10 -07001601arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind,
Jason Evans7372b152012-02-10 20:22:09 -08001602 uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08001603{
1604 unsigned i, nfill;
1605 arena_bin_t *bin;
1606 arena_run_t *run;
1607 void *ptr;
1608
1609 assert(tbin->ncached == 0);
1610
Jason Evans88c222c2013-02-06 11:59:30 -08001611 if (config_prof && arena_prof_accum(arena, prof_accumbytes))
1612 prof_idump();
Jason Evanse69bee02010-03-15 22:25:23 -07001613 bin = &arena->bins[binind];
1614 malloc_mutex_lock(&bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07001615 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1616 tbin->lg_fill_div); i < nfill; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001617 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001618 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001619 else
1620 ptr = arena_bin_malloc_hard(arena, bin);
Jason Evansf11a6772014-10-05 13:05:10 -07001621 if (ptr == NULL) {
1622 /*
1623 * OOM. tbin->avail isn't yet filled down to its first
1624 * element, so the successful allocations (if any) must
1625 * be moved to the base of tbin->avail before bailing
1626 * out.
1627 */
1628 if (i > 0) {
1629 memmove(tbin->avail, &tbin->avail[nfill - i],
1630 i * sizeof(void *));
1631 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001632 break;
Jason Evansf11a6772014-10-05 13:05:10 -07001633 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001634 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07001635 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
1636 true);
1637 }
Jason Evans9c43c132011-03-18 10:53:15 -07001638 /* Insert such that low regions get used first. */
1639 tbin->avail[nfill - 1 - i] = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08001640 }
Jason Evans7372b152012-02-10 20:22:09 -08001641 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08001642 bin->stats.nmalloc += i;
1643 bin->stats.nrequests += tbin->tstats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07001644 bin->stats.curregs += i;
Jason Evans7372b152012-02-10 20:22:09 -08001645 bin->stats.nfills++;
1646 tbin->tstats.nrequests = 0;
1647 }
Jason Evans86815df2010-03-13 20:32:56 -08001648 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001649 tbin->ncached = i;
Jason Evanse476f8a2010-01-16 09:53:50 -08001650}
Jason Evanse476f8a2010-01-16 09:53:50 -08001651
Jason Evans122449b2012-04-06 00:35:09 -07001652void
1653arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
1654{
1655
1656 if (zero) {
1657 size_t redzone_size = bin_info->redzone_size;
1658 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
1659 redzone_size);
1660 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
1661 redzone_size);
1662 } else {
1663 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
1664 bin_info->reg_interval);
1665 }
1666}
1667
Jason Evans0d6c5d82013-12-17 15:14:36 -08001668#ifdef JEMALLOC_JET
1669#undef arena_redzone_corruption
1670#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
1671#endif
1672static void
1673arena_redzone_corruption(void *ptr, size_t usize, bool after,
1674 size_t offset, uint8_t byte)
1675{
1676
1677 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
1678 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
1679 after ? "after" : "before", ptr, usize, byte);
1680}
1681#ifdef JEMALLOC_JET
Jason Evans0d6c5d82013-12-17 15:14:36 -08001682#undef arena_redzone_corruption
Jason Evans6b694c42014-01-07 16:47:56 -08001683#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
1684arena_redzone_corruption_t *arena_redzone_corruption =
1685 JEMALLOC_N(arena_redzone_corruption_impl);
Jason Evans0d6c5d82013-12-17 15:14:36 -08001686#endif
1687
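/*
 * Validate the redzones surrounding a small region, reporting corruption and
 * optionally restoring the canonical 0xa5 fill byte.
 */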
1688static void
1689arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07001690{
1691 size_t size = bin_info->reg_size;
1692 size_t redzone_size = bin_info->redzone_size;
1693 size_t i;
1694 bool error = false;
1695
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001696 if (opt_junk_alloc) {
1697 for (i = 1; i <= redzone_size; i++) {
1698 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
1699 if (*byte != 0xa5) {
1700 error = true;
1701 arena_redzone_corruption(ptr, size, false, i, *byte);
1702 if (reset)
1703 *byte = 0xa5;
1704 }
1705 }
1706 for (i = 0; i < redzone_size; i++) {
1707 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
1708 if (*byte != 0xa5) {
1709 error = true;
1710 arena_redzone_corruption(ptr, size, true, i, *byte);
1711 if (reset)
1712 *byte = 0xa5;
1713 }
Jason Evans122449b2012-04-06 00:35:09 -07001714 }
1715 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001716
Jason Evans122449b2012-04-06 00:35:09 -07001717 if (opt_abort && error)
1718 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08001719}
Jason Evans122449b2012-04-06 00:35:09 -07001720
Jason Evans6b694c42014-01-07 16:47:56 -08001721#ifdef JEMALLOC_JET
1722#undef arena_dalloc_junk_small
1723#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
1724#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08001725void
1726arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1727{
1728 size_t redzone_size = bin_info->redzone_size;
1729
1730 arena_redzones_validate(ptr, bin_info, false);
Jason Evans122449b2012-04-06 00:35:09 -07001731 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1732 bin_info->reg_interval);
1733}
Jason Evans6b694c42014-01-07 16:47:56 -08001734#ifdef JEMALLOC_JET
1735#undef arena_dalloc_junk_small
1736#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
1737arena_dalloc_junk_small_t *arena_dalloc_junk_small =
1738 JEMALLOC_N(arena_dalloc_junk_small_impl);
1739#endif
Jason Evans122449b2012-04-06 00:35:09 -07001740
Jason Evans0d6c5d82013-12-17 15:14:36 -08001741void
1742arena_quarantine_junk_small(void *ptr, size_t usize)
1743{
Jason Evans155bfa72014-10-05 17:54:10 -07001744 index_t binind;
Jason Evans0d6c5d82013-12-17 15:14:36 -08001745 arena_bin_info_t *bin_info;
1746 cassert(config_fill);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001747 assert(opt_junk_free);
Jason Evans0d6c5d82013-12-17 15:14:36 -08001748 assert(opt_quarantine);
1749 assert(usize <= SMALL_MAXCLASS);
1750
Jason Evans155bfa72014-10-05 17:54:10 -07001751 binind = size2index(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08001752 bin_info = &arena_bin_info[binind];
1753 arena_redzones_validate(ptr, bin_info, true);
1754}
1755
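/* Allocate a small region from the bin corresponding to size. */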
Jason Evanse476f8a2010-01-16 09:53:50 -08001756void *
1757arena_malloc_small(arena_t *arena, size_t size, bool zero)
1758{
1759 void *ret;
1760 arena_bin_t *bin;
1761 arena_run_t *run;
Jason Evans155bfa72014-10-05 17:54:10 -07001762 index_t binind;
Jason Evanse476f8a2010-01-16 09:53:50 -08001763
Jason Evans155bfa72014-10-05 17:54:10 -07001764 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08001765 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08001766 bin = &arena->bins[binind];
Jason Evans155bfa72014-10-05 17:54:10 -07001767 size = index2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001768
Jason Evans86815df2010-03-13 20:32:56 -08001769 malloc_mutex_lock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001770 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001771 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001772 else
1773 ret = arena_bin_malloc_hard(arena, bin);
1774
1775 if (ret == NULL) {
Jason Evans86815df2010-03-13 20:32:56 -08001776 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001777 return (NULL);
1778 }
1779
Jason Evans7372b152012-02-10 20:22:09 -08001780 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08001781 bin->stats.nmalloc++;
1782 bin->stats.nrequests++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07001783 bin->stats.curregs++;
Jason Evans7372b152012-02-10 20:22:09 -08001784 }
Jason Evans86815df2010-03-13 20:32:56 -08001785 malloc_mutex_unlock(&bin->lock);
Jason Evans551ebc42014-10-03 10:16:09 -07001786 if (config_prof && !isthreaded && arena_prof_accum(arena, size))
Jason Evans88c222c2013-02-06 11:59:30 -08001787 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08001788
Jason Evans551ebc42014-10-03 10:16:09 -07001789 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08001790 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001791 if (unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07001792 arena_alloc_junk_small(ret,
1793 &arena_bin_info[binind], false);
Jason Evans9c640bf2014-09-11 16:20:44 -07001794 } else if (unlikely(opt_zero))
Jason Evans7372b152012-02-10 20:22:09 -08001795 memset(ret, 0, size);
1796 }
Jason Evansbd87b012014-04-15 16:35:08 -07001797 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evans122449b2012-04-06 00:35:09 -07001798 } else {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001799 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07001800 arena_alloc_junk_small(ret, &arena_bin_info[binind],
1801 true);
1802 }
Jason Evansbd87b012014-04-15 16:35:08 -07001803 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001804 memset(ret, 0, size);
Jason Evans122449b2012-04-06 00:35:09 -07001805 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001806
1807 return (ret);
1808}
1809
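/* Allocate a large allocation (a dedicated run) directly from the arena. */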
1810void *
Jason Evanse476f8a2010-01-16 09:53:50 -08001811arena_malloc_large(arena_t *arena, size_t size, bool zero)
1812{
1813 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07001814 size_t usize;
Jason Evans0c5dd032014-09-29 01:31:39 -07001815 arena_run_t *run;
1816 arena_chunk_map_misc_t *miscelm;
Jason Evans88c222c2013-02-06 11:59:30 -08001817 UNUSED bool idump;
Jason Evanse476f8a2010-01-16 09:53:50 -08001818
1819 /* Large allocation. */
Jason Evans155bfa72014-10-05 17:54:10 -07001820 usize = s2u(size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001821 malloc_mutex_lock(&arena->lock);
Jason Evans155bfa72014-10-05 17:54:10 -07001822 run = arena_run_alloc_large(arena, usize, zero);
Jason Evans0c5dd032014-09-29 01:31:39 -07001823 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001824 malloc_mutex_unlock(&arena->lock);
1825 return (NULL);
1826 }
Jason Evans0c5dd032014-09-29 01:31:39 -07001827 miscelm = arena_run_to_miscelm(run);
1828 ret = arena_miscelm_to_rpages(miscelm);
Jason Evans7372b152012-02-10 20:22:09 -08001829 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07001830 index_t index = size2index(usize) - NBINS;
1831
Jason Evans7372b152012-02-10 20:22:09 -08001832 arena->stats.nmalloc_large++;
1833 arena->stats.nrequests_large++;
Jason Evans155bfa72014-10-05 17:54:10 -07001834 arena->stats.allocated_large += usize;
1835 arena->stats.lstats[index].nmalloc++;
1836 arena->stats.lstats[index].nrequests++;
1837 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001838 }
Jason Evans7372b152012-02-10 20:22:09 -08001839 if (config_prof)
Jason Evans155bfa72014-10-05 17:54:10 -07001840 idump = arena_prof_accum_locked(arena, usize);
Jason Evanse476f8a2010-01-16 09:53:50 -08001841 malloc_mutex_unlock(&arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08001842 if (config_prof && idump)
1843 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08001844
Jason Evans551ebc42014-10-03 10:16:09 -07001845 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08001846 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001847 if (unlikely(opt_junk_alloc))
Jason Evans155bfa72014-10-05 17:54:10 -07001848 memset(ret, 0xa5, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07001849 else if (unlikely(opt_zero))
Jason Evans155bfa72014-10-05 17:54:10 -07001850 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08001851 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001852 }
1853
1854 return (ret);
1855}
1856
Jason Evanse476f8a2010-01-16 09:53:50 -08001857/* Only handles large allocations that require more than page alignment. */
Jason Evans88fef7c2015-02-12 14:06:37 -08001858static void *
1859arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
1860 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001861{
1862 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07001863 size_t alloc_size, leadsize, trailsize;
1864 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001865 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07001866 arena_chunk_map_misc_t *miscelm;
1867 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001868
1869 assert((size & PAGE_MASK) == 0);
Jason Evans93443682010-10-20 17:39:18 -07001870
Jason Evans88fef7c2015-02-12 14:06:37 -08001871 arena = arena_choose(tsd, arena);
1872 if (unlikely(arena == NULL))
1873 return (NULL);
1874
Jason Evans93443682010-10-20 17:39:18 -07001875 alignment = PAGE_CEILING(alignment);
Jason Evans5ff709c2012-04-11 18:13:45 -07001876 alloc_size = size + alignment - PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001877
1878 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001879 run = arena_run_alloc_large(arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07001880 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001881 malloc_mutex_unlock(&arena->lock);
1882 return (NULL);
1883 }
Jason Evans5ff709c2012-04-11 18:13:45 -07001884 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07001885 miscelm = arena_run_to_miscelm(run);
1886 rpages = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08001887
Jason Evans0c5dd032014-09-29 01:31:39 -07001888 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
1889 (uintptr_t)rpages;
Jason Evans5ff709c2012-04-11 18:13:45 -07001890 assert(alloc_size >= leadsize + size);
1891 trailsize = alloc_size - leadsize - size;
Jason Evans5ff709c2012-04-11 18:13:45 -07001892 if (leadsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07001893 arena_chunk_map_misc_t *head_miscelm = miscelm;
1894 arena_run_t *head_run = run;
1895
1896 miscelm = arena_miscelm_get(chunk,
1897 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
1898 LG_PAGE));
1899 run = &miscelm->run;
1900
1901 arena_run_trim_head(arena, chunk, head_run, alloc_size,
1902 alloc_size - leadsize);
Jason Evans5ff709c2012-04-11 18:13:45 -07001903 }
1904 if (trailsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07001905 arena_run_trim_tail(arena, chunk, run, size + trailsize, size,
Jason Evans5ff709c2012-04-11 18:13:45 -07001906 false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001907 }
Jason Evans0c5dd032014-09-29 01:31:39 -07001908 arena_run_init_large(arena, run, size, zero);
1909 ret = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08001910
Jason Evans7372b152012-02-10 20:22:09 -08001911 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07001912 index_t index = size2index(size) - NBINS;
1913
Jason Evans7372b152012-02-10 20:22:09 -08001914 arena->stats.nmalloc_large++;
1915 arena->stats.nrequests_large++;
1916 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07001917 arena->stats.lstats[index].nmalloc++;
1918 arena->stats.lstats[index].nrequests++;
1919 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001920 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001921 malloc_mutex_unlock(&arena->lock);
1922
Jason Evans551ebc42014-10-03 10:16:09 -07001923 if (config_fill && !zero) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001924 if (unlikely(opt_junk_alloc))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001925 memset(ret, 0xa5, size);
Jason Evans9c640bf2014-09-11 16:20:44 -07001926 else if (unlikely(opt_zero))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001927 memset(ret, 0, size);
1928 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001929 return (ret);
1930}
1931
Jason Evans88fef7c2015-02-12 14:06:37 -08001932void *
1933arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
1934 bool zero, tcache_t *tcache)
1935{
1936 void *ret;
1937
1938 if (usize <= SMALL_MAXCLASS && alignment < PAGE)
1939 ret = arena_malloc(tsd, arena, usize, zero, tcache);
1940 else {
1941 if (likely(usize <= arena_maxclass)) {
1942 ret = arena_palloc_large(tsd, arena, usize, alignment,
1943 zero);
1944 } else if (likely(alignment <= chunksize))
1945 ret = huge_malloc(tsd, arena, usize, zero, tcache);
1946 else {
1947 ret = huge_palloc(tsd, arena, usize, alignment, zero,
1948 tcache);
1949 }
1950 }
1951 return (ret);
1952}
1953
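/*
 * Record that a sampled allocation of small size was promoted to
 * LARGE_MINCLASS by storing the true small binind in the chunk map.
 */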
Jason Evans0b270a92010-03-31 16:45:04 -07001954void
1955arena_prof_promoted(const void *ptr, size_t size)
1956{
1957 arena_chunk_t *chunk;
Jason Evans155bfa72014-10-05 17:54:10 -07001958 size_t pageind;
1959 index_t binind;
Jason Evans0b270a92010-03-31 16:45:04 -07001960
Jason Evans78f73522012-04-18 13:38:40 -07001961 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07001962 assert(ptr != NULL);
1963 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evans155bfa72014-10-05 17:54:10 -07001964 assert(isalloc(ptr, false) == LARGE_MINCLASS);
1965 assert(isalloc(ptr, true) == LARGE_MINCLASS);
Jason Evansb1726102012-02-28 16:50:47 -08001966 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07001967
1968 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07001969 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans155bfa72014-10-05 17:54:10 -07001970 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08001971 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07001972 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07001973
Jason Evans155bfa72014-10-05 17:54:10 -07001974 assert(isalloc(ptr, false) == LARGE_MINCLASS);
Jason Evans122449b2012-04-06 00:35:09 -07001975 assert(isalloc(ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07001976}
Jason Evans6109fe02010-02-10 10:37:56 -08001977
Jason Evanse476f8a2010-01-16 09:53:50 -08001978static void
Jason Evans088e6a02010-10-18 00:04:44 -07001979arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08001980 arena_bin_t *bin)
1981{
Jason Evanse476f8a2010-01-16 09:53:50 -08001982
Jason Evans19b3d612010-03-18 20:36:40 -07001983 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08001984 if (run == bin->runcur)
1985 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001986 else {
Jason Evansee41ad42015-02-15 18:04:46 -08001987 index_t binind = arena_bin_index(extent_node_arena_get(
1988 &chunk->node), bin);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001989 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1990
1991 if (bin_info->nregs != 1) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07001992 /*
1993 * This block's conditional is necessary because if the
1994 * run only contains one region, then it never gets
1995 * inserted into the non-full runs tree.
1996 */
Jason Evanse7a10582012-02-13 17:36:52 -08001997 arena_bin_runs_remove(bin, run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001998 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001999 }
Jason Evans088e6a02010-10-18 00:04:44 -07002000}
2001
2002static void
2003arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2004 arena_bin_t *bin)
2005{
Jason Evans088e6a02010-10-18 00:04:44 -07002006
2007 assert(run != bin->runcur);
Jason Evans0c5dd032014-09-29 01:31:39 -07002008 assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
2009 NULL);
Jason Evans86815df2010-03-13 20:32:56 -08002010
Jason Evanse00572b2010-03-14 19:43:56 -07002011 malloc_mutex_unlock(&bin->lock);
2012 /******************************/
Jason Evans86815df2010-03-13 20:32:56 -08002013 malloc_mutex_lock(&arena->lock);
Jason Evans381c23d2014-10-10 23:01:03 -07002014 arena_run_dalloc(arena, run, true, false);
Jason Evans86815df2010-03-13 20:32:56 -08002015 malloc_mutex_unlock(&arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002016 /****************************/
2017 malloc_mutex_lock(&bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08002018 if (config_stats)
2019 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08002020}
2021
Jason Evans940a2e02010-10-17 17:51:37 -07002022static void
2023arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2024 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002025{
Jason Evanse476f8a2010-01-16 09:53:50 -08002026
Jason Evans8de6a022010-10-17 20:57:30 -07002027 /*
Jason Evanse7a10582012-02-13 17:36:52 -08002028 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
2029 * non-full run. It is okay to NULL runcur out rather than proactively
2030 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07002031 */
Jason Evanse7a10582012-02-13 17:36:52 -08002032 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07002033 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08002034 if (bin->runcur->nfree > 0)
2035 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07002036 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08002037 if (config_stats)
2038 bin->stats.reruns++;
2039 } else
2040 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07002041}
2042
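/*
 * Deallocate a small region with bin->lock held by the caller. If junked is
 * true, the region has already been junk-filled, so junk filling is skipped.
 */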
Jason Evansfc0b3b72014-10-09 17:54:06 -07002043static void
2044arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2045 arena_chunk_map_bits_t *bitselm, bool junked)
Jason Evans940a2e02010-10-17 17:51:37 -07002046{
Jason Evans0c5dd032014-09-29 01:31:39 -07002047 size_t pageind, rpages_ind;
Jason Evans940a2e02010-10-17 17:51:37 -07002048 arena_run_t *run;
2049 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02002050 arena_bin_info_t *bin_info;
Jason Evans155bfa72014-10-05 17:54:10 -07002051 index_t binind;
Jason Evans940a2e02010-10-17 17:51:37 -07002052
Jason Evansae4c7b42012-04-02 07:04:34 -07002053 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07002054 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2055 run = &arena_miscelm_get(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002056 binind = run->binind;
2057 bin = &arena->bins[binind];
Mike Hommey8b499712012-04-24 23:22:02 +02002058 bin_info = &arena_bin_info[binind];
Jason Evans940a2e02010-10-17 17:51:37 -07002059
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002060 if (!junked && config_fill && unlikely(opt_junk_free))
Jason Evans122449b2012-04-06 00:35:09 -07002061 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07002062
2063 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002064 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07002065 arena_dissociate_bin_run(chunk, run, bin);
Jason Evans8de6a022010-10-17 20:57:30 -07002066 arena_dalloc_bin_run(arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07002067 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07002068 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002069
Jason Evans7372b152012-02-10 20:22:09 -08002070 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002071 bin->stats.ndalloc++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002072 bin->stats.curregs--;
Jason Evans7372b152012-02-10 20:22:09 -08002073 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002074}
2075
Jason Evanse476f8a2010-01-16 09:53:50 -08002076void
Jason Evansfc0b3b72014-10-09 17:54:06 -07002077arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2078 arena_chunk_map_bits_t *bitselm)
2079{
2080
2081 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
2082}
2083
2084void
Jason Evans203484e2012-05-02 00:30:36 -07002085arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002086 size_t pageind, arena_chunk_map_bits_t *bitselm)
Jason Evans203484e2012-05-02 00:30:36 -07002087{
2088 arena_run_t *run;
2089 arena_bin_t *bin;
Jason Evans0c5dd032014-09-29 01:31:39 -07002090 size_t rpages_ind;
Jason Evans203484e2012-05-02 00:30:36 -07002091
Jason Evans0c5dd032014-09-29 01:31:39 -07002092 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2093 run = &arena_miscelm_get(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002094 bin = &arena->bins[run->binind];
Jason Evans203484e2012-05-02 00:30:36 -07002095 malloc_mutex_lock(&bin->lock);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002096 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
Jason Evans203484e2012-05-02 00:30:36 -07002097 malloc_mutex_unlock(&bin->lock);
2098}
2099
2100void
2101arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2102 size_t pageind)
2103{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002104 arena_chunk_map_bits_t *bitselm;
Jason Evans203484e2012-05-02 00:30:36 -07002105
2106 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07002107 /* arena_ptr_small_binind_get() does extra sanity checking. */
2108 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2109 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07002110 }
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002111 bitselm = arena_bitselm_get(chunk, pageind);
2112 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
Jason Evans203484e2012-05-02 00:30:36 -07002113}
Jason Evanse476f8a2010-01-16 09:53:50 -08002114
Jason Evans6b694c42014-01-07 16:47:56 -08002115#ifdef JEMALLOC_JET
2116#undef arena_dalloc_junk_large
2117#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
2118#endif
Jason Evansfc0b3b72014-10-09 17:54:06 -07002119void
Jason Evans6b694c42014-01-07 16:47:56 -08002120arena_dalloc_junk_large(void *ptr, size_t usize)
2121{
2122
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002123 if (config_fill && unlikely(opt_junk_free))
Jason Evans6b694c42014-01-07 16:47:56 -08002124 memset(ptr, 0x5a, usize);
2125}
2126#ifdef JEMALLOC_JET
2127#undef arena_dalloc_junk_large
2128#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2129arena_dalloc_junk_large_t *arena_dalloc_junk_large =
2130 JEMALLOC_N(arena_dalloc_junk_large_impl);
2131#endif
2132
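/*
 * Deallocate a large run with arena->lock held by the caller. If junked is
 * true, junk filling has already been done.
 */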
Jason Evanse476f8a2010-01-16 09:53:50 -08002133void
Jason Evansfc0b3b72014-10-09 17:54:06 -07002134arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
2135 void *ptr, bool junked)
Jason Evanse476f8a2010-01-16 09:53:50 -08002136{
Jason Evans0c5dd032014-09-29 01:31:39 -07002137 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2138 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
2139 arena_run_t *run = &miscelm->run;
Jason Evans13668262010-01-31 03:57:29 -08002140
Jason Evans7372b152012-02-10 20:22:09 -08002141 if (config_fill || config_stats) {
Jason Evans6b694c42014-01-07 16:47:56 -08002142 size_t usize = arena_mapbits_large_size_get(chunk, pageind);
Jason Evanse476f8a2010-01-16 09:53:50 -08002143
Jason Evansfc0b3b72014-10-09 17:54:06 -07002144 if (!junked)
2145 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002146 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07002147 index_t index = size2index(usize) - NBINS;
2148
Jason Evans7372b152012-02-10 20:22:09 -08002149 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08002150 arena->stats.allocated_large -= usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002151 arena->stats.lstats[index].ndalloc++;
2152 arena->stats.lstats[index].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08002153 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002154 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002155
Jason Evans0c5dd032014-09-29 01:31:39 -07002156 arena_run_dalloc(arena, run, true, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002157}
2158
Jason Evans203484e2012-05-02 00:30:36 -07002159void
Jason Evansfc0b3b72014-10-09 17:54:06 -07002160arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
2161 void *ptr)
2162{
2163
2164 arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
2165}
2166
2167void
Jason Evans203484e2012-05-02 00:30:36 -07002168arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
2169{
2170
2171 malloc_mutex_lock(&arena->lock);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002172 arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
Jason Evans203484e2012-05-02 00:30:36 -07002173 malloc_mutex_unlock(&arena->lock);
2174}
2175
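/* Shrink a large allocation in place by trimming its trailing pages. */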
Jason Evanse476f8a2010-01-16 09:53:50 -08002176static void
2177arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07002178 size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08002179{
Jason Evans0c5dd032014-09-29 01:31:39 -07002180 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2181 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
2182 arena_run_t *run = &miscelm->run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002183
2184 assert(size < oldsize);
2185
2186 /*
2187 * Shrink the run, and make trailing pages available for other
2188 * allocations.
2189 */
2190 malloc_mutex_lock(&arena->lock);
Jason Evans0c5dd032014-09-29 01:31:39 -07002191 arena_run_trim_tail(arena, chunk, run, oldsize, size, true);
Jason Evans7372b152012-02-10 20:22:09 -08002192 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07002193 index_t oldindex = size2index(oldsize) - NBINS;
2194 index_t index = size2index(size) - NBINS;
2195
Jason Evans7372b152012-02-10 20:22:09 -08002196 arena->stats.ndalloc_large++;
2197 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002198 arena->stats.lstats[oldindex].ndalloc++;
2199 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08002200
Jason Evans7372b152012-02-10 20:22:09 -08002201 arena->stats.nmalloc_large++;
2202 arena->stats.nrequests_large++;
2203 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07002204 arena->stats.lstats[index].nmalloc++;
2205 arena->stats.lstats[index].nrequests++;
2206 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002207 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002208 malloc_mutex_unlock(&arena->lock);
2209}
2210
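/*
 * Try to grow a large allocation in place by splitting enough of the
 * following unallocated run to reach the requested usize. Returns true on
 * failure.
 */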
2211static bool
2212arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07002213 size_t oldsize, size_t size, size_t extra, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002214{
Jason Evansae4c7b42012-04-02 07:04:34 -07002215 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2216 size_t npages = oldsize >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002217 size_t followsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002218 size_t usize_min = s2u(size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002219
Jason Evans203484e2012-05-02 00:30:36 -07002220 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
Jason Evanse476f8a2010-01-16 09:53:50 -08002221
2222 /* Try to extend the run. */
Jason Evans155bfa72014-10-05 17:54:10 -07002223 assert(usize_min > oldsize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002224 malloc_mutex_lock(&arena->lock);
Jason Evans7393f442010-10-01 17:35:43 -07002225 if (pageind + npages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07002226 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
2227 (followsize = arena_mapbits_unallocated_size_get(chunk,
Jason Evans155bfa72014-10-05 17:54:10 -07002228 pageind+npages)) >= usize_min - oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002229 /*
2230 * The next run is available and sufficiently large. Split the
2231 * following run, then merge the first part with the existing
2232 * allocation.
2233 */
Guilherme Goncalves9c6a8d32014-12-17 14:46:35 -02002234 arena_run_t *run;
Jason Evans155bfa72014-10-05 17:54:10 -07002235 size_t flag_dirty, splitsize, usize;
2236
2237 usize = s2u(size + extra);
2238 while (oldsize + followsize < usize)
2239 usize = index2size(size2index(usize)-1);
2240 assert(usize >= usize_min);
2241 splitsize = usize - oldsize;
2242
Guilherme Goncalves9c6a8d32014-12-17 14:46:35 -02002243 run = &arena_miscelm_get(chunk, pageind+npages)->run;
Jason Evans0c5dd032014-09-29 01:31:39 -07002244 arena_run_split_large(arena, run, splitsize, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08002245
Jason Evans088e6a02010-10-18 00:04:44 -07002246 size = oldsize + splitsize;
Jason Evansae4c7b42012-04-02 07:04:34 -07002247 npages = size >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07002248
2249 /*
2250 * Mark the extended run as dirty if either portion of the run
2251 * was dirty before allocation. This is rather pedantic,
2252 * because there's not actually any sequence of events that
2253 * could cause the resulting run to be passed to
2254 * arena_run_dalloc() with the dirty argument set to false
2255 * (which is when dirty flag consistency would really matter).
2256 */
Jason Evans203484e2012-05-02 00:30:36 -07002257 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
2258 arena_mapbits_dirty_get(chunk, pageind+npages-1);
2259 arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
2260 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08002261
Jason Evans7372b152012-02-10 20:22:09 -08002262 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07002263 index_t oldindex = size2index(oldsize) - NBINS;
2264 index_t index = size2index(size) - NBINS;
2265
Jason Evans7372b152012-02-10 20:22:09 -08002266 arena->stats.ndalloc_large++;
2267 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002268 arena->stats.lstats[oldindex].ndalloc++;
2269 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08002270
Jason Evans7372b152012-02-10 20:22:09 -08002271 arena->stats.nmalloc_large++;
2272 arena->stats.nrequests_large++;
2273 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07002274 arena->stats.lstats[index].nmalloc++;
2275 arena->stats.lstats[index].nrequests++;
2276 arena->stats.lstats[index].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07002277 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002278 malloc_mutex_unlock(&arena->lock);
2279 return (false);
2280 }
2281 malloc_mutex_unlock(&arena->lock);
2282
2283 return (true);
2284}
2285
Jason Evans6b694c42014-01-07 16:47:56 -08002286#ifdef JEMALLOC_JET
2287#undef arena_ralloc_junk_large
2288#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
2289#endif
2290static void
2291arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
2292{
2293
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002294 if (config_fill && unlikely(opt_junk_free)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002295 memset((void *)((uintptr_t)ptr + usize), 0x5a,
2296 old_usize - usize);
2297 }
2298}
2299#ifdef JEMALLOC_JET
2300#undef arena_ralloc_junk_large
2301#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
2302arena_ralloc_junk_large_t *arena_ralloc_junk_large =
2303 JEMALLOC_N(arena_ralloc_junk_large_impl);
2304#endif
2305
Jason Evanse476f8a2010-01-16 09:53:50 -08002306/*
 2307 * Try to resize a large allocation in place, in order to avoid copying.
 2308 * This always fails when growing an object whose following run is in use.
2309 */
2310static bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07002311arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
2312 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002313{
Jason Evans155bfa72014-10-05 17:54:10 -07002314 size_t usize;
Jason Evanse476f8a2010-01-16 09:53:50 -08002315
Jason Evans155bfa72014-10-05 17:54:10 -07002316 /* Make sure extra can't cause size_t overflow. */
Daniel Micay809b0ac2014-10-23 10:30:52 -04002317 if (unlikely(extra >= arena_maxclass))
Jason Evans155bfa72014-10-05 17:54:10 -07002318 return (true);
2319
2320 usize = s2u(size + extra);
2321 if (usize == oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002322 /* Same size class. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002323 return (false);
2324 } else {
2325 arena_chunk_t *chunk;
2326 arena_t *arena;
2327
2328 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansee41ad42015-02-15 18:04:46 -08002329 arena = extent_node_arena_get(&chunk->node);
Jason Evanse476f8a2010-01-16 09:53:50 -08002330
Jason Evans155bfa72014-10-05 17:54:10 -07002331 if (usize < oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002332			/* Fill before shrinking in order to avoid a race. */
Jason Evans155bfa72014-10-05 17:54:10 -07002333 arena_ralloc_junk_large(ptr, oldsize, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002334 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
Jason Evans155bfa72014-10-05 17:54:10 -07002335 usize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002336 return (false);
2337 } else {
2338 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
Jason Evans155bfa72014-10-05 17:54:10 -07002339 oldsize, size, extra, zero);
Jason Evans551ebc42014-10-03 10:16:09 -07002340 if (config_fill && !ret && !zero) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002341 if (unlikely(opt_junk_alloc)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002342 memset((void *)((uintptr_t)ptr +
2343 oldsize), 0xa5, isalloc(ptr,
2344 config_prof) - oldsize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002345 } else if (unlikely(opt_zero)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002346 memset((void *)((uintptr_t)ptr +
2347 oldsize), 0, isalloc(ptr,
2348 config_prof) - oldsize);
2349 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002350 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002351 return (ret);
2352 }
2353 }
2354}
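
/*
 * Example of the decision above, using hypothetical sizes (assumes 4 KiB
 * pages and the default large size class spacing, where 16384 and 20480 are
 * adjacent large classes):
 *
 *   oldsize = 16384, size = 15000, extra = 1000:
 *     usize = s2u(16000) = 16384 == oldsize --> same class; nothing to do.
 *   oldsize = 20480, size = 15000, extra = 0:
 *     usize = s2u(15000) = 16384 < oldsize --> junk-fill the trailing 4096
 *     bytes (when junk filling is enabled) and shrink the run in place.
 *   oldsize = 16384, size = 17000, extra = 0:
 *     usize = s2u(17000) = 20480 > oldsize --> arena_ralloc_large_grow()
 *     succeeds only if the run immediately following the allocation is free
 *     and large enough.
 */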
2355
Jason Evansb2c31662014-01-12 15:05:44 -08002356bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07002357arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
2358 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002359{
Jason Evanse476f8a2010-01-16 09:53:50 -08002360
Jason Evans88fef7c2015-02-12 14:06:37 -08002361 if (likely(size <= arena_maxclass)) {
2362 /*
2363 * Avoid moving the allocation if the size class can be left the
2364 * same.
2365 */
2366 if (likely(oldsize <= arena_maxclass)) {
2367 if (oldsize <= SMALL_MAXCLASS) {
2368 assert(
2369 arena_bin_info[size2index(oldsize)].reg_size
2370 == oldsize);
2371 if ((size + extra <= SMALL_MAXCLASS &&
2372 size2index(size + extra) ==
2373 size2index(oldsize)) || (size <= oldsize &&
2374 size + extra >= oldsize))
Jason Evansb2c31662014-01-12 15:05:44 -08002375 return (false);
Jason Evans88fef7c2015-02-12 14:06:37 -08002376 } else {
2377 assert(size <= arena_maxclass);
2378 if (size + extra > SMALL_MAXCLASS) {
2379 if (!arena_ralloc_large(ptr, oldsize,
2380 size, extra, zero))
2381 return (false);
2382 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002383 }
2384 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002385
Jason Evans88fef7c2015-02-12 14:06:37 -08002386 /* Reallocation would require a move. */
2387 return (true);
2388 } else
2389 return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07002390}
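
/*
 * Example of the small size class checks above, with hypothetical sizes
 * (assumes a 16-byte quantum, so 96, 112, and 128 are adjacent small classes):
 *
 *   oldsize = 112, size = 100, extra = 0:
 *     size2index(100) == size2index(112) --> same class, no move.
 *   oldsize = 112, size = 96, extra = 32:
 *     size + extra = 128 maps to a different class, but size <= oldsize and
 *     size + extra >= oldsize, so the existing allocation already satisfies
 *     the request --> no move.
 *   oldsize = 112, size = 128, extra = 0:
 *     a larger class is required --> reallocation must move.
 */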
Jason Evanse476f8a2010-01-16 09:53:50 -08002391
Jason Evans8e3c3c62010-09-17 15:46:18 -07002392void *
Jason Evans5460aa62014-09-22 21:09:23 -07002393arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
Jason Evans1cb181e2015-01-29 15:30:47 -08002394 size_t extra, size_t alignment, bool zero, tcache_t *tcache)
Jason Evans8e3c3c62010-09-17 15:46:18 -07002395{
2396 void *ret;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002397
Jason Evans88fef7c2015-02-12 14:06:37 -08002398 if (likely(size <= arena_maxclass)) {
2399 size_t copysize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002400
Jason Evans88fef7c2015-02-12 14:06:37 -08002401 /* Try to avoid moving the allocation. */
2402 if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
2403 return (ptr);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002404
Jason Evans88fef7c2015-02-12 14:06:37 -08002405 /*
2406 * size and oldsize are different enough that we need to move
2407 * the object. In that case, fall back to allocating new space
2408 * and copying.
2409 */
Jason Evans38d92102011-03-23 00:37:29 -07002410 if (alignment != 0) {
Jason Evans88fef7c2015-02-12 14:06:37 -08002411 size_t usize = sa2u(size + extra, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002412 if (usize == 0)
2413 return (NULL);
Jason Evans1cb181e2015-01-29 15:30:47 -08002414 ret = ipalloct(tsd, usize, alignment, zero, tcache,
2415 arena);
Jason Evans88fef7c2015-02-12 14:06:37 -08002416 } else {
2417 ret = arena_malloc(tsd, arena, size + extra, zero,
2418 tcache);
2419 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07002420
Jason Evans88fef7c2015-02-12 14:06:37 -08002421 if (ret == NULL) {
2422 if (extra == 0)
2423 return (NULL);
2424 /* Try again, this time without extra. */
2425 if (alignment != 0) {
2426 size_t usize = sa2u(size, alignment);
2427 if (usize == 0)
2428 return (NULL);
2429 ret = ipalloct(tsd, usize, alignment, zero,
2430 tcache, arena);
2431 } else {
2432 ret = arena_malloc(tsd, arena, size, zero,
2433 tcache);
2434 }
2435
2436 if (ret == NULL)
2437 return (NULL);
2438 }
2439
2440 /*
2441 * Junk/zero filling was already done by
2442 * ipalloc()/arena_malloc().
2443 */
2444
2445 /*
2446 * Copy at most size bytes (not size+extra), since the caller
2447 * has no expectation that the extra bytes will be reliably
2448 * preserved.
2449 */
2450 copysize = (size < oldsize) ? size : oldsize;
2451 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
2452 memcpy(ret, ptr, copysize);
2453 isqalloc(tsd, ptr, oldsize, tcache);
2454 } else {
2455 ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra,
2456 alignment, zero, tcache);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002457 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002458 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08002459}
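
/*
 * Note on the move path above, with hypothetical sizes: for oldsize = 4096,
 * size = 8000, extra = 4000, and alignment = 0, the first attempt allocates
 * size + extra = 12000 bytes (rounded up to its size class); if that fails,
 * the retry allocates just size = 8000 bytes.  In either case only
 * min(size, oldsize) = 4096 bytes are copied, since the extra bytes carry no
 * preservation guarantee.
 */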
2460
Jason Evans609ae592012-10-11 13:53:15 -07002461dss_prec_t
2462arena_dss_prec_get(arena_t *arena)
2463{
2464 dss_prec_t ret;
2465
2466 malloc_mutex_lock(&arena->lock);
2467 ret = arena->dss_prec;
2468 malloc_mutex_unlock(&arena->lock);
2469 return (ret);
2470}
2471
Jason Evans4d434ad2014-04-15 12:09:48 -07002472bool
Jason Evans609ae592012-10-11 13:53:15 -07002473arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2474{
2475
Jason Evans551ebc42014-10-03 10:16:09 -07002476 if (!have_dss)
Jason Evans4d434ad2014-04-15 12:09:48 -07002477 return (dss_prec != dss_prec_disabled);
Jason Evans609ae592012-10-11 13:53:15 -07002478 malloc_mutex_lock(&arena->lock);
2479 arena->dss_prec = dss_prec;
2480 malloc_mutex_unlock(&arena->lock);
Jason Evans4d434ad2014-04-15 12:09:48 -07002481 return (false);
Jason Evans609ae592012-10-11 13:53:15 -07002482}
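
/*
 * In practice the DSS precedence is read and written through the
 * "arena.<i>.dss" mallctl rather than by calling these functions directly.
 * A minimal sketch of that usage (assuming the default, unprefixed public
 * API names; setting fails when the DSS is not supported on the platform):
 *
 *	const char *dss = "primary";
 *	mallctl("arena.0.dss", NULL, NULL, (void *)&dss, sizeof(dss));
 */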
2483
2484void
2485arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
2486 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
Jason Evans3c4d92e2014-10-12 22:53:59 -07002487 malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
Jason Evans609ae592012-10-11 13:53:15 -07002488{
2489 unsigned i;
2490
2491 malloc_mutex_lock(&arena->lock);
2492 *dss = dss_prec_names[arena->dss_prec];
2493 *nactive += arena->nactive;
2494 *ndirty += arena->ndirty;
2495
2496 astats->mapped += arena->stats.mapped;
2497 astats->npurge += arena->stats.npurge;
2498 astats->nmadvise += arena->stats.nmadvise;
2499 astats->purged += arena->stats.purged;
Jason Evans4581b972014-11-27 17:22:36 -02002500 astats->metadata_mapped += arena->stats.metadata_mapped;
2501 astats->metadata_allocated += arena_metadata_allocated_get(arena);
Jason Evans609ae592012-10-11 13:53:15 -07002502 astats->allocated_large += arena->stats.allocated_large;
2503 astats->nmalloc_large += arena->stats.nmalloc_large;
2504 astats->ndalloc_large += arena->stats.ndalloc_large;
2505 astats->nrequests_large += arena->stats.nrequests_large;
Jason Evanse2deab72014-05-15 22:22:27 -07002506 astats->allocated_huge += arena->stats.allocated_huge;
2507 astats->nmalloc_huge += arena->stats.nmalloc_huge;
2508 astats->ndalloc_huge += arena->stats.ndalloc_huge;
Jason Evans609ae592012-10-11 13:53:15 -07002509
2510 for (i = 0; i < nlclasses; i++) {
2511 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
2512 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
2513 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
2514 lstats[i].curruns += arena->stats.lstats[i].curruns;
2515 }
Jason Evans3c4d92e2014-10-12 22:53:59 -07002516
2517 for (i = 0; i < nhclasses; i++) {
2518 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
2519 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
2520 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
2521 }
Jason Evans609ae592012-10-11 13:53:15 -07002522 malloc_mutex_unlock(&arena->lock);
2523
2524 for (i = 0; i < NBINS; i++) {
2525 arena_bin_t *bin = &arena->bins[i];
2526
2527 malloc_mutex_lock(&bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07002528 bstats[i].nmalloc += bin->stats.nmalloc;
2529 bstats[i].ndalloc += bin->stats.ndalloc;
2530 bstats[i].nrequests += bin->stats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002531 bstats[i].curregs += bin->stats.curregs;
Jason Evans609ae592012-10-11 13:53:15 -07002532 if (config_tcache) {
2533 bstats[i].nfills += bin->stats.nfills;
2534 bstats[i].nflushes += bin->stats.nflushes;
2535 }
2536 bstats[i].nruns += bin->stats.nruns;
2537 bstats[i].reruns += bin->stats.reruns;
2538 bstats[i].curruns += bin->stats.curruns;
2539 malloc_mutex_unlock(&bin->lock);
2540 }
2541}
2542
Jason Evans8bb31982014-10-07 23:14:57 -07002543arena_t *
2544arena_new(unsigned ind)
Jason Evanse476f8a2010-01-16 09:53:50 -08002545{
Jason Evans8bb31982014-10-07 23:14:57 -07002546 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08002547 unsigned i;
2548 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002549
Jason Evans8bb31982014-10-07 23:14:57 -07002550 /*
Jason Evans3c4d92e2014-10-12 22:53:59 -07002551 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
2552 * because there is no way to clean up if base_alloc() OOMs.
Jason Evans8bb31982014-10-07 23:14:57 -07002553 */
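	/*
	 * The layout of that single allocation is: the arena_t itself (padded
	 * to a cacheline boundary), then nlclasses large stats records (padded
	 * to a quantum boundary), then nhclasses huge stats records.  The
	 * pointer arithmetic below that initializes arena->stats.lstats and
	 * arena->stats.hstats relies on exactly these offsets.
	 */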
2554 if (config_stats) {
2555 arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
Jason Evans3c4d92e2014-10-12 22:53:59 -07002556		    + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)) +
2557		    nhclasses * sizeof(malloc_huge_stats_t));
Jason Evans8bb31982014-10-07 23:14:57 -07002558 } else
2559 arena = (arena_t *)base_alloc(sizeof(arena_t));
2560 if (arena == NULL)
2561 return (NULL);
2562
Jason Evans6109fe02010-02-10 10:37:56 -08002563 arena->ind = ind;
Jason Evans597632b2011-03-18 13:41:33 -07002564 arena->nthreads = 0;
Jason Evanscbf3a6d2015-02-11 12:24:27 -08002565 if (malloc_mutex_init(&arena->lock))
2566 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08002567
Jason Evans7372b152012-02-10 20:22:09 -08002568 if (config_stats) {
2569 memset(&arena->stats, 0, sizeof(arena_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08002570 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
2571 + CACHELINE_CEILING(sizeof(arena_t)));
Jason Evans7372b152012-02-10 20:22:09 -08002572 memset(arena->stats.lstats, 0, nlclasses *
2573 sizeof(malloc_large_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08002574 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
2575 + CACHELINE_CEILING(sizeof(arena_t)) +
Jason Evans3c4d92e2014-10-12 22:53:59 -07002576 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
2577 memset(arena->stats.hstats, 0, nhclasses *
2578 sizeof(malloc_huge_stats_t));
Jason Evans7372b152012-02-10 20:22:09 -08002579 if (config_tcache)
2580 ql_new(&arena->tcache_ql);
2581 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002582
Jason Evans7372b152012-02-10 20:22:09 -08002583 if (config_prof)
2584 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08002585
Jason Evans609ae592012-10-11 13:53:15 -07002586 arena->dss_prec = chunk_dss_prec_get();
2587
Jason Evanse476f8a2010-01-16 09:53:50 -08002588 arena->spare = NULL;
2589
2590 arena->nactive = 0;
2591 arena->ndirty = 0;
2592
Jason Evanse3d13062012-10-30 15:42:37 -07002593 arena_avail_tree_new(&arena->runs_avail);
Jason Evansee41ad42015-02-15 18:04:46 -08002594 qr_new(&arena->runs_dirty, rd_link);
Jason Evans738e0892015-02-18 01:15:50 -08002595 qr_new(&arena->chunks_cache, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08002596
2597 ql_new(&arena->huge);
2598 if (malloc_mutex_init(&arena->huge_mtx))
2599 return (NULL);
2600
Jason Evans738e0892015-02-18 01:15:50 -08002601 extent_tree_szad_new(&arena->chunks_szad_cache);
2602 extent_tree_ad_new(&arena->chunks_ad_cache);
Jason Evansee41ad42015-02-15 18:04:46 -08002603 extent_tree_szad_new(&arena->chunks_szad_mmap);
2604 extent_tree_ad_new(&arena->chunks_ad_mmap);
2605 extent_tree_szad_new(&arena->chunks_szad_dss);
2606 extent_tree_ad_new(&arena->chunks_ad_dss);
2607 if (malloc_mutex_init(&arena->chunks_mtx))
2608 return (NULL);
2609 ql_new(&arena->node_cache);
2610 if (malloc_mutex_init(&arena->node_cache_mtx))
2611 return (NULL);
2612
2613 arena->chunk_alloc = chunk_alloc_default;
2614 arena->chunk_dalloc = chunk_dalloc_default;
Jason Evanse476f8a2010-01-16 09:53:50 -08002615
2616 /* Initialize bins. */
Jason Evansb1726102012-02-28 16:50:47 -08002617 for (i = 0; i < NBINS; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002618 bin = &arena->bins[i];
Jason Evans86815df2010-03-13 20:32:56 -08002619 if (malloc_mutex_init(&bin->lock))
Jason Evans8bb31982014-10-07 23:14:57 -07002620 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08002621 bin->runcur = NULL;
2622 arena_run_tree_new(&bin->runs);
Jason Evans7372b152012-02-10 20:22:09 -08002623 if (config_stats)
2624 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
Jason Evanse476f8a2010-01-16 09:53:50 -08002625 }
2626
Jason Evans8bb31982014-10-07 23:14:57 -07002627 return (arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002628}
2629
Jason Evans49f7e8f2011-03-15 13:59:15 -07002630/*
2631 * Calculate bin_info->run_size such that it meets the following constraints:
2632 *
Jason Evans155bfa72014-10-05 17:54:10 -07002633 * *) bin_info->run_size <= arena_maxrun
Jason Evans47e57f92011-03-22 09:00:56 -07002634 * *) bin_info->nregs <= RUN_MAXREGS
Jason Evans49f7e8f2011-03-15 13:59:15 -07002635 *
Jason Evans0c5dd032014-09-29 01:31:39 -07002636 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
2637 * these settings are all interdependent.
Jason Evans49f7e8f2011-03-15 13:59:15 -07002638 */
Jason Evans0c5dd032014-09-29 01:31:39 -07002639static void
2640bin_info_run_size_calc(arena_bin_info_t *bin_info)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002641{
Jason Evans122449b2012-04-06 00:35:09 -07002642 size_t pad_size;
Jason Evans0c5dd032014-09-29 01:31:39 -07002643 size_t try_run_size, perfect_run_size, actual_run_size;
2644 uint32_t try_nregs, perfect_nregs, actual_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002645
2646 /*
Jason Evans122449b2012-04-06 00:35:09 -07002647 * Determine redzone size based on minimum alignment and minimum
2648 * redzone size. Add padding to the end of the run if it is needed to
2649 * align the regions. The padding allows each redzone to be half the
2650 * minimum alignment; without the padding, each redzone would have to
2651 * be twice as large in order to maintain alignment.
2652 */
Jason Evans9c640bf2014-09-11 16:20:44 -07002653 if (config_fill && unlikely(opt_redzone)) {
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002654 size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
2655 1);
Jason Evans122449b2012-04-06 00:35:09 -07002656 if (align_min <= REDZONE_MINSIZE) {
2657 bin_info->redzone_size = REDZONE_MINSIZE;
2658 pad_size = 0;
2659 } else {
2660 bin_info->redzone_size = align_min >> 1;
2661 pad_size = bin_info->redzone_size;
2662 }
2663 } else {
2664 bin_info->redzone_size = 0;
2665 pad_size = 0;
2666 }
2667 bin_info->reg_interval = bin_info->reg_size +
2668 (bin_info->redzone_size << 1);
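	/*
	 * For example (hypothetical values, assuming REDZONE_MINSIZE == 16 and
	 * redzones enabled): reg_size == 96 has minimum alignment 32, so each
	 * redzone is 16 bytes, pad_size is 16, and reg_interval becomes
	 * 96 + 2*16 == 128.
	 */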
2669
2670 /*
Jason Evans0c5dd032014-09-29 01:31:39 -07002671 * Compute run size under ideal conditions (no redzones, no limit on run
2672 * size).
Jason Evans49f7e8f2011-03-15 13:59:15 -07002673 */
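	/*
	 * For example, with 4 KiB pages and reg_size == 96, the loop below
	 * tries 4096 (42 * 96 == 4032, not exact) and 8192 (85 * 96 == 8160,
	 * not exact) before settling on perfect_run_size == 12288 with
	 * perfect_nregs == 128.
	 */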
Jason Evans0c5dd032014-09-29 01:31:39 -07002674 try_run_size = PAGE;
2675 try_nregs = try_run_size / bin_info->reg_size;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002676 do {
Jason Evans0c5dd032014-09-29 01:31:39 -07002677 perfect_run_size = try_run_size;
2678 perfect_nregs = try_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002679
Jason Evansae4c7b42012-04-02 07:04:34 -07002680 try_run_size += PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07002681 try_nregs = try_run_size / bin_info->reg_size;
2682 } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
2683 assert(perfect_nregs <= RUN_MAXREGS);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002684
Jason Evans0c5dd032014-09-29 01:31:39 -07002685 actual_run_size = perfect_run_size;
2686 actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
2687
2688 /*
2689 * Redzones can require enough padding that not even a single region can
2690 * fit within the number of pages that would normally be dedicated to a
2691 * run for this size class. Increase the run size until at least one
2692 * region fits.
2693 */
2694 while (actual_nregs == 0) {
2695 assert(config_fill && unlikely(opt_redzone));
2696
2697 actual_run_size += PAGE;
2698 actual_nregs = (actual_run_size - pad_size) /
2699 bin_info->reg_interval;
2700 }
2701
2702 /*
2703 * Make sure that the run will fit within an arena chunk.
2704 */
Jason Evans155bfa72014-10-05 17:54:10 -07002705 while (actual_run_size > arena_maxrun) {
Jason Evans0c5dd032014-09-29 01:31:39 -07002706 actual_run_size -= PAGE;
2707 actual_nregs = (actual_run_size - pad_size) /
2708 bin_info->reg_interval;
2709 }
2710 assert(actual_nregs > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002711
2712 /* Copy final settings. */
Jason Evans0c5dd032014-09-29 01:31:39 -07002713 bin_info->run_size = actual_run_size;
2714 bin_info->nregs = actual_nregs;
2715 bin_info->reg0_offset = actual_run_size - (actual_nregs *
2716 bin_info->reg_interval) - pad_size + bin_info->redzone_size;
Jason Evans122449b2012-04-06 00:35:09 -07002717
2718 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
2719 * bin_info->reg_interval) + pad_size == bin_info->run_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002720}
2721
Jason Evansb1726102012-02-28 16:50:47 -08002722static void
Jason Evans49f7e8f2011-03-15 13:59:15 -07002723bin_info_init(void)
2724{
2725 arena_bin_info_t *bin_info;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002726
Jason Evansd04047c2014-05-28 16:11:55 -07002727#define BIN_INFO_INIT_bin_yes(index, size) \
2728 bin_info = &arena_bin_info[index]; \
Jason Evansb1726102012-02-28 16:50:47 -08002729 bin_info->reg_size = size; \
Jason Evans0c5dd032014-09-29 01:31:39 -07002730 bin_info_run_size_calc(bin_info); \
Jason Evansb1726102012-02-28 16:50:47 -08002731 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
Jason Evansd04047c2014-05-28 16:11:55 -07002732#define BIN_INFO_INIT_bin_no(index, size)
2733#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
2734 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
Jason Evansb1726102012-02-28 16:50:47 -08002735 SIZE_CLASSES
Jason Evansd04047c2014-05-28 16:11:55 -07002736#undef BIN_INFO_INIT_bin_yes
2737#undef BIN_INFO_INIT_bin_no
2738#undef SC
Jason Evans49f7e8f2011-03-15 13:59:15 -07002739}
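
/*
 * The SC() rows in SIZE_CLASSES drive the expansion above.  As a sketch
 * (the exact rows depend on the configured quantum and page size), a row
 * along the lines of
 *
 *	SC(0, 3, 3, 0, yes, 3)
 *
 * describes an 8-byte class ((1 << 3) + (0 << 3)) and expands to
 * BIN_INFO_INIT_bin_yes(0, 8), initializing arena_bin_info[0], while "no"
 * rows (large classes) expand to nothing.
 */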
2740
Jason Evansb1726102012-02-28 16:50:47 -08002741void
Jason Evansa0bf2422010-01-29 14:30:41 -08002742arena_boot(void)
Jason Evanse476f8a2010-01-16 09:53:50 -08002743{
Jason Evansa0bf2422010-01-29 14:30:41 -08002744 size_t header_size;
Jason Evans7393f442010-10-01 17:35:43 -07002745 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08002746
Jason Evanse476f8a2010-01-16 09:53:50 -08002747 /*
2748 * Compute the header size such that it is large enough to contain the
Jason Evans7393f442010-10-01 17:35:43 -07002749 * page map. The page map is biased to omit entries for the header
2750 * itself, so some iteration is necessary to compute the map bias.
2751 *
2752 * 1) Compute safe header_size and map_bias values that include enough
2753 * space for an unbiased page map.
2754 * 2) Refine map_bias based on (1) to omit the header pages in the page
2755 * map. The resulting map_bias may be one too small.
2756 * 3) Refine map_bias based on (2). The result will be >= the result
2757 * from (2), and will always be correct.
Jason Evanse476f8a2010-01-16 09:53:50 -08002758 */
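	/*
	 * Example with hypothetical values (4 KiB pages, chunk_npages == 512,
	 * a fixed header of 88 bytes, and 112 bytes of map entries per page):
	 *
	 *   1) map_bias = 0:   header = 88 + 112*512 = 57432 --> map_bias = 15
	 *   2) map_bias = 15:  header = 88 + 112*497 = 55752 --> map_bias = 14
	 *   3) map_bias = 14:  header = 88 + 112*498 = 55864 --> map_bias = 14
	 *
	 * The third pass confirms that 14 header pages suffice.
	 */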
Jason Evans7393f442010-10-01 17:35:43 -07002759 map_bias = 0;
2760 for (i = 0; i < 3; i++) {
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002761 header_size = offsetof(arena_chunk_t, map_bits) +
2762 ((sizeof(arena_chunk_map_bits_t) +
2763 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
Jason Evans0c5dd032014-09-29 01:31:39 -07002764 map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
Jason Evans7393f442010-10-01 17:35:43 -07002765 }
2766 assert(map_bias > 0);
2767
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002768 map_misc_offset = offsetof(arena_chunk_t, map_bits) +
2769 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
2770
Jason Evans155bfa72014-10-05 17:54:10 -07002771 arena_maxrun = chunksize - (map_bias << LG_PAGE);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002772 assert(arena_maxrun > 0);
Jason Evans155bfa72014-10-05 17:54:10 -07002773 arena_maxclass = index2size(size2index(chunksize)-1);
2774 if (arena_maxclass > arena_maxrun) {
2775 /*
2776 * For small chunk sizes it's possible for there to be fewer
2777 * non-header pages available than are necessary to serve the
2778 * size classes just below chunksize.
2779 */
2780 arena_maxclass = arena_maxrun;
2781 }
Jason Evansfc0b3b72014-10-09 17:54:06 -07002782 assert(arena_maxclass > 0);
Jason Evans155bfa72014-10-05 17:54:10 -07002783 nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS);
Jason Evans3c4d92e2014-10-12 22:53:59 -07002784 nhclasses = NSIZES - nlclasses - NBINS;
Jason Evansa0bf2422010-01-29 14:30:41 -08002785
Jason Evansb1726102012-02-28 16:50:47 -08002786 bin_info_init();
Jason Evanse476f8a2010-01-16 09:53:50 -08002787}
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002788
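/*
 * Fork handling: the bootstrap code typically registers jemalloc-wide
 * prefork/postfork hooks via pthread_atfork(), which in turn call the
 * functions below for each initialized arena.  arena_prefork() acquires every
 * arena mutex in a fixed order, and the postfork functions undo that in the
 * reverse order in the parent and the child, so the child cannot inherit an
 * arena mutex that another thread held across fork(2).
 */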
2789void
2790arena_prefork(arena_t *arena)
2791{
2792 unsigned i;
2793
2794 malloc_mutex_prefork(&arena->lock);
Jason Evanscbf3a6d2015-02-11 12:24:27 -08002795 malloc_mutex_prefork(&arena->huge_mtx);
2796 malloc_mutex_prefork(&arena->chunks_mtx);
2797 malloc_mutex_prefork(&arena->node_cache_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002798 for (i = 0; i < NBINS; i++)
2799 malloc_mutex_prefork(&arena->bins[i].lock);
2800}
2801
2802void
2803arena_postfork_parent(arena_t *arena)
2804{
2805 unsigned i;
2806
2807 for (i = 0; i < NBINS; i++)
2808 malloc_mutex_postfork_parent(&arena->bins[i].lock);
Jason Evanscbf3a6d2015-02-11 12:24:27 -08002809 malloc_mutex_postfork_parent(&arena->node_cache_mtx);
2810 malloc_mutex_postfork_parent(&arena->chunks_mtx);
2811 malloc_mutex_postfork_parent(&arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002812 malloc_mutex_postfork_parent(&arena->lock);
2813}
2814
2815void
2816arena_postfork_child(arena_t *arena)
2817{
2818 unsigned i;
2819
2820 for (i = 0; i < NBINS; i++)
2821 malloc_mutex_postfork_child(&arena->bins[i].lock);
Jason Evanscbf3a6d2015-02-11 12:24:27 -08002822 malloc_mutex_postfork_child(&arena->node_cache_mtx);
2823 malloc_mutex_postfork_child(&arena->chunks_mtx);
2824 malloc_mutex_postfork_child(&arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002825 malloc_mutex_postfork_child(&arena->lock);
2826}