#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t arena_bin_info[NBINS];

JEMALLOC_ALIGNED(CACHELINE)
const uint32_t small_bin2size_tab[NBINS] = {
#define B2S_bin_yes(size) \
    size,
#define B2S_bin_no(size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
    B2S_bin_##bin((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
    SIZE_CLASSES
#undef B2S_bin_yes
#undef B2S_bin_no
#undef SC
};

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t small_size2bin_tab[] = {
#define S2B_3(i) i,
#define S2B_4(i) S2B_3(i) S2B_3(i)
#define S2B_5(i) S2B_4(i) S2B_4(i)
#define S2B_6(i) S2B_5(i) S2B_5(i)
#define S2B_7(i) S2B_6(i) S2B_6(i)
#define S2B_8(i) S2B_7(i) S2B_7(i)
#define S2B_9(i) S2B_8(i) S2B_8(i)
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
    S2B_##lg_delta_lookup(index)
    SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_no
#undef SC
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

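/*
 * Map a misc element back to its containing chunk and return the raw map
 * bits for the corresponding page.
 */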
JEMALLOC_INLINE_C size_t
arena_miscelm_to_bits(arena_chunk_map_misc_t *miscelm)
{
    arena_chunk_t *chunk = CHUNK_ADDR2BASE(miscelm);
    size_t pageind = arena_miscelm_to_pageind(miscelm);

    return arena_mapbits_get(chunk, pageind);
}

static inline int
arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
    uintptr_t a_miscelm = (uintptr_t)a;
    uintptr_t b_miscelm = (uintptr_t)b;

    assert(a != NULL);
    assert(b != NULL);

    return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
    rb_link, arena_run_comp)

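/*
 * runs_avail ordering: primarily by run size (page-aligned portion of the
 * map bits), with ties broken by miscelm address. A size encoded with
 * CHUNK_MAP_KEY acts as a search key that sorts below any real element of
 * equal size, so nsearch() yields the lowest-addressed best fit.
 */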
static inline int
arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
    int ret;
    size_t a_size;
    size_t b_size = arena_miscelm_to_bits(b) & ~PAGE_MASK;
    uintptr_t a_miscelm = (uintptr_t)a;
    uintptr_t b_miscelm = (uintptr_t)b;

    if (a_miscelm & CHUNK_MAP_KEY)
        a_size = a_miscelm & ~PAGE_MASK;
    else
        a_size = arena_miscelm_to_bits(a) & ~PAGE_MASK;

    ret = (a_size > b_size) - (a_size < b_size);
    if (ret == 0) {
        if (!(a_miscelm & CHUNK_MAP_KEY))
            ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
        else {
            /*
             * Treat keys as if they are lower than anything else.
             */
            ret = -1;
        }
    }

    return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
    arena_chunk_map_misc_t, rb_link, arena_avail_comp)

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
        pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
        pageind));
}

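/*
 * Dirty runs are additionally tracked on the arena->runs_dirty list (in
 * insertion order) so that purging can walk them without a tree search;
 * arena->ndirty mirrors the total number of dirty pages.
 */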
static void
arena_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
    arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
    assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
        CHUNK_MAP_DIRTY);
    ql_elm_new(miscelm, dr_link);
    ql_tail_insert(&arena->runs_dirty, miscelm, dr_link);
    arena->ndirty += npages;
}

static void
arena_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
    arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
    assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
        CHUNK_MAP_DIRTY);
    ql_remove(&arena->runs_dirty, miscelm, dr_link);
    arena->ndirty -= npages;
}

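/*
 * Allocate a region from a small run: claim the first free region in the
 * run's bitmap and compute its address from the run's backing pages, the
 * bin's reg0_offset, and the region interval.
 */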
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
    void *ret;
    unsigned regind;
    arena_chunk_map_misc_t *miscelm;
    void *rpages;

    assert(run->nfree > 0);
    assert(bitmap_full(run->bitmap, &bin_info->bitmap_info) == false);

    regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
    miscelm = arena_run_to_miscelm(run);
    rpages = arena_miscelm_to_rpages(miscelm);
    ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
        (uintptr_t)(bin_info->reg_interval * regind));
    run->nfree--;
    if (regind == run->nextind)
        run->nextind++;
    assert(regind < run->nextind);
    return (ret);
}

static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    size_t mapbits = arena_mapbits_get(chunk, pageind);
    size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
    arena_bin_info_t *bin_info = &arena_bin_info[binind];
    unsigned regind = arena_run_regind(run, bin_info, ptr);

    assert(run->nfree < bin_info->nregs);
    /* Freeing an interior pointer can cause assertion failure. */
    assert(((uintptr_t)ptr -
        ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
        (uintptr_t)bin_info->reg0_offset)) %
        (uintptr_t)bin_info->reg_interval == 0);
    assert((uintptr_t)ptr >=
        (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
        (uintptr_t)bin_info->reg0_offset);
    /* Freeing an unallocated pointer can cause assertion failure. */
    assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

    bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
    run->nfree++;
}

static inline void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
        (run_ind << LG_PAGE)), (npages << LG_PAGE));
    memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
        (npages << LG_PAGE));
}

static inline void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

    JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
        << LG_PAGE)), PAGE);
}

static inline void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
    size_t i;
    UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

    arena_run_page_mark_zeroed(chunk, run_ind);
    for (i = 0; i < PAGE / sizeof(size_t); i++)
        assert(p[i] == 0);
}

static void
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{

    if (config_stats) {
        ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
            - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
            LG_PAGE);
        if (cactive_diff != 0)
            stats_cactive_add(cactive_diff);
    }
}

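/*
 * Remove an unallocated run from runs_avail (and runs_dirty if dirty) so
 * that its first need_pages pages can be allocated, and re-insert any
 * trailing remainder as a smaller unallocated run.
 */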
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t need_pages)
{
    size_t total_pages, rem_pages;

    total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
        LG_PAGE;
    assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
        flag_dirty);
    assert(need_pages <= total_pages);
    rem_pages = total_pages - need_pages;

    arena_avail_remove(arena, chunk, run_ind, total_pages);
    if (flag_dirty != 0)
        arena_dirty_remove(arena, chunk, run_ind, total_pages);
    arena_cactive_update(arena, need_pages, 0);
    arena->nactive += need_pages;

    /* Keep track of trailing unused pages for later use. */
    if (rem_pages > 0) {
        if (flag_dirty != 0) {
            arena_mapbits_unallocated_set(chunk,
                run_ind+need_pages, (rem_pages << LG_PAGE),
                flag_dirty);
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                flag_dirty);
            arena_dirty_insert(arena, chunk, run_ind+need_pages,
                rem_pages);
        } else {
            arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
                (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+need_pages));
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+total_pages-1));
        }
        arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
    }
}

static void
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    size_t flag_dirty, run_ind, need_pages, i;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    run_ind = arena_miscelm_to_pageind(miscelm);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);

    if (remove) {
        arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
            need_pages);
    }

    if (zero) {
        if (flag_dirty == 0) {
            /*
             * The run is clean, so some pages may be zeroed (i.e.
             * never before touched).
             */
            for (i = 0; i < need_pages; i++) {
                if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
                    != 0)
                    arena_run_zero(chunk, run_ind+i, 1);
                else if (config_debug) {
                    arena_run_page_validate_zeroed(chunk,
                        run_ind+i);
                } else {
                    arena_run_page_mark_zeroed(chunk,
                        run_ind+i);
                }
            }
        } else {
            /* The run is dirty, so all pages must be zeroed. */
            arena_run_zero(chunk, run_ind, need_pages);
        }
    } else {
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
            (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
    }

    /*
     * Set the last element first, in case the run only contains one page
     * (i.e. both statements set the same element).
     */
    arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
    arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
}

static void
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

    arena_run_split_large_helper(arena, run, size, true, zero);
}

static void
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

    arena_run_split_large_helper(arena, run, size, false, zero);
}

static void
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    size_t binind)
{
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    size_t flag_dirty, run_ind, need_pages, i;

    assert(binind != BININD_INVALID);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    run_ind = arena_miscelm_to_pageind(miscelm);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);

    arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);

    /*
     * Propagate the dirty and unzeroed flags to the allocated small run,
     * so that arena_dalloc_bin_run() has the ability to conditionally trim
     * clean pages.
     */
    arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
    if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
        run_ind) == 0)
        arena_run_page_validate_zeroed(chunk, run_ind);
    for (i = 1; i < need_pages - 1; i++) {
        arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
        if (config_debug && flag_dirty == 0 &&
            arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
            arena_run_page_validate_zeroed(chunk, run_ind+i);
    }
    arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
        binind, flag_dirty);
    if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
        run_ind+need_pages-1) == 0)
        arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
        (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
    arena_chunk_t *chunk;

    assert(arena->spare != NULL);

    chunk = arena->spare;
    arena->spare = NULL;

    assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
        arena_maxclass);
    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
        arena_maxclass);
    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
        arena_mapbits_dirty_get(chunk, chunk_npages-1));

    return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
    bool *zero)
{
    arena_chunk_t *chunk;
    chunk_alloc_t *chunk_alloc;
    chunk_dalloc_t *chunk_dalloc;

    chunk_alloc = arena->chunk_alloc;
    chunk_dalloc = arena->chunk_dalloc;
    malloc_mutex_unlock(&arena->lock);
    chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
        arena->ind, size, alignment, zero);
    malloc_mutex_lock(&arena->lock);
    if (config_stats && chunk != NULL)
        arena->stats.mapped += chunksize;

    return (chunk);
}

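/*
 * Allocate a huge chunk via the arena's chunk_alloc hook. Stats are updated
 * optimistically before arena->lock is dropped and rolled back if the
 * allocation fails.
 */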
void *
arena_chunk_alloc_huge(arena_t *arena, size_t size, size_t alignment,
    bool *zero)
{
    void *ret;
    chunk_alloc_t *chunk_alloc;
    chunk_dalloc_t *chunk_dalloc;

    malloc_mutex_lock(&arena->lock);
    chunk_alloc = arena->chunk_alloc;
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        /* Optimistically update stats prior to unlocking. */
        arena->stats.mapped += size;
        arena->stats.allocated_huge += size;
        arena->stats.nmalloc_huge++;
        arena->stats.nrequests_huge++;
    }
    arena->nactive += (size >> LG_PAGE);
    malloc_mutex_unlock(&arena->lock);

    ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
        size, alignment, zero);
    if (config_stats) {
        if (ret != NULL)
            stats_cactive_add(size);
        else {
            /* Revert optimistic stats updates. */
            malloc_mutex_lock(&arena->lock);
            arena->stats.mapped -= size;
            arena->stats.allocated_huge -= size;
            arena->stats.nmalloc_huge--;
            malloc_mutex_unlock(&arena->lock);
        }
    }

    return (ret);
}

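/*
 * Allocate and initialize a brand new chunk: its page map is set up to
 * describe a single maximal unallocated run spanning all usable pages.
 */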
static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
    arena_chunk_t *chunk;
    bool zero;
    size_t unzeroed, i;

    assert(arena->spare == NULL);

    zero = false;
    chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero);
    if (chunk == NULL)
        return (NULL);

    chunk->arena = arena;

    /*
     * Initialize the map to contain one maximal free untouched run. Mark
     * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
     */
    unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
    arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
        unzeroed);
    /*
     * There is no need to initialize the internal page map entries unless
     * the chunk is not zeroed.
     */
    if (zero == false) {
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
            (void *)arena_bitselm_get(chunk, map_bias+1),
            (size_t)((uintptr_t)arena_bitselm_get(chunk,
            chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
            map_bias+1)));
        for (i = map_bias+1; i < chunk_npages-1; i++)
            arena_mapbits_unzeroed_set(chunk, i, unzeroed);
    } else {
        JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(
            (void *)arena_bitselm_get(chunk, map_bias+1),
            (size_t)((uintptr_t)arena_bitselm_get(chunk,
            chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
            map_bias+1)));
        if (config_debug) {
            for (i = map_bias+1; i < chunk_npages-1; i++) {
                assert(arena_mapbits_unzeroed_get(chunk, i) ==
                    unzeroed);
            }
        }
    }
    arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
        unzeroed);

    return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
    arena_chunk_t *chunk;

    if (arena->spare != NULL)
        chunk = arena_chunk_init_spare(arena);
    else {
        chunk = arena_chunk_init_hard(arena);
        if (chunk == NULL)
            return (NULL);
    }

    /* Insert the run into the runs_avail tree. */
    arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

    return (chunk);
}

static void
arena_chunk_dalloc_internal(arena_t *arena, arena_chunk_t *chunk)
{
    chunk_dalloc_t *chunk_dalloc;

    chunk_dalloc = arena->chunk_dalloc;
    malloc_mutex_unlock(&arena->lock);
    chunk_dalloc((void *)chunk, chunksize, arena->ind);
    malloc_mutex_lock(&arena->lock);
    if (config_stats)
        arena->stats.mapped -= chunksize;
}

void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size)
{
    chunk_dalloc_t *chunk_dalloc;

    malloc_mutex_lock(&arena->lock);
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        arena->stats.mapped -= size;
        arena->stats.allocated_huge -= size;
        arena->stats.ndalloc_huge++;
        stats_cactive_sub(size);
    }
    arena->nactive -= (size >> LG_PAGE);
    malloc_mutex_unlock(&arena->lock);
    chunk_dalloc(chunk, size, arena->ind);
}

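/*
 * Retire a completely unused chunk. The chunk becomes the arena's spare;
 * any previously cached spare is released via the chunk_dalloc hook.
 */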
static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{

    assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
        arena_maxclass);
    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
        arena_maxclass);
    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
        arena_mapbits_dirty_get(chunk, chunk_npages-1));

    /*
     * Remove run from the runs_avail tree, so that the arena does not use
     * it.
     */
    arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

    if (arena->spare != NULL) {
        arena_chunk_t *spare = arena->spare;

        arena->spare = chunk;
        if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
            arena_dirty_remove(arena, spare, map_bias,
                chunk_npages-map_bias);
        }
        arena_chunk_dalloc_internal(arena, spare);
    } else
        arena->spare = chunk;
}

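/*
 * Try to satisfy a large run request from runs_avail. The requested size is
 * encoded as a CHUNK_MAP_KEY search key so that nsearch() returns the
 * smallest available run that is large enough.
 */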
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
    arena_chunk_map_misc_t *miscelm;
    arena_chunk_map_misc_t *key;

    key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
    miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
    if (miscelm != NULL) {
        arena_run_t *run = &miscelm->run;
        arena_run_split_large(arena, &miscelm->run, size, zero);
        return (run);
    }

    return (NULL);
}

static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxclass);
    assert((size & PAGE_MASK) == 0);

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_large_helper(arena, size, zero);
    if (run != NULL)
        return (run);

    /*
     * No usable runs. Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = &arena_miscelm_get(chunk, map_bias)->run;
        arena_run_split_large(arena, run, size, zero);
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
{
    arena_run_t *run;
    arena_chunk_map_misc_t *miscelm;
    arena_chunk_map_misc_t *key;

    key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
    miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
    if (miscelm != NULL) {
        run = &miscelm->run;
        arena_run_split_small(arena, run, size, binind);
        return (run);
    }

    return (NULL);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxclass);
    assert((size & PAGE_MASK) == 0);
    assert(binind != BININD_INVALID);

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_small_helper(arena, size, binind);
    if (run != NULL)
        return (run);

    /*
     * No usable runs. Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = &arena_miscelm_get(chunk, map_bias)->run;
        arena_run_split_small(arena, run, size, binind);
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    return (arena_run_alloc_small_helper(arena, size, binind));
}

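/*
 * Purge dirty pages once their count exceeds the active:dirty ratio
 * configured via opt_lg_dirty_mult (i.e. ndirty > nactive >> lg_dirty_mult);
 * a negative value disables purging.
 */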
static inline void
arena_maybe_purge(arena_t *arena)
{
    size_t threshold;

    /* Don't purge if the option is disabled. */
    if (opt_lg_dirty_mult < 0)
        return;
    threshold = (arena->nactive >> opt_lg_dirty_mult);
    /*
     * Don't purge unless the number of purgeable pages exceeds the
     * threshold.
     */
    if (arena->ndirty <= threshold)
        return;

    arena_purge(arena, false);
}

static size_t
arena_dirty_count(arena_t *arena)
{
    size_t ndirty = 0;
    arena_chunk_map_misc_t *miscelm;
    arena_chunk_t *chunk;
    size_t pageind, npages;

    ql_foreach(miscelm, &arena->runs_dirty, dr_link) {
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
        pageind = arena_miscelm_to_pageind(miscelm);
        assert(arena_mapbits_allocated_get(chunk, pageind) == 0);
        assert(arena_mapbits_large_get(chunk, pageind) == 0);
        assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
        npages = arena_mapbits_unallocated_size_get(chunk, pageind) >>
            LG_PAGE;
        ndirty += npages;
    }

    return (ndirty);
}

static size_t
arena_compute_npurge(arena_t *arena, bool all)
{
    size_t npurge;

    /*
     * Compute the minimum number of pages that this thread should try to
     * purge.
     */
    if (all == false) {
        size_t threshold = (arena->nactive >> opt_lg_dirty_mult);

        npurge = arena->ndirty - threshold;
    } else
        npurge = arena->ndirty;

    return (npurge);
}

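/*
 * Pull dirty runs off runs_dirty and temporarily allocate them so they
 * cannot be reused while their pages are being purged; the stashed runs are
 * linked onto the caller's list for arena_purge_stashed() and
 * arena_unstash_purged().
 */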
static size_t
arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
    arena_chunk_miscelms_t *miscelms)
{
    arena_chunk_map_misc_t *miscelm;
    size_t nstashed = 0;

    /* Add at least npurge pages to purge_list. */
    for (miscelm = ql_first(&arena->runs_dirty); miscelm != NULL;
        miscelm = ql_first(&arena->runs_dirty)) {
        arena_chunk_t *chunk =
            (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
        size_t pageind = arena_miscelm_to_pageind(miscelm);
        size_t run_size = arena_mapbits_unallocated_size_get(chunk,
            pageind);
        size_t npages = run_size >> LG_PAGE;
        arena_run_t *run = &miscelm->run;

        assert(pageind + npages <= chunk_npages);
        assert(arena_mapbits_dirty_get(chunk, pageind) ==
            arena_mapbits_dirty_get(chunk, pageind+npages-1));

        /*
         * If purging the spare chunk's run, make it available prior to
         * allocation.
         */
        if (chunk == arena->spare)
            arena_chunk_alloc(arena);

        /* Temporarily allocate the free dirty run. */
        arena_run_split_large(arena, run, run_size, false);
        /* Append to purge_list for later processing. */
        ql_elm_new(miscelm, dr_link);
        ql_tail_insert(miscelms, miscelm, dr_link);

        nstashed += npages;

        if (all == false && nstashed >= npurge)
            break;
    }

    return (nstashed);
}

static size_t
arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms)
{
    size_t npurged, nmadvise;
    arena_chunk_map_misc_t *miscelm;

    if (config_stats)
        nmadvise = 0;
    npurged = 0;

    malloc_mutex_unlock(&arena->lock);

    ql_foreach(miscelm, miscelms, dr_link) {
        arena_chunk_t *chunk;
        size_t pageind, run_size, npages, flag_unzeroed, i;
        bool unzeroed;

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
        pageind = arena_miscelm_to_pageind(miscelm);
        run_size = arena_mapbits_large_size_get(chunk, pageind);
        npages = run_size >> LG_PAGE;

        assert(pageind + npages <= chunk_npages);
        unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
            LG_PAGE)), run_size);
        flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;

        /*
         * Set the unzeroed flag for all pages, now that pages_purge()
         * has returned whether the pages were zeroed as a side effect
         * of purging. This chunk map modification is safe even though
         * the arena mutex isn't currently owned by this thread,
         * because the run is marked as allocated, thus protecting it
         * from being modified by any other thread. As long as these
         * writes don't perturb the first and last elements'
         * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
         */
        for (i = 0; i < npages; i++) {
            arena_mapbits_unzeroed_set(chunk, pageind+i,
                flag_unzeroed);
        }

        npurged += npages;
        if (config_stats)
            nmadvise++;
    }

    malloc_mutex_lock(&arena->lock);

    if (config_stats) {
        arena->stats.nmadvise += nmadvise;
        arena->stats.purged += npurged;
    }

    return (npurged);
}

static void
arena_unstash_purged(arena_t *arena, arena_chunk_miscelms_t *miscelms)
{
    arena_chunk_map_misc_t *miscelm;

    /* Deallocate runs. */
    for (miscelm = ql_first(miscelms); miscelm != NULL;
        miscelm = ql_first(miscelms)) {
        arena_run_t *run = &miscelm->run;
        ql_remove(miscelms, miscelm, dr_link);
        arena_run_dalloc(arena, run, false, true);
    }
}

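/*
 * Purge dirty pages in phases: compute how many pages must be purged, stash
 * at least that many dirty runs, purge them with arena->lock dropped, and
 * finally deallocate the stashed runs.
 */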
void
arena_purge(arena_t *arena, bool all)
{
    size_t npurge, npurgeable, npurged;
    arena_chunk_miscelms_t purge_list;

    if (config_debug) {
        size_t ndirty = arena_dirty_count(arena);
        assert(ndirty == arena->ndirty);
    }
    assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty || all);

    if (config_stats)
        arena->stats.npurge++;

    npurge = arena_compute_npurge(arena, all);
    ql_new(&purge_list);
    npurgeable = arena_stash_dirty(arena, all, npurge, &purge_list);
    assert(npurgeable >= npurge);
    npurged = arena_purge_stashed(arena, &purge_list);
    assert(npurged == npurgeable);
    arena_unstash_purged(arena, &purge_list);
}

void
arena_purge_all(arena_t *arena)
{

    malloc_mutex_lock(&arena->lock);
    arena_purge(arena, true);
    malloc_mutex_unlock(&arena->lock);
}

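/*
 * Coalesce an unallocated run with its unallocated neighbors of matching
 * dirtiness, updating *p_size, *p_run_ind, and *p_run_pages in place. The
 * absorbed neighbors are removed from runs_avail/runs_dirty; the caller
 * re-inserts the merged run.
 */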
static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
{
    size_t size = *p_size;
    size_t run_ind = *p_run_ind;
    size_t run_pages = *p_run_pages;

    /* Try to coalesce forward. */
    if (run_ind + run_pages < chunk_npages &&
        arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
        arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
        size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
            run_ind+run_pages);
        size_t nrun_pages = nrun_size >> LG_PAGE;

        /*
         * Remove successor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert(arena_mapbits_unallocated_size_get(chunk,
            run_ind+run_pages+nrun_pages-1) == nrun_size);
        assert(arena_mapbits_dirty_get(chunk,
            run_ind+run_pages+nrun_pages-1) == flag_dirty);
        arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);

        /* If the successor is dirty, remove it from runs_dirty. */
        if (flag_dirty != 0) {
            arena_dirty_remove(arena, chunk, run_ind+run_pages,
                nrun_pages);
        }

        size += nrun_size;
        run_pages += nrun_pages;

        arena_mapbits_unallocated_size_set(chunk, run_ind, size);
        arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
            size);
    }

    /* Try to coalesce backward. */
    if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
        run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
        flag_dirty) {
        size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
            run_ind-1);
        size_t prun_pages = prun_size >> LG_PAGE;

        run_ind -= prun_pages;

        /*
         * Remove predecessor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
            prun_size);
        assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
        arena_avail_remove(arena, chunk, run_ind, prun_pages);

        /* If the predecessor is dirty, remove it from runs_dirty. */
        if (flag_dirty != 0)
            arena_dirty_remove(arena, chunk, run_ind, prun_pages);

        size += prun_size;
        run_pages += prun_pages;

        arena_mapbits_unallocated_size_set(chunk, run_ind, size);
        arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
            size);
    }

    *p_size = size;
    *p_run_ind = run_ind;
    *p_run_pages = run_pages;
}

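/*
 * Return a run to the arena: mark its pages unallocated, coalesce with
 * neighboring free runs, re-insert into runs_avail (and runs_dirty if
 * dirty), release the chunk if it became entirely free, and possibly
 * trigger purging.
 */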
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    size_t size, run_ind, run_pages, flag_dirty;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    run_ind = arena_miscelm_to_pageind(miscelm);
    assert(run_ind >= map_bias);
    assert(run_ind < chunk_npages);
    if (arena_mapbits_large_get(chunk, run_ind) != 0) {
        size = arena_mapbits_large_size_get(chunk, run_ind);
        assert(size == PAGE ||
            arena_mapbits_large_size_get(chunk,
            run_ind+(size>>LG_PAGE)-1) == 0);
    } else {
        size_t binind = arena_bin_index(arena, run->bin);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];
        size = bin_info->run_size;
    }
    run_pages = (size >> LG_PAGE);
    arena_cactive_update(arena, 0, run_pages);
    arena->nactive -= run_pages;

    /*
     * The run is dirty if the caller claims to have dirtied it, as well as
     * if it was already dirty before being allocated and the caller
     * doesn't claim to have cleaned it.
     */
    assert(arena_mapbits_dirty_get(chunk, run_ind) ==
        arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
    if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
        dirty = true;
    flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;

    /* Mark pages as unallocated in the chunk map. */
    if (dirty) {
        arena_mapbits_unallocated_set(chunk, run_ind, size,
            CHUNK_MAP_DIRTY);
        arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
            CHUNK_MAP_DIRTY);
    } else {
        arena_mapbits_unallocated_set(chunk, run_ind, size,
            arena_mapbits_unzeroed_get(chunk, run_ind));
        arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
            arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
    }

    arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
        flag_dirty);

    /* Insert into runs_avail, now that coalescing is complete. */
    assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
        arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
    assert(arena_mapbits_dirty_get(chunk, run_ind) ==
        arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
    arena_avail_insert(arena, chunk, run_ind, run_pages);

    if (dirty)
        arena_dirty_insert(arena, chunk, run_ind, run_pages);

    /* Deallocate chunk if it is now completely unused. */
    if (size == arena_maxclass) {
        assert(run_ind == map_bias);
        assert(run_pages == (arena_maxclass >> LG_PAGE));
        arena_chunk_dalloc(arena, chunk);
    }

    /*
     * It is okay to do dirty page processing here even if the chunk was
     * deallocated above, since in that case it is the spare. Waiting
     * until after possible chunk deallocation to do dirty processing
     * allows for an old spare to be fully deallocated, thus decreasing the
     * chances of spuriously crossing the dirty page purging threshold.
     */
    if (dirty)
        arena_maybe_purge(arena);
}

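/*
 * Shrink a large run by releasing its leading pages; the trailing portion
 * of newsize bytes remains allocated to the caller.
 */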
static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
    arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
    size_t pageind = arena_miscelm_to_pageind(miscelm);
    size_t head_npages = (oldsize - newsize) >> LG_PAGE;
    size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

    assert(oldsize > newsize);

    /*
     * Update the chunk map so that arena_run_dalloc() can treat the
     * leading run as separately allocated. Set the last element of each
     * run first, in case of single-page runs.
     */
    assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
    arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
    arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);

    if (config_debug) {
        UNUSED size_t tail_npages = newsize >> LG_PAGE;
        assert(arena_mapbits_large_size_get(chunk,
            pageind+head_npages+tail_npages-1) == 0);
        assert(arena_mapbits_dirty_get(chunk,
            pageind+head_npages+tail_npages-1) == flag_dirty);
    }
    arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
        flag_dirty);

    arena_run_dalloc(arena, run, false, false);
}

static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
    arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
    size_t pageind = arena_miscelm_to_pageind(miscelm);
    size_t head_npages = newsize >> LG_PAGE;
    size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
    arena_chunk_map_misc_t *tail_miscelm;
    arena_run_t *tail_run;

    assert(oldsize > newsize);

    /*
     * Update the chunk map so that arena_run_dalloc() can treat the
     * trailing run as separately allocated. Set the last element of each
     * run first, in case of single-page runs.
     */
    assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
    arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
    arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);

    if (config_debug) {
        UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
        assert(arena_mapbits_large_size_get(chunk,
            pageind+head_npages+tail_npages-1) == 0);
        assert(arena_mapbits_dirty_get(chunk,
            pageind+head_npages+tail_npages-1) == flag_dirty);
    }
    arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
        flag_dirty);

    tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
    tail_run = &tail_miscelm->run;
    arena_run_dalloc(arena, tail_run, dirty, false);
}

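/*
 * Each bin tracks its non-full runs in bin->runs, a tree ordered by miscelm
 * address, so arena_bin_runs_first() always yields the lowest such run.
 */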
static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
    arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
    if (miscelm != NULL)
        return (&miscelm->run);

    return (NULL);
}

static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
    arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);

    assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);

    arena_run_tree_insert(&bin->runs, miscelm);
}

static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
    arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);

    assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);

    arena_run_tree_remove(&bin->runs, miscelm);
}

static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
    arena_run_t *run = arena_bin_runs_first(bin);
    if (run != NULL) {
        arena_bin_runs_remove(bin, run);
        if (config_stats)
            bin->stats.reruns++;
    }
    return (run);
}

1211static arena_run_t *
Jason Evanse476f8a2010-01-16 09:53:50 -08001212arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1213{
Jason Evanse476f8a2010-01-16 09:53:50 -08001214 arena_run_t *run;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001215 size_t binind;
1216 arena_bin_info_t *bin_info;
Jason Evanse476f8a2010-01-16 09:53:50 -08001217
1218 /* Look for a usable run. */
Jason Evanse7a10582012-02-13 17:36:52 -08001219 run = arena_bin_nonfull_run_tryget(bin);
1220 if (run != NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001221 return (run);
Jason Evanse476f8a2010-01-16 09:53:50 -08001222 /* No existing runs have any space available. */
1223
Jason Evans49f7e8f2011-03-15 13:59:15 -07001224 binind = arena_bin_index(arena, bin);
1225 bin_info = &arena_bin_info[binind];
1226
Jason Evanse476f8a2010-01-16 09:53:50 -08001227 /* Allocate a new run. */
Jason Evanse00572b2010-03-14 19:43:56 -07001228 malloc_mutex_unlock(&bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07001229 /******************************/
Jason Evans86815df2010-03-13 20:32:56 -08001230 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001231 run = arena_run_alloc_small(arena, bin_info->run_size, binind);
Jason Evanse00572b2010-03-14 19:43:56 -07001232 if (run != NULL) {
1233 /* Initialize run internals. */
1234 run->bin = bin;
Jason Evans84c8eef2011-03-16 10:30:13 -07001235 run->nextind = 0;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001236 run->nfree = bin_info->nregs;
Jason Evans0c5dd032014-09-29 01:31:39 -07001237 bitmap_init(run->bitmap, &bin_info->bitmap_info);
Jason Evanse69bee02010-03-15 22:25:23 -07001238 }
1239 malloc_mutex_unlock(&arena->lock);
1240 /********************************/
1241 malloc_mutex_lock(&bin->lock);
1242 if (run != NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001243 if (config_stats) {
1244 bin->stats.nruns++;
1245 bin->stats.curruns++;
Jason Evans7372b152012-02-10 20:22:09 -08001246 }
Jason Evanse00572b2010-03-14 19:43:56 -07001247 return (run);
1248 }
1249
1250 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08001251 * arena_run_alloc_small() failed, but another thread may have made
Jason Evans940a2e02010-10-17 17:51:37 -07001252 * sufficient memory available while this one dropped bin->lock above,
Jason Evanse00572b2010-03-14 19:43:56 -07001253 * so search one more time.
1254 */
Jason Evanse7a10582012-02-13 17:36:52 -08001255 run = arena_bin_nonfull_run_tryget(bin);
1256 if (run != NULL)
Jason Evanse00572b2010-03-14 19:43:56 -07001257 return (run);
Jason Evanse00572b2010-03-14 19:43:56 -07001258
1259 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08001260}
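
/*
 * Illustrative sketch, not part of the original source: the drop-and-recheck
 * locking pattern used above.  The cache lock is released while a slower
 * allocation runs under a different lock, so after reacquiring the cache
 * lock the cache must be consulted again, because another thread may have
 * refilled it in the meantime.  The example_* names and the pthread mutexes
 * are hypothetical stand-ins for bin->lock and arena->lock.
 */
static void *
example_alloc_with_recheck(pthread_mutex_t *cache_lock,
    pthread_mutex_t *backing_lock, void *(*cache_tryget)(void),
    void *(*backing_alloc)(void))
{
	void *item;

	/* Caller already holds *cache_lock. */
	if ((item = cache_tryget()) != NULL)
		return (item);

	pthread_mutex_unlock(cache_lock);
	pthread_mutex_lock(backing_lock);
	item = backing_alloc();
	pthread_mutex_unlock(backing_lock);
	pthread_mutex_lock(cache_lock);

	if (item != NULL)
		return (item);
	/* The slow path failed; the cache may have been refilled. */
	return (cache_tryget());
}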
1261
Jason Evans1e0a6362010-03-13 13:41:58 -08001262/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08001263static void *
1264arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1265{
Jason Evanse00572b2010-03-14 19:43:56 -07001266 void *ret;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001267 size_t binind;
1268 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07001269 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001270
Jason Evans49f7e8f2011-03-15 13:59:15 -07001271 binind = arena_bin_index(arena, bin);
1272 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07001273 bin->runcur = NULL;
1274 run = arena_bin_nonfull_run_get(arena, bin);
1275 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1276 /*
1277 * Another thread updated runcur while this one ran without the
1278 * bin lock in arena_bin_nonfull_run_get().
1279 */
Jason Evanse00572b2010-03-14 19:43:56 -07001280 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001281 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07001282 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07001283 arena_chunk_t *chunk;
1284
1285 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08001286 * arena_run_alloc_small() may have allocated run, or
1287 * it may have pulled run from the bin's run tree.
1288 * Therefore it is unsafe to make any assumptions about
1289 * how run has previously been used, and
1290 * arena_bin_lower_run() must be called, as if a region
1291 * were just deallocated from the run.
Jason Evans940a2e02010-10-17 17:51:37 -07001292 */
1293 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001294 if (run->nfree == bin_info->nregs)
Jason Evans8de6a022010-10-17 20:57:30 -07001295 arena_dalloc_bin_run(arena, chunk, run, bin);
1296 else
1297 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07001298 }
1299 return (ret);
1300 }
1301
1302 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001303 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07001304
1305 bin->runcur = run;
1306
Jason Evanse476f8a2010-01-16 09:53:50 -08001307 assert(bin->runcur->nfree > 0);
1308
Jason Evans49f7e8f2011-03-15 13:59:15 -07001309 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08001310}
1311
Jason Evans86815df2010-03-13 20:32:56 -08001312void
Jason Evans7372b152012-02-10 20:22:09 -08001313arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
1314 uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08001315{
1316 unsigned i, nfill;
1317 arena_bin_t *bin;
1318 arena_run_t *run;
1319 void *ptr;
1320
1321 assert(tbin->ncached == 0);
1322
Jason Evans88c222c2013-02-06 11:59:30 -08001323 if (config_prof && arena_prof_accum(arena, prof_accumbytes))
1324 prof_idump();
Jason Evanse69bee02010-03-15 22:25:23 -07001325 bin = &arena->bins[binind];
1326 malloc_mutex_lock(&bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07001327 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1328 tbin->lg_fill_div); i < nfill; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001329 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001330 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001331 else
1332 ptr = arena_bin_malloc_hard(arena, bin);
Jason Evans3fa9a2f2010-03-07 15:34:14 -08001333 if (ptr == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001334 break;
Jason Evans9c640bf2014-09-11 16:20:44 -07001335 if (config_fill && unlikely(opt_junk)) {
Jason Evans122449b2012-04-06 00:35:09 -07001336 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
1337 true);
1338 }
Jason Evans9c43c132011-03-18 10:53:15 -07001339 /* Insert such that low regions get used first. */
1340 tbin->avail[nfill - 1 - i] = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08001341 }
Jason Evans7372b152012-02-10 20:22:09 -08001342 if (config_stats) {
1343 bin->stats.allocated += i * arena_bin_info[binind].reg_size;
1344 bin->stats.nmalloc += i;
1345 bin->stats.nrequests += tbin->tstats.nrequests;
1346 bin->stats.nfills++;
1347 tbin->tstats.nrequests = 0;
1348 }
Jason Evans86815df2010-03-13 20:32:56 -08001349 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001350 tbin->ncached = i;
Jason Evanse476f8a2010-01-16 09:53:50 -08001351}
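
/*
 * Illustrative sketch, not part of the original source: the reverse fill
 * order used above.  If the consumer pops cached pointers from the high end
 * of avail[], storing the i-th region at avail[nfill - 1 - i] means the
 * regions taken first from the source (the lowest addresses) are also the
 * first ones handed back out.  The example_* names are hypothetical.
 */
static unsigned
example_fill_lowest_first(void **avail, unsigned nfill,
    void *(*next_region)(void))
{
	unsigned i;
	void *ptr;

	for (i = 0; i < nfill; i++) {
		if ((ptr = next_region()) == NULL)
			break;
		/* The i-th (lowest) region lands in the highest free slot. */
		avail[nfill - 1 - i] = ptr;
	}
	/* i regions were cached; they occupy avail[nfill-i .. nfill-1]. */
	return (i);
}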
Jason Evanse476f8a2010-01-16 09:53:50 -08001352
Jason Evans122449b2012-04-06 00:35:09 -07001353void
1354arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
1355{
1356
1357 if (zero) {
1358 size_t redzone_size = bin_info->redzone_size;
1359 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
1360 redzone_size);
1361 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
1362 redzone_size);
1363 } else {
1364 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
1365 bin_info->reg_interval);
1366 }
1367}
1368
Jason Evans0d6c5d82013-12-17 15:14:36 -08001369#ifdef JEMALLOC_JET
1370#undef arena_redzone_corruption
1371#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
1372#endif
1373static void
1374arena_redzone_corruption(void *ptr, size_t usize, bool after,
1375 size_t offset, uint8_t byte)
1376{
1377
1378 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
1379 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
1380 after ? "after" : "before", ptr, usize, byte);
1381}
1382#ifdef JEMALLOC_JET
Jason Evans0d6c5d82013-12-17 15:14:36 -08001383#undef arena_redzone_corruption
Jason Evans6b694c42014-01-07 16:47:56 -08001384#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
1385arena_redzone_corruption_t *arena_redzone_corruption =
1386 JEMALLOC_N(arena_redzone_corruption_impl);
Jason Evans0d6c5d82013-12-17 15:14:36 -08001387#endif
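
/*
 * Illustrative sketch, not part of the original source: the test-hook
 * pattern used above.  In a test build the static implementation is compiled
 * under an *_impl name and exported through a writable function pointer so
 * that tests can interpose their own version; regular builds call the static
 * function directly.  EXAMPLE_TEST and the example_* names are hypothetical.
 */
#ifdef EXAMPLE_TEST
#define	example_hook example_hook_impl
#endif
static void
example_hook(void *ptr)
{

	(void)ptr;	/* Real work would go here. */
}
#ifdef EXAMPLE_TEST
#undef example_hook
typedef void (example_hook_t)(void *);
example_hook_t *example_hook = example_hook_impl;
#endif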
1388
1389static void
1390arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07001391{
1392 size_t size = bin_info->reg_size;
1393 size_t redzone_size = bin_info->redzone_size;
1394 size_t i;
1395 bool error = false;
1396
1397 for (i = 1; i <= redzone_size; i++) {
Jason Evans0d6c5d82013-12-17 15:14:36 -08001398 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
1399 if (*byte != 0xa5) {
Jason Evans122449b2012-04-06 00:35:09 -07001400 error = true;
Jason Evans0d6c5d82013-12-17 15:14:36 -08001401 arena_redzone_corruption(ptr, size, false, i, *byte);
1402 if (reset)
1403 *byte = 0xa5;
Jason Evans122449b2012-04-06 00:35:09 -07001404 }
1405 }
1406 for (i = 0; i < redzone_size; i++) {
Jason Evans0d6c5d82013-12-17 15:14:36 -08001407 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
1408 if (*byte != 0xa5) {
Jason Evans122449b2012-04-06 00:35:09 -07001409 error = true;
Jason Evans0d6c5d82013-12-17 15:14:36 -08001410 arena_redzone_corruption(ptr, size, true, i, *byte);
1411 if (reset)
1412 *byte = 0xa5;
Jason Evans122449b2012-04-06 00:35:09 -07001413 }
1414 }
1415 if (opt_abort && error)
1416 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08001417}
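
/*
 * Illustrative sketch, not part of the original source: the region layout
 * the redzone helpers above assume.  Each region of reg_size bytes is
 * preceded and followed by redzone_size bytes of 0xa5, so regions are spaced
 * reg_interval = reg_size + 2 * redzone_size apart:
 *
 *   | 0xa5 ... 0xa5 | user data (reg_size) | 0xa5 ... 0xa5 |
 *    <-redzone_size->                       <-redzone_size->
 *
 * The helper below verifies one side of such a region; the example_* name is
 * hypothetical.
 */
static bool
example_redzone_ok(const uint8_t *start, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (start[i] != 0xa5)
			return (false);
	}
	return (true);
}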
Jason Evans122449b2012-04-06 00:35:09 -07001418
Jason Evans6b694c42014-01-07 16:47:56 -08001419#ifdef JEMALLOC_JET
1420#undef arena_dalloc_junk_small
1421#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
1422#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08001423void
1424arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1425{
1426 size_t redzone_size = bin_info->redzone_size;
1427
1428 arena_redzones_validate(ptr, bin_info, false);
Jason Evans122449b2012-04-06 00:35:09 -07001429 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1430 bin_info->reg_interval);
1431}
Jason Evans6b694c42014-01-07 16:47:56 -08001432#ifdef JEMALLOC_JET
1433#undef arena_dalloc_junk_small
1434#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
1435arena_dalloc_junk_small_t *arena_dalloc_junk_small =
1436 JEMALLOC_N(arena_dalloc_junk_small_impl);
1437#endif
Jason Evans122449b2012-04-06 00:35:09 -07001438
Jason Evans0d6c5d82013-12-17 15:14:36 -08001439void
1440arena_quarantine_junk_small(void *ptr, size_t usize)
1441{
1442 size_t binind;
1443 arena_bin_info_t *bin_info;
1444 cassert(config_fill);
1445 assert(opt_junk);
1446 assert(opt_quarantine);
1447 assert(usize <= SMALL_MAXCLASS);
1448
Jason Evans3541a902014-04-16 17:14:33 -07001449 binind = small_size2bin(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08001450 bin_info = &arena_bin_info[binind];
1451 arena_redzones_validate(ptr, bin_info, true);
1452}
1453
Jason Evanse476f8a2010-01-16 09:53:50 -08001454void *
1455arena_malloc_small(arena_t *arena, size_t size, bool zero)
1456{
1457 void *ret;
1458 arena_bin_t *bin;
1459 arena_run_t *run;
1460 size_t binind;
1461
Jason Evans3541a902014-04-16 17:14:33 -07001462 binind = small_size2bin(size);
Jason Evansb1726102012-02-28 16:50:47 -08001463 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08001464 bin = &arena->bins[binind];
Jason Evans3541a902014-04-16 17:14:33 -07001465 size = small_bin2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001466
Jason Evans86815df2010-03-13 20:32:56 -08001467 malloc_mutex_lock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001468 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001469 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001470 else
1471 ret = arena_bin_malloc_hard(arena, bin);
1472
1473 if (ret == NULL) {
Jason Evans86815df2010-03-13 20:32:56 -08001474 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001475 return (NULL);
1476 }
1477
Jason Evans7372b152012-02-10 20:22:09 -08001478 if (config_stats) {
1479 bin->stats.allocated += size;
1480 bin->stats.nmalloc++;
1481 bin->stats.nrequests++;
1482 }
Jason Evans86815df2010-03-13 20:32:56 -08001483 malloc_mutex_unlock(&bin->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08001484 if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
1485 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08001486
1487 if (zero == false) {
Jason Evans7372b152012-02-10 20:22:09 -08001488 if (config_fill) {
Jason Evans9c640bf2014-09-11 16:20:44 -07001489 if (unlikely(opt_junk)) {
Jason Evans122449b2012-04-06 00:35:09 -07001490 arena_alloc_junk_small(ret,
1491 &arena_bin_info[binind], false);
Jason Evans9c640bf2014-09-11 16:20:44 -07001492 } else if (unlikely(opt_zero))
Jason Evans7372b152012-02-10 20:22:09 -08001493 memset(ret, 0, size);
1494 }
Jason Evansbd87b012014-04-15 16:35:08 -07001495 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evans122449b2012-04-06 00:35:09 -07001496 } else {
Jason Evans9c640bf2014-09-11 16:20:44 -07001497 if (config_fill && unlikely(opt_junk)) {
Jason Evans122449b2012-04-06 00:35:09 -07001498 arena_alloc_junk_small(ret, &arena_bin_info[binind],
1499 true);
1500 }
Jason Evansbd87b012014-04-15 16:35:08 -07001501 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001502 memset(ret, 0, size);
Jason Evans122449b2012-04-06 00:35:09 -07001503 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001504
1505 return (ret);
1506}
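
/*
 * Illustrative sketch, not part of the original source: the size
 * canonicalization performed above.  A request is mapped to a bin index and
 * then replaced by that bin's exact size class, so junk filling, zeroing,
 * and statistics all operate on the rounded-up size.  The tiny table below
 * is hypothetical; the real tables are generated from SIZE_CLASSES.
 */
static size_t
example_round_to_class(size_t size)
{
	static const size_t example_classes[] = {8, 16, 32, 48, 64};
	size_t i;

	for (i = 0; i < sizeof(example_classes) / sizeof(example_classes[0]);
	    i++) {
		if (size <= example_classes[i])
			return (example_classes[i]);
	}
	return (0);	/* Larger than the hypothetical small classes. */
}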
1507
1508void *
Jason Evanse476f8a2010-01-16 09:53:50 -08001509arena_malloc_large(arena_t *arena, size_t size, bool zero)
1510{
1511 void *ret;
Jason Evans0c5dd032014-09-29 01:31:39 -07001512 arena_run_t *run;
1513 arena_chunk_map_misc_t *miscelm;
Jason Evans88c222c2013-02-06 11:59:30 -08001514 UNUSED bool idump;
Jason Evanse476f8a2010-01-16 09:53:50 -08001515
1516 /* Large allocation. */
1517 size = PAGE_CEILING(size);
1518 malloc_mutex_lock(&arena->lock);
Jason Evans0c5dd032014-09-29 01:31:39 -07001519 run = arena_run_alloc_large(arena, size, zero);
1520 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001521 malloc_mutex_unlock(&arena->lock);
1522 return (NULL);
1523 }
Jason Evans0c5dd032014-09-29 01:31:39 -07001524 miscelm = arena_run_to_miscelm(run);
1525 ret = arena_miscelm_to_rpages(miscelm);
Jason Evans7372b152012-02-10 20:22:09 -08001526 if (config_stats) {
1527 arena->stats.nmalloc_large++;
1528 arena->stats.nrequests_large++;
1529 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001530 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1531 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1532 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001533 }
Jason Evans7372b152012-02-10 20:22:09 -08001534 if (config_prof)
Jason Evans88c222c2013-02-06 11:59:30 -08001535 idump = arena_prof_accum_locked(arena, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001536 malloc_mutex_unlock(&arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08001537 if (config_prof && idump)
1538 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08001539
1540 if (zero == false) {
Jason Evans7372b152012-02-10 20:22:09 -08001541 if (config_fill) {
Jason Evans9c640bf2014-09-11 16:20:44 -07001542 if (unlikely(opt_junk))
Jason Evans7372b152012-02-10 20:22:09 -08001543 memset(ret, 0xa5, size);
Jason Evans9c640bf2014-09-11 16:20:44 -07001544 else if (unlikely(opt_zero))
Jason Evans7372b152012-02-10 20:22:09 -08001545 memset(ret, 0, size);
1546 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001547 }
1548
1549 return (ret);
1550}
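
/*
 * Illustrative sketch, not part of the original source: the large-run stats
 * indexing used above.  Large sizes are exact multiples of the page size, so
 * a size maps to its lstats slot as (size >> LG_PAGE) - 1; with 4 KiB pages a
 * 12 KiB allocation updates slot 2.  The example_* name is hypothetical.
 */
static size_t
example_lstats_index(size_t size, unsigned lg_page)
{

	return ((size >> lg_page) - 1);
}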
1551
Jason Evanse476f8a2010-01-16 09:53:50 -08001552/* Only handles large allocations that require more than page alignment. */
1553void *
Jason Evans5ff709c2012-04-11 18:13:45 -07001554arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001555{
1556 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07001557 size_t alloc_size, leadsize, trailsize;
1558 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001559 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07001560 arena_chunk_map_misc_t *miscelm;
1561 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001562
1563 assert((size & PAGE_MASK) == 0);
Jason Evans93443682010-10-20 17:39:18 -07001564
1565 alignment = PAGE_CEILING(alignment);
Jason Evans5ff709c2012-04-11 18:13:45 -07001566 alloc_size = size + alignment - PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001567
1568 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001569 run = arena_run_alloc_large(arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07001570 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001571 malloc_mutex_unlock(&arena->lock);
1572 return (NULL);
1573 }
Jason Evans5ff709c2012-04-11 18:13:45 -07001574 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07001575 miscelm = arena_run_to_miscelm(run);
1576 rpages = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08001577
Jason Evans0c5dd032014-09-29 01:31:39 -07001578 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
1579 (uintptr_t)rpages;
Jason Evans5ff709c2012-04-11 18:13:45 -07001580 assert(alloc_size >= leadsize + size);
1581 trailsize = alloc_size - leadsize - size;
Jason Evans5ff709c2012-04-11 18:13:45 -07001582 if (leadsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07001583 arena_chunk_map_misc_t *head_miscelm = miscelm;
1584 arena_run_t *head_run = run;
1585
1586 miscelm = arena_miscelm_get(chunk,
1587 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
1588 LG_PAGE));
1589 run = &miscelm->run;
1590
1591 arena_run_trim_head(arena, chunk, head_run, alloc_size,
1592 alloc_size - leadsize);
Jason Evans5ff709c2012-04-11 18:13:45 -07001593 }
1594 if (trailsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07001595 arena_run_trim_tail(arena, chunk, run, size + trailsize, size,
Jason Evans5ff709c2012-04-11 18:13:45 -07001596 false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001597 }
Jason Evans0c5dd032014-09-29 01:31:39 -07001598 arena_run_init_large(arena, run, size, zero);
1599 ret = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08001600
Jason Evans7372b152012-02-10 20:22:09 -08001601 if (config_stats) {
1602 arena->stats.nmalloc_large++;
1603 arena->stats.nrequests_large++;
1604 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001605 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1606 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1607 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001608 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001609 malloc_mutex_unlock(&arena->lock);
1610
Jason Evans7372b152012-02-10 20:22:09 -08001611 if (config_fill && zero == false) {
Jason Evans9c640bf2014-09-11 16:20:44 -07001612 if (unlikely(opt_junk))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001613 memset(ret, 0xa5, size);
Jason Evans9c640bf2014-09-11 16:20:44 -07001614 else if (unlikely(opt_zero))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001615 memset(ret, 0, size);
1616 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001617 return (ret);
1618}
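
/*
 * Illustrative sketch, not part of the original source: the over-allocate-
 * and-trim arithmetic used above.  To obtain size bytes at a given alignment,
 * alloc_size = size + alignment - PAGE bytes are allocated, the pages before
 * the first aligned address (leadsize) are trimmed off the front, and the
 * pages beyond size bytes (trailsize) are trimmed off the back.  With 4 KiB
 * pages, size = 8 KiB, and alignment = 16 KiB, alloc_size is 20 KiB and
 * leadsize ranges from 0 to 12 KiB depending on where the run lands.  The
 * example_* name is hypothetical; alignment is assumed to be a power of two.
 */
static size_t
example_leadsize(uintptr_t addr, size_t alignment)
{
	uintptr_t aligned = (addr + alignment - 1) & ~((uintptr_t)alignment -
	    1);

	return ((size_t)(aligned - addr));
}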
1619
Jason Evans0b270a92010-03-31 16:45:04 -07001620void
1621arena_prof_promoted(const void *ptr, size_t size)
1622{
1623 arena_chunk_t *chunk;
1624 size_t pageind, binind;
1625
Jason Evans78f73522012-04-18 13:38:40 -07001626 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07001627 assert(ptr != NULL);
1628 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evans122449b2012-04-06 00:35:09 -07001629 assert(isalloc(ptr, false) == PAGE);
1630 assert(isalloc(ptr, true) == PAGE);
Jason Evansb1726102012-02-28 16:50:47 -08001631 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07001632
1633 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07001634 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans3541a902014-04-16 17:14:33 -07001635 binind = small_size2bin(size);
Jason Evansb1726102012-02-28 16:50:47 -08001636 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07001637 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07001638
Jason Evans122449b2012-04-06 00:35:09 -07001639 assert(isalloc(ptr, false) == PAGE);
1640 assert(isalloc(ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07001641}
Jason Evans6109fe02010-02-10 10:37:56 -08001642
Jason Evanse476f8a2010-01-16 09:53:50 -08001643static void
Jason Evans088e6a02010-10-18 00:04:44 -07001644arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08001645 arena_bin_t *bin)
1646{
Jason Evanse476f8a2010-01-16 09:53:50 -08001647
Jason Evans19b3d612010-03-18 20:36:40 -07001648 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08001649 if (run == bin->runcur)
1650 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001651 else {
1652 size_t binind = arena_bin_index(chunk->arena, bin);
1653 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1654
1655 if (bin_info->nregs != 1) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07001656 /*
1657 * This block's conditional is necessary because if the
1658 * run only contains one region, then it never gets
1659 * inserted into the non-full runs tree.
1660 */
Jason Evanse7a10582012-02-13 17:36:52 -08001661 arena_bin_runs_remove(bin, run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001662 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001663 }
Jason Evans088e6a02010-10-18 00:04:44 -07001664}
1665
1666static void
1667arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1668 arena_bin_t *bin)
1669{
Jason Evans49f7e8f2011-03-15 13:59:15 -07001670 size_t binind;
1671 arena_bin_info_t *bin_info;
Jason Evans088e6a02010-10-18 00:04:44 -07001672 size_t npages, run_ind, past;
Jason Evans0c5dd032014-09-29 01:31:39 -07001673 arena_chunk_map_misc_t *miscelm;
1674 void *rpages;
Jason Evans088e6a02010-10-18 00:04:44 -07001675
1676 assert(run != bin->runcur);
Jason Evans0c5dd032014-09-29 01:31:39 -07001677 assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
1678 NULL);
Jason Evans86815df2010-03-13 20:32:56 -08001679
Jason Evans49f7e8f2011-03-15 13:59:15 -07001680 binind = arena_bin_index(chunk->arena, run->bin);
1681 bin_info = &arena_bin_info[binind];
1682
Jason Evanse00572b2010-03-14 19:43:56 -07001683 malloc_mutex_unlock(&bin->lock);
1684 /******************************/
Jason Evansae4c7b42012-04-02 07:04:34 -07001685 npages = bin_info->run_size >> LG_PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07001686 miscelm = arena_run_to_miscelm(run);
1687 run_ind = arena_miscelm_to_pageind(miscelm);
1688 rpages = arena_miscelm_to_rpages(miscelm);
1689 past = (size_t)(PAGE_CEILING((uintptr_t)rpages +
Jason Evans84c8eef2011-03-16 10:30:13 -07001690 (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
Jason Evans122449b2012-04-06 00:35:09 -07001691 bin_info->reg_interval - bin_info->redzone_size) -
1692 (uintptr_t)chunk) >> LG_PAGE);
Jason Evans86815df2010-03-13 20:32:56 -08001693 malloc_mutex_lock(&arena->lock);
Jason Evans19b3d612010-03-18 20:36:40 -07001694
1695 /*
1696 * If the run was originally clean, and some pages were never touched,
1697 * trim the clean pages before deallocating the dirty portion of the
1698 * run.
1699 */
Jason Evans30fe12b2012-05-10 17:09:17 -07001700 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1701 arena_mapbits_dirty_get(chunk, run_ind+npages-1));
Jason Evans203484e2012-05-02 00:30:36 -07001702 if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
1703 npages) {
Jason Evans30fe12b2012-05-10 17:09:17 -07001704 /* Trim clean pages. Convert to large run beforehand. */
1705 assert(npages > 0);
Jason Evans0c5dd032014-09-29 01:31:39 -07001706 if (past > run_ind) {
1707 arena_mapbits_large_set(chunk, run_ind,
1708 bin_info->run_size, 0);
1709 arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
1710 arena_run_trim_tail(arena, chunk, run, (npages <<
1711 LG_PAGE), ((past - run_ind) << LG_PAGE), false);
1712 arena_run_dalloc(arena, run, true, false);
1713 } else
1714 arena_run_dalloc(arena, run, false, false);
Jason Evans940a2e02010-10-17 17:51:37 -07001715 /* npages = past - run_ind; */
Jason Evans0c5dd032014-09-29 01:31:39 -07001716 } else
1717 arena_run_dalloc(arena, run, true, false);
Jason Evans86815df2010-03-13 20:32:56 -08001718 malloc_mutex_unlock(&arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07001719 /****************************/
1720 malloc_mutex_lock(&bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08001721 if (config_stats)
1722 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08001723}
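
/*
 * Illustrative sketch, not part of the original source: the "past" bound
 * computed above.  Regions are placed at reg0_offset + i * reg_interval and
 * nextind records how many regions were ever handed out, so pages beyond the
 * last byte that could have been touched can be trimmed as clean instead of
 * being treated as dirty.  The example_* name is hypothetical and ignores
 * the redzone adjustment made by the real code.
 */
static size_t
example_touched_pages(size_t reg0_offset, size_t reg_interval, size_t nextind,
    size_t page_size)
{
	size_t touched_bytes = reg0_offset + nextind * reg_interval;

	/* Round up to whole pages. */
	return ((touched_bytes + page_size - 1) / page_size);
}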
1724
Jason Evans940a2e02010-10-17 17:51:37 -07001725static void
1726arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1727 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08001728{
Jason Evanse476f8a2010-01-16 09:53:50 -08001729
Jason Evans8de6a022010-10-17 20:57:30 -07001730 /*
Jason Evanse7a10582012-02-13 17:36:52 -08001731 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
1732 * non-full run. It is okay to NULL runcur out rather than proactively
1733 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07001734 */
Jason Evanse7a10582012-02-13 17:36:52 -08001735 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07001736 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08001737 if (bin->runcur->nfree > 0)
1738 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07001739 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08001740 if (config_stats)
1741 bin->stats.reruns++;
1742 } else
1743 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07001744}
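
/*
 * Illustrative sketch, not part of the original source: the address-ordered
 * preference applied above.  Keeping the current run pointing at the lowest
 * non-full run biases new regions toward low memory, which lets higher runs
 * drain and eventually be released.  The example below keeps the lower of
 * two candidates and stashes the other; the example_* names are hypothetical
 * and the "has free regions" check made by the real code is omitted.
 */
static void *
example_keep_lowest(void *current, void *candidate, void (*stash)(void *))
{

	if ((uintptr_t)candidate < (uintptr_t)current) {
		stash(current);
		return (candidate);
	}
	stash(candidate);
	return (current);
}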
1745
1746void
Jason Evans203484e2012-05-02 00:30:36 -07001747arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001748 arena_chunk_map_bits_t *bitselm)
Jason Evans940a2e02010-10-17 17:51:37 -07001749{
Jason Evans0c5dd032014-09-29 01:31:39 -07001750 size_t pageind, rpages_ind;
Jason Evans940a2e02010-10-17 17:51:37 -07001751 arena_run_t *run;
1752 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02001753 arena_bin_info_t *bin_info;
1754 size_t size, binind;
Jason Evans940a2e02010-10-17 17:51:37 -07001755
Jason Evansae4c7b42012-04-02 07:04:34 -07001756 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07001757 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
1758 run = &arena_miscelm_get(chunk, rpages_ind)->run;
Jason Evans940a2e02010-10-17 17:51:37 -07001759 bin = run->bin;
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001760 binind = arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
1761 pageind));
Mike Hommey8b499712012-04-24 23:22:02 +02001762 bin_info = &arena_bin_info[binind];
Jason Evans7372b152012-02-10 20:22:09 -08001763 if (config_fill || config_stats)
1764 size = bin_info->reg_size;
Jason Evans940a2e02010-10-17 17:51:37 -07001765
Jason Evans9c640bf2014-09-11 16:20:44 -07001766 if (config_fill && unlikely(opt_junk))
Jason Evans122449b2012-04-06 00:35:09 -07001767 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07001768
1769 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001770 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07001771 arena_dissociate_bin_run(chunk, run, bin);
Jason Evans8de6a022010-10-17 20:57:30 -07001772 arena_dalloc_bin_run(arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07001773 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07001774 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08001775
Jason Evans7372b152012-02-10 20:22:09 -08001776 if (config_stats) {
1777 bin->stats.allocated -= size;
1778 bin->stats.ndalloc++;
1779 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001780}
1781
Jason Evanse476f8a2010-01-16 09:53:50 -08001782void
Jason Evans203484e2012-05-02 00:30:36 -07001783arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001784 size_t pageind, arena_chunk_map_bits_t *bitselm)
Jason Evans203484e2012-05-02 00:30:36 -07001785{
1786 arena_run_t *run;
1787 arena_bin_t *bin;
Jason Evans0c5dd032014-09-29 01:31:39 -07001788 size_t rpages_ind;
Jason Evans203484e2012-05-02 00:30:36 -07001789
Jason Evans0c5dd032014-09-29 01:31:39 -07001790 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
1791 run = &arena_miscelm_get(chunk, rpages_ind)->run;
Jason Evans203484e2012-05-02 00:30:36 -07001792 bin = run->bin;
1793 malloc_mutex_lock(&bin->lock);
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001794 arena_dalloc_bin_locked(arena, chunk, ptr, bitselm);
Jason Evans203484e2012-05-02 00:30:36 -07001795 malloc_mutex_unlock(&bin->lock);
1796}
1797
1798void
1799arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1800 size_t pageind)
1801{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001802 arena_chunk_map_bits_t *bitselm;
Jason Evans203484e2012-05-02 00:30:36 -07001803
1804 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07001805 /* arena_ptr_small_binind_get() does extra sanity checking. */
1806 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
1807 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07001808 }
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001809 bitselm = arena_bitselm_get(chunk, pageind);
1810 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
Jason Evans203484e2012-05-02 00:30:36 -07001811}
Jason Evanse476f8a2010-01-16 09:53:50 -08001812
Jason Evans6b694c42014-01-07 16:47:56 -08001813#ifdef JEMALLOC_JET
1814#undef arena_dalloc_junk_large
1815#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
1816#endif
1817static void
1818arena_dalloc_junk_large(void *ptr, size_t usize)
1819{
1820
Jason Evans9c640bf2014-09-11 16:20:44 -07001821 if (config_fill && unlikely(opt_junk))
Jason Evans6b694c42014-01-07 16:47:56 -08001822 memset(ptr, 0x5a, usize);
1823}
1824#ifdef JEMALLOC_JET
1825#undef arena_dalloc_junk_large
1826#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
1827arena_dalloc_junk_large_t *arena_dalloc_junk_large =
1828 JEMALLOC_N(arena_dalloc_junk_large_impl);
1829#endif
1830
Jason Evanse476f8a2010-01-16 09:53:50 -08001831void
Jason Evans203484e2012-05-02 00:30:36 -07001832arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
Jason Evanse476f8a2010-01-16 09:53:50 -08001833{
Jason Evans0c5dd032014-09-29 01:31:39 -07001834 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1835 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
1836 arena_run_t *run = &miscelm->run;
Jason Evans13668262010-01-31 03:57:29 -08001837
Jason Evans7372b152012-02-10 20:22:09 -08001838 if (config_fill || config_stats) {
Jason Evans6b694c42014-01-07 16:47:56 -08001839 size_t usize = arena_mapbits_large_size_get(chunk, pageind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001840
Jason Evans6b694c42014-01-07 16:47:56 -08001841 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08001842 if (config_stats) {
1843 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08001844 arena->stats.allocated_large -= usize;
1845 arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
1846 arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08001847 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001848 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001849
Jason Evans0c5dd032014-09-29 01:31:39 -07001850 arena_run_dalloc(arena, run, true, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001851}
1852
Jason Evans203484e2012-05-02 00:30:36 -07001853void
1854arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1855{
1856
1857 malloc_mutex_lock(&arena->lock);
1858 arena_dalloc_large_locked(arena, chunk, ptr);
1859 malloc_mutex_unlock(&arena->lock);
1860}
1861
Jason Evanse476f8a2010-01-16 09:53:50 -08001862static void
1863arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001864 size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08001865{
Jason Evans0c5dd032014-09-29 01:31:39 -07001866 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1867 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
1868 arena_run_t *run = &miscelm->run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001869
1870 assert(size < oldsize);
1871
1872 /*
1873 * Shrink the run, and make trailing pages available for other
1874 * allocations.
1875 */
1876 malloc_mutex_lock(&arena->lock);
Jason Evans0c5dd032014-09-29 01:31:39 -07001877 arena_run_trim_tail(arena, chunk, run, oldsize, size, true);
Jason Evans7372b152012-02-10 20:22:09 -08001878 if (config_stats) {
1879 arena->stats.ndalloc_large++;
1880 arena->stats.allocated_large -= oldsize;
Jason Evansae4c7b42012-04-02 07:04:34 -07001881 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1882 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08001883
Jason Evans7372b152012-02-10 20:22:09 -08001884 arena->stats.nmalloc_large++;
1885 arena->stats.nrequests_large++;
1886 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001887 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1888 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1889 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001890 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001891 malloc_mutex_unlock(&arena->lock);
1892}
1893
1894static bool
1895arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001896 size_t oldsize, size_t size, size_t extra, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001897{
Jason Evansae4c7b42012-04-02 07:04:34 -07001898 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1899 size_t npages = oldsize >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001900 size_t followsize;
Jason Evanse476f8a2010-01-16 09:53:50 -08001901
Jason Evans203484e2012-05-02 00:30:36 -07001902 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
Jason Evanse476f8a2010-01-16 09:53:50 -08001903
1904 /* Try to extend the run. */
Jason Evans8e3c3c62010-09-17 15:46:18 -07001905 assert(size + extra > oldsize);
Jason Evanse476f8a2010-01-16 09:53:50 -08001906 malloc_mutex_lock(&arena->lock);
Jason Evans7393f442010-10-01 17:35:43 -07001907 if (pageind + npages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07001908 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
1909 (followsize = arena_mapbits_unallocated_size_get(chunk,
1910 pageind+npages)) >= size - oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001911 /*
1912 * The next run is available and sufficiently large. Split the
1913 * following run, then merge the first part with the existing
1914 * allocation.
1915 */
Jason Evans940a2e02010-10-17 17:51:37 -07001916 size_t flag_dirty;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001917 size_t splitsize = (oldsize + followsize <= size + extra)
1918 ? followsize : size + extra - oldsize;
Jason Evans0c5dd032014-09-29 01:31:39 -07001919 arena_run_t *run = &arena_miscelm_get(chunk,
1920 pageind+npages)->run;
1921 arena_run_split_large(arena, run, splitsize, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08001922
Jason Evans088e6a02010-10-18 00:04:44 -07001923 size = oldsize + splitsize;
Jason Evansae4c7b42012-04-02 07:04:34 -07001924 npages = size >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07001925
1926 /*
1927 * Mark the extended run as dirty if either portion of the run
1928 * was dirty before allocation. This is rather pedantic,
1929 * because there's not actually any sequence of events that
1930 * could cause the resulting run to be passed to
1931 * arena_run_dalloc() with the dirty argument set to false
1932 * (which is when dirty flag consistency would really matter).
1933 */
Jason Evans203484e2012-05-02 00:30:36 -07001934 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
1935 arena_mapbits_dirty_get(chunk, pageind+npages-1);
1936 arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
1937 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08001938
Jason Evans7372b152012-02-10 20:22:09 -08001939 if (config_stats) {
1940 arena->stats.ndalloc_large++;
1941 arena->stats.allocated_large -= oldsize;
Jason Evans203484e2012-05-02 00:30:36 -07001942 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1943 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08001944
Jason Evans7372b152012-02-10 20:22:09 -08001945 arena->stats.nmalloc_large++;
1946 arena->stats.nrequests_large++;
1947 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001948 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
Jason Evans203484e2012-05-02 00:30:36 -07001949 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
Jason Evansae4c7b42012-04-02 07:04:34 -07001950 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07001951 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001952 malloc_mutex_unlock(&arena->lock);
1953 return (false);
1954 }
1955 malloc_mutex_unlock(&arena->lock);
1956
1957 return (true);
1958}
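
/*
 * Illustrative sketch, not part of the original source: the adjacency test
 * behind in-place growth.  Growing succeeds only when the pages immediately
 * after the allocation are unallocated and collectively at least as large as
 * the requested increase; otherwise the caller must fall back to
 * allocate-and-copy.  The flat arrays stand in for the chunk map accessors;
 * the example_* name is hypothetical.
 */
static bool
example_can_grow_in_place(const bool *allocated, const size_t *free_npages,
    size_t chunk_npages, size_t pageind, size_t old_npages,
    size_t extra_npages)
{
	size_t next = pageind + old_npages;

	if (next >= chunk_npages)
		return (false);		/* Nothing follows in this chunk. */
	if (allocated[next])
		return (false);		/* The neighbor is already in use. */
	return (free_npages[next] >= extra_npages);
}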
1959
Jason Evans6b694c42014-01-07 16:47:56 -08001960#ifdef JEMALLOC_JET
1961#undef arena_ralloc_junk_large
1962#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
1963#endif
1964static void
1965arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
1966{
1967
Jason Evans9c640bf2014-09-11 16:20:44 -07001968 if (config_fill && unlikely(opt_junk)) {
Jason Evans6b694c42014-01-07 16:47:56 -08001969 memset((void *)((uintptr_t)ptr + usize), 0x5a,
1970 old_usize - usize);
1971 }
1972}
1973#ifdef JEMALLOC_JET
1974#undef arena_ralloc_junk_large
1975#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
1976arena_ralloc_junk_large_t *arena_ralloc_junk_large =
1977 JEMALLOC_N(arena_ralloc_junk_large_impl);
1978#endif
1979
Jason Evanse476f8a2010-01-16 09:53:50 -08001980/*
1981 * Try to resize a large allocation, in order to avoid copying. This will
1982 * always fail if growing an object, and the following run is already in use.
1983 */
1984static bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07001985arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
1986 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001987{
1988 size_t psize;
1989
Jason Evans8e3c3c62010-09-17 15:46:18 -07001990 psize = PAGE_CEILING(size + extra);
Jason Evanse476f8a2010-01-16 09:53:50 -08001991 if (psize == oldsize) {
1992 /* Same size class. */
Jason Evanse476f8a2010-01-16 09:53:50 -08001993 return (false);
1994 } else {
1995 arena_chunk_t *chunk;
1996 arena_t *arena;
1997
1998 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1999 arena = chunk->arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08002000
2001 if (psize < oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002002			/* Fill before shrinking in order to avoid a race. */
Jason Evans6b694c42014-01-07 16:47:56 -08002003 arena_ralloc_junk_large(ptr, oldsize, psize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002004 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
2005 psize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002006 return (false);
2007 } else {
2008 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07002009 oldsize, PAGE_CEILING(size),
2010 psize - PAGE_CEILING(size), zero);
Jason Evans6b694c42014-01-07 16:47:56 -08002011 if (config_fill && ret == false && zero == false) {
Jason Evans9c640bf2014-09-11 16:20:44 -07002012 if (unlikely(opt_junk)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002013 memset((void *)((uintptr_t)ptr +
2014 oldsize), 0xa5, isalloc(ptr,
2015 config_prof) - oldsize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002016 } else if (unlikely(opt_zero)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002017 memset((void *)((uintptr_t)ptr +
2018 oldsize), 0, isalloc(ptr,
2019 config_prof) - oldsize);
2020 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002021 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002022 return (ret);
2023 }
2024 }
2025}
2026
Jason Evansb2c31662014-01-12 15:05:44 -08002027bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07002028arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
2029 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002030{
Jason Evanse476f8a2010-01-16 09:53:50 -08002031
Jason Evans8e3c3c62010-09-17 15:46:18 -07002032 /*
2033 * Avoid moving the allocation if the size class can be left the same.
2034 */
Jason Evanse476f8a2010-01-16 09:53:50 -08002035 if (oldsize <= arena_maxclass) {
Jason Evansb1726102012-02-28 16:50:47 -08002036 if (oldsize <= SMALL_MAXCLASS) {
Jason Evans3541a902014-04-16 17:14:33 -07002037 assert(arena_bin_info[small_size2bin(oldsize)].reg_size
Jason Evans49f7e8f2011-03-15 13:59:15 -07002038 == oldsize);
Jason Evansb1726102012-02-28 16:50:47 -08002039 if ((size + extra <= SMALL_MAXCLASS &&
Jason Evans3541a902014-04-16 17:14:33 -07002040 small_size2bin(size + extra) ==
2041 small_size2bin(oldsize)) || (size <= oldsize &&
Jason Evans6e629842013-12-15 21:49:40 -08002042 size + extra >= oldsize))
Jason Evansb2c31662014-01-12 15:05:44 -08002043 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002044 } else {
2045 assert(size <= arena_maxclass);
Jason Evansb1726102012-02-28 16:50:47 -08002046 if (size + extra > SMALL_MAXCLASS) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07002047 if (arena_ralloc_large(ptr, oldsize, size,
2048 extra, zero) == false)
Jason Evansb2c31662014-01-12 15:05:44 -08002049 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002050 }
2051 }
2052 }
2053
Jason Evans8e3c3c62010-09-17 15:46:18 -07002054 /* Reallocation would require a move. */
Jason Evansb2c31662014-01-12 15:05:44 -08002055 return (true);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002056}
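
/*
 * Illustrative sketch, not part of the original source: the small-class
 * no-move test applied above.  A resize stays in place when the new size
 * (plus any optional extra) maps to the same size class as the old
 * allocation, or when it is a shrink that the old class can still absorb.
 * example_size2class() is a hypothetical stand-in for small_size2bin(), and
 * the SMALL_MAXCLASS bound checked by the real code is omitted.
 */
static bool
example_small_can_stay(size_t oldsize, size_t size, size_t extra,
    size_t (*example_size2class)(size_t))
{

	if (example_size2class(size + extra) == example_size2class(oldsize))
		return (true);
	return (size <= oldsize && size + extra >= oldsize);
}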
Jason Evanse476f8a2010-01-16 09:53:50 -08002057
Jason Evans8e3c3c62010-09-17 15:46:18 -07002058void *
Jason Evans5460aa62014-09-22 21:09:23 -07002059arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
Jason Evans609ae592012-10-11 13:53:15 -07002060 size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
2061 bool try_tcache_dalloc)
Jason Evans8e3c3c62010-09-17 15:46:18 -07002062{
2063 void *ret;
2064 size_t copysize;
2065
2066 /* Try to avoid moving the allocation. */
Jason Evansb2c31662014-01-12 15:05:44 -08002067 if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
2068 return (ptr);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002069
Jason Evans8e3c3c62010-09-17 15:46:18 -07002070 /*
2071 * size and oldsize are different enough that we need to move the
2072 * object. In that case, fall back to allocating new space and
2073 * copying.
2074 */
Jason Evans38d92102011-03-23 00:37:29 -07002075 if (alignment != 0) {
Jason Evans5ff709c2012-04-11 18:13:45 -07002076 size_t usize = sa2u(size + extra, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002077 if (usize == 0)
2078 return (NULL);
Jason Evans5460aa62014-09-22 21:09:23 -07002079 ret = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc,
2080 arena);
2081 } else {
2082 ret = arena_malloc(tsd, arena, size + extra, zero,
2083 try_tcache_alloc);
2084 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07002085
2086 if (ret == NULL) {
2087 if (extra == 0)
2088 return (NULL);
2089 /* Try again, this time without extra. */
Jason Evans38d92102011-03-23 00:37:29 -07002090 if (alignment != 0) {
Jason Evans5ff709c2012-04-11 18:13:45 -07002091 size_t usize = sa2u(size, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002092 if (usize == 0)
2093 return (NULL);
Jason Evans5460aa62014-09-22 21:09:23 -07002094 ret = ipalloct(tsd, usize, alignment, zero,
2095 try_tcache_alloc, arena);
2096 } else {
2097 ret = arena_malloc(tsd, arena, size, zero,
2098 try_tcache_alloc);
2099 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07002100
2101 if (ret == NULL)
2102 return (NULL);
2103 }
2104
 2105	/* Junk/zero-filling was already done by ipalloc()/arena_malloc(). */
2106
2107 /*
2108 * Copy at most size bytes (not size+extra), since the caller has no
2109 * expectation that the extra bytes will be reliably preserved.
2110 */
Jason Evanse476f8a2010-01-16 09:53:50 -08002111 copysize = (size < oldsize) ? size : oldsize;
Jason Evansbd87b012014-04-15 16:35:08 -07002112 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002113 memcpy(ret, ptr, copysize);
Jason Evans5460aa62014-09-22 21:09:23 -07002114 iqalloc(tsd, ptr, try_tcache_dalloc);
Jason Evanse476f8a2010-01-16 09:53:50 -08002115 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08002116}
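
/*
 * Illustrative sketch, not part of the original source: the reallocation
 * fallback above expressed with the standard allocator.  First try to
 * satisfy the request in place; otherwise allocate new space, copy the
 * smaller of the old and new sizes, and release the original.  The
 * example_* names are hypothetical.
 */
static void *
example_realloc_fallback(void *ptr, size_t oldsize, size_t size,
    bool (*example_try_in_place)(void *, size_t, size_t))
{
	void *ret;
	size_t copysize;

	if (example_try_in_place(ptr, oldsize, size))
		return (ptr);

	ret = malloc(size);
	if (ret == NULL)
		return (NULL);
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(ret, ptr, copysize);
	free(ptr);
	return (ret);
}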
2117
Jason Evans609ae592012-10-11 13:53:15 -07002118dss_prec_t
2119arena_dss_prec_get(arena_t *arena)
2120{
2121 dss_prec_t ret;
2122
2123 malloc_mutex_lock(&arena->lock);
2124 ret = arena->dss_prec;
2125 malloc_mutex_unlock(&arena->lock);
2126 return (ret);
2127}
2128
Jason Evans4d434ad2014-04-15 12:09:48 -07002129bool
Jason Evans609ae592012-10-11 13:53:15 -07002130arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2131{
2132
Jason Evans4d434ad2014-04-15 12:09:48 -07002133 if (have_dss == false)
2134 return (dss_prec != dss_prec_disabled);
Jason Evans609ae592012-10-11 13:53:15 -07002135 malloc_mutex_lock(&arena->lock);
2136 arena->dss_prec = dss_prec;
2137 malloc_mutex_unlock(&arena->lock);
Jason Evans4d434ad2014-04-15 12:09:48 -07002138 return (false);
Jason Evans609ae592012-10-11 13:53:15 -07002139}
2140
2141void
2142arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
2143 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
2144 malloc_large_stats_t *lstats)
2145{
2146 unsigned i;
2147
2148 malloc_mutex_lock(&arena->lock);
2149 *dss = dss_prec_names[arena->dss_prec];
2150 *nactive += arena->nactive;
2151 *ndirty += arena->ndirty;
2152
2153 astats->mapped += arena->stats.mapped;
2154 astats->npurge += arena->stats.npurge;
2155 astats->nmadvise += arena->stats.nmadvise;
2156 astats->purged += arena->stats.purged;
2157 astats->allocated_large += arena->stats.allocated_large;
2158 astats->nmalloc_large += arena->stats.nmalloc_large;
2159 astats->ndalloc_large += arena->stats.ndalloc_large;
2160 astats->nrequests_large += arena->stats.nrequests_large;
Jason Evanse2deab72014-05-15 22:22:27 -07002161 astats->allocated_huge += arena->stats.allocated_huge;
2162 astats->nmalloc_huge += arena->stats.nmalloc_huge;
2163 astats->ndalloc_huge += arena->stats.ndalloc_huge;
2164 astats->nrequests_huge += arena->stats.nrequests_huge;
Jason Evans609ae592012-10-11 13:53:15 -07002165
2166 for (i = 0; i < nlclasses; i++) {
2167 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
2168 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
2169 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
2170 lstats[i].curruns += arena->stats.lstats[i].curruns;
2171 }
2172 malloc_mutex_unlock(&arena->lock);
2173
2174 for (i = 0; i < NBINS; i++) {
2175 arena_bin_t *bin = &arena->bins[i];
2176
2177 malloc_mutex_lock(&bin->lock);
2178 bstats[i].allocated += bin->stats.allocated;
2179 bstats[i].nmalloc += bin->stats.nmalloc;
2180 bstats[i].ndalloc += bin->stats.ndalloc;
2181 bstats[i].nrequests += bin->stats.nrequests;
2182 if (config_tcache) {
2183 bstats[i].nfills += bin->stats.nfills;
2184 bstats[i].nflushes += bin->stats.nflushes;
2185 }
2186 bstats[i].nruns += bin->stats.nruns;
2187 bstats[i].reruns += bin->stats.reruns;
2188 bstats[i].curruns += bin->stats.curruns;
2189 malloc_mutex_unlock(&bin->lock);
2190 }
2191}
2192
Jason Evanse476f8a2010-01-16 09:53:50 -08002193bool
2194arena_new(arena_t *arena, unsigned ind)
2195{
2196 unsigned i;
2197 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002198
Jason Evans6109fe02010-02-10 10:37:56 -08002199 arena->ind = ind;
Jason Evans597632b2011-03-18 13:41:33 -07002200 arena->nthreads = 0;
aravindfb7fe502014-05-05 15:16:56 -07002201 arena->chunk_alloc = chunk_alloc_default;
Jason Evanse2deab72014-05-15 22:22:27 -07002202 arena->chunk_dalloc = chunk_dalloc_default;
Jason Evans6109fe02010-02-10 10:37:56 -08002203
Jason Evanse476f8a2010-01-16 09:53:50 -08002204 if (malloc_mutex_init(&arena->lock))
2205 return (true);
2206
Jason Evans7372b152012-02-10 20:22:09 -08002207 if (config_stats) {
2208 memset(&arena->stats, 0, sizeof(arena_stats_t));
2209 arena->stats.lstats =
2210 (malloc_large_stats_t *)base_alloc(nlclasses *
2211 sizeof(malloc_large_stats_t));
2212 if (arena->stats.lstats == NULL)
2213 return (true);
2214 memset(arena->stats.lstats, 0, nlclasses *
2215 sizeof(malloc_large_stats_t));
2216 if (config_tcache)
2217 ql_new(&arena->tcache_ql);
2218 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002219
Jason Evans7372b152012-02-10 20:22:09 -08002220 if (config_prof)
2221 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08002222
Jason Evans609ae592012-10-11 13:53:15 -07002223 arena->dss_prec = chunk_dss_prec_get();
2224
Jason Evanse476f8a2010-01-16 09:53:50 -08002225 arena->spare = NULL;
2226
2227 arena->nactive = 0;
2228 arena->ndirty = 0;
2229
Jason Evanse3d13062012-10-30 15:42:37 -07002230 arena_avail_tree_new(&arena->runs_avail);
Jason Evans070b3c32014-08-14 14:45:58 -07002231 ql_new(&arena->runs_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08002232
2233 /* Initialize bins. */
Jason Evansb1726102012-02-28 16:50:47 -08002234 for (i = 0; i < NBINS; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002235 bin = &arena->bins[i];
Jason Evans86815df2010-03-13 20:32:56 -08002236 if (malloc_mutex_init(&bin->lock))
2237 return (true);
Jason Evanse476f8a2010-01-16 09:53:50 -08002238 bin->runcur = NULL;
2239 arena_run_tree_new(&bin->runs);
Jason Evans7372b152012-02-10 20:22:09 -08002240 if (config_stats)
2241 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
Jason Evanse476f8a2010-01-16 09:53:50 -08002242 }
2243
Jason Evanse476f8a2010-01-16 09:53:50 -08002244 return (false);
2245}
2246
Jason Evans49f7e8f2011-03-15 13:59:15 -07002247/*
2248 * Calculate bin_info->run_size such that it meets the following constraints:
2249 *
Jason Evans49f7e8f2011-03-15 13:59:15 -07002250 * *) bin_info->run_size <= arena_maxclass
Jason Evans47e57f92011-03-22 09:00:56 -07002251 * *) bin_info->nregs <= RUN_MAXREGS
Jason Evans49f7e8f2011-03-15 13:59:15 -07002252 *
Jason Evans0c5dd032014-09-29 01:31:39 -07002253 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
2254 * these settings are all interdependent.
Jason Evans49f7e8f2011-03-15 13:59:15 -07002255 */
Jason Evans0c5dd032014-09-29 01:31:39 -07002256static void
2257bin_info_run_size_calc(arena_bin_info_t *bin_info)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002258{
Jason Evans122449b2012-04-06 00:35:09 -07002259 size_t pad_size;
Jason Evans0c5dd032014-09-29 01:31:39 -07002260 size_t try_run_size, perfect_run_size, actual_run_size;
2261 uint32_t try_nregs, perfect_nregs, actual_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002262
2263 /*
Jason Evans122449b2012-04-06 00:35:09 -07002264 * Determine redzone size based on minimum alignment and minimum
2265 * redzone size. Add padding to the end of the run if it is needed to
2266 * align the regions. The padding allows each redzone to be half the
2267 * minimum alignment; without the padding, each redzone would have to
2268 * be twice as large in order to maintain alignment.
2269 */
Jason Evans9c640bf2014-09-11 16:20:44 -07002270 if (config_fill && unlikely(opt_redzone)) {
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002271 size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
2272 1);
Jason Evans122449b2012-04-06 00:35:09 -07002273 if (align_min <= REDZONE_MINSIZE) {
2274 bin_info->redzone_size = REDZONE_MINSIZE;
2275 pad_size = 0;
2276 } else {
2277 bin_info->redzone_size = align_min >> 1;
2278 pad_size = bin_info->redzone_size;
2279 }
2280 } else {
2281 bin_info->redzone_size = 0;
2282 pad_size = 0;
2283 }
2284 bin_info->reg_interval = bin_info->reg_size +
2285 (bin_info->redzone_size << 1);
2286
2287 /*
Jason Evans0c5dd032014-09-29 01:31:39 -07002288 * Compute run size under ideal conditions (no redzones, no limit on run
2289 * size).
Jason Evans49f7e8f2011-03-15 13:59:15 -07002290 */
Jason Evans0c5dd032014-09-29 01:31:39 -07002291 try_run_size = PAGE;
2292 try_nregs = try_run_size / bin_info->reg_size;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002293 do {
Jason Evans0c5dd032014-09-29 01:31:39 -07002294 perfect_run_size = try_run_size;
2295 perfect_nregs = try_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002296
Jason Evansae4c7b42012-04-02 07:04:34 -07002297 try_run_size += PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07002298 try_nregs = try_run_size / bin_info->reg_size;
2299 } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
2300 assert(perfect_nregs <= RUN_MAXREGS);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002301
Jason Evans0c5dd032014-09-29 01:31:39 -07002302 actual_run_size = perfect_run_size;
2303 actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
2304
2305 /*
2306 * Redzones can require enough padding that not even a single region can
2307 * fit within the number of pages that would normally be dedicated to a
2308 * run for this size class. Increase the run size until at least one
2309 * region fits.
2310 */
2311 while (actual_nregs == 0) {
2312 assert(config_fill && unlikely(opt_redzone));
2313
2314 actual_run_size += PAGE;
2315 actual_nregs = (actual_run_size - pad_size) /
2316 bin_info->reg_interval;
2317 }
2318
2319 /*
2320 * Make sure that the run will fit within an arena chunk.
2321 */
2322 while (actual_run_size > arena_maxclass) {
2323 actual_run_size -= PAGE;
2324 actual_nregs = (actual_run_size - pad_size) /
2325 bin_info->reg_interval;
2326 }
2327 assert(actual_nregs > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002328
2329 /* Copy final settings. */
Jason Evans0c5dd032014-09-29 01:31:39 -07002330 bin_info->run_size = actual_run_size;
2331 bin_info->nregs = actual_nregs;
2332 bin_info->reg0_offset = actual_run_size - (actual_nregs *
2333 bin_info->reg_interval) - pad_size + bin_info->redzone_size;
Jason Evans122449b2012-04-06 00:35:09 -07002334
2335 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
2336 * bin_info->reg_interval) + pad_size == bin_info->run_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002337}
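
/*
 * Illustrative worked example, not part of the original source: with
 * redzones disabled (pad_size == 0, reg_interval == reg_size), the loop
 * above finds the smallest page multiple that the region size divides
 * evenly.  For a hypothetical 48-byte class with 4 KiB pages, one page
 * leaves a 16-byte remainder and two pages leave 32, but three pages fit
 * exactly: 12288 == 256 * 48, so perfect_run_size is 12288 and perfect_nregs
 * is 256.  The helper below reproduces that search; the example_* name is
 * hypothetical and reg_size must be nonzero.
 */
static size_t
example_perfect_run_size(size_t reg_size, size_t page_size)
{
	size_t run_size = page_size;

	while (run_size % reg_size != 0)
		run_size += page_size;
	return (run_size);
}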
2338
Jason Evansb1726102012-02-28 16:50:47 -08002339static void
Jason Evans49f7e8f2011-03-15 13:59:15 -07002340bin_info_init(void)
2341{
2342 arena_bin_info_t *bin_info;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002343
Jason Evansd04047c2014-05-28 16:11:55 -07002344#define BIN_INFO_INIT_bin_yes(index, size) \
2345 bin_info = &arena_bin_info[index]; \
Jason Evansb1726102012-02-28 16:50:47 -08002346 bin_info->reg_size = size; \
Jason Evans0c5dd032014-09-29 01:31:39 -07002347 bin_info_run_size_calc(bin_info); \
Jason Evansb1726102012-02-28 16:50:47 -08002348 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
Jason Evansd04047c2014-05-28 16:11:55 -07002349#define BIN_INFO_INIT_bin_no(index, size)
2350#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
2351 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
Jason Evansb1726102012-02-28 16:50:47 -08002352 SIZE_CLASSES
Jason Evansd04047c2014-05-28 16:11:55 -07002353#undef BIN_INFO_INIT_bin_yes
2354#undef BIN_INFO_INIT_bin_no
2355#undef SC
Jason Evans49f7e8f2011-03-15 13:59:15 -07002356}
2357
Jason Evansb1726102012-02-28 16:50:47 -08002358void
Jason Evansa0bf2422010-01-29 14:30:41 -08002359arena_boot(void)
Jason Evanse476f8a2010-01-16 09:53:50 -08002360{
Jason Evansa0bf2422010-01-29 14:30:41 -08002361 size_t header_size;
Jason Evans7393f442010-10-01 17:35:43 -07002362 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08002363
Jason Evanse476f8a2010-01-16 09:53:50 -08002364 /*
2365 * Compute the header size such that it is large enough to contain the
Jason Evans7393f442010-10-01 17:35:43 -07002366 * page map. The page map is biased to omit entries for the header
2367 * itself, so some iteration is necessary to compute the map bias.
2368 *
2369 * 1) Compute safe header_size and map_bias values that include enough
2370 * space for an unbiased page map.
2371 * 2) Refine map_bias based on (1) to omit the header pages in the page
2372 * map. The resulting map_bias may be one too small.
2373 * 3) Refine map_bias based on (2). The result will be >= the result
2374 * from (2), and will always be correct.
Jason Evanse476f8a2010-01-16 09:53:50 -08002375 */
Jason Evans7393f442010-10-01 17:35:43 -07002376 map_bias = 0;
2377 for (i = 0; i < 3; i++) {
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002378 header_size = offsetof(arena_chunk_t, map_bits) +
2379 ((sizeof(arena_chunk_map_bits_t) +
2380 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
Jason Evans0c5dd032014-09-29 01:31:39 -07002381 map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
Jason Evans7393f442010-10-01 17:35:43 -07002382 }
2383 assert(map_bias > 0);
2384
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002385 map_misc_offset = offsetof(arena_chunk_t, map_bits) +
2386 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
2387
Jason Evansae4c7b42012-04-02 07:04:34 -07002388 arena_maxclass = chunksize - (map_bias << LG_PAGE);
Jason Evansa0bf2422010-01-29 14:30:41 -08002389
Jason Evansb1726102012-02-28 16:50:47 -08002390 bin_info_init();
Jason Evanse476f8a2010-01-16 09:53:50 -08002391}
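
/*
 * Illustrative sketch, not part of the original source: the fixed-point
 * iteration used above to size a header whose length depends on how many
 * pages the header itself occupies.  Starting from a bias of zero (which
 * overestimates the map length), each pass recomputes the header size for
 * the current bias and derives a new bias; as the comment above notes, three
 * passes are enough for the value to settle.  The example_* name and the
 * per-entry size parameter are hypothetical.
 */
static size_t
example_compute_map_bias(size_t fixed_header, size_t entry_size,
    size_t npages, size_t page_size)
{
	size_t bias = 0;
	unsigned pass;

	for (pass = 0; pass < 3; pass++) {
		size_t header = fixed_header + entry_size * (npages - bias);

		bias = (header + page_size - 1) / page_size;
	}
	return (bias);
}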
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002392
2393void
2394arena_prefork(arena_t *arena)
2395{
2396 unsigned i;
2397
2398 malloc_mutex_prefork(&arena->lock);
2399 for (i = 0; i < NBINS; i++)
2400 malloc_mutex_prefork(&arena->bins[i].lock);
2401}
2402
2403void
2404arena_postfork_parent(arena_t *arena)
2405{
2406 unsigned i;
2407
2408 for (i = 0; i < NBINS; i++)
2409 malloc_mutex_postfork_parent(&arena->bins[i].lock);
2410 malloc_mutex_postfork_parent(&arena->lock);
2411}
2412
2413void
2414arena_postfork_child(arena_t *arena)
2415{
2416 unsigned i;
2417
2418 for (i = 0; i < NBINS; i++)
2419 malloc_mutex_postfork_child(&arena->bins[i].lock);
2420 malloc_mutex_postfork_child(&arena->lock);
2421}