#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t arena_bin_info[NBINS];

JEMALLOC_ALIGNED(CACHELINE)
const uint32_t small_bin2size_tab[NBINS] = {
#define B2S_bin_yes(size) \
	size,
#define B2S_bin_no(size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
	B2S_bin_##bin((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef B2S_bin_yes
#undef B2S_bin_no
#undef SC
};

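/*
 * Small size -> bin index lookup table.  Each S2B_<lg_delta_lookup>(i) macro
 * below expands to 2^(lg_delta_lookup - 3) copies of the bin index i, so the
 * table ends up with one entry per 8-byte quantum of the lookup-eligible
 * small size range (e.g. a size class with lg_delta_lookup == 4 contributes
 * two entries).
 */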
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t small_size2bin_tab[] = {
#define S2B_3(i) i,
#define S2B_4(i) S2B_3(i) S2B_3(i)
#define S2B_5(i) S2B_4(i) S2B_4(i)
#define S2B_6(i) S2B_5(i) S2B_5(i)
#define S2B_7(i) S2B_6(i) S2B_6(i)
#define S2B_8(i) S2B_7(i) S2B_7(i)
#define S2B_9(i) S2B_8(i) S2B_8(i)
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
	S2B_##lg_delta_lookup(index)
	SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_no
#undef SC
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

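/*
 * A chunk map element's page index is recovered purely from pointer
 * arithmetic: take the element's byte offset within its chunk, subtract the
 * offset of the embedded map array, divide by the element size, and add
 * map_bias (the index of the first non-header page).
 */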
JEMALLOC_INLINE_C size_t
arena_mapelm_to_pageind(arena_chunk_map_t *mapelm)
{
	uintptr_t map_offset =
	    CHUNK_ADDR2OFFSET(mapelm) - offsetof(arena_chunk_t, map);

	return ((map_offset / sizeof(arena_chunk_map_t)) + map_bias);
}

JEMALLOC_INLINE_C size_t
arena_mapelm_to_bits(arena_chunk_map_t *mapelm)
{

	return (mapelm->bits);
}

static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
    u.rb_link, arena_run_comp)

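/*
 * Comparison function for the runs_avail tree.  Available runs are ordered
 * primarily by size and secondarily by address, and synthetic search keys
 * (pointers tagged with CHUNK_MAP_KEY, with the requested size encoded in
 * the bits above PAGE_MASK) sort below any real map element of equal size.
 * Together with arena_avail_tree_nsearch() this yields the lowest-addressed
 * run among the smallest runs that can satisfy a request, i.e. the "lowest
 * best fit" referenced by the run allocation functions below.
 */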
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	int ret;
	size_t a_size;
	size_t b_size = arena_mapelm_to_bits(b) & ~PAGE_MASK;
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	if (a_mapelm & CHUNK_MAP_KEY)
		a_size = a_mapelm & ~PAGE_MASK;
	else
		a_size = arena_mapelm_to_bits(a) & ~PAGE_MASK;

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		if (!(a_mapelm & CHUNK_MAP_KEY))
			ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
		else {
			/*
			 * Treat keys as if they are lower than anything else.
			 */
			ret = -1;
		}
	}

	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
    u.rb_link, arena_avail_comp)

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));

	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty += npages;
		chunk->ndirty += npages;
	}

	arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));

	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty -= npages;
		chunk->ndirty -= npages;
	}

	arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
}

Jason Evans49f7e8f2011-03-15 13:59:15 -0700163arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
Jason Evanse476f8a2010-01-16 09:53:50 -0800164{
165 void *ret;
Jason Evans84c8eef2011-03-16 10:30:13 -0700166 unsigned regind;
167 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
168 (uintptr_t)bin_info->bitmap_offset);
Jason Evanse476f8a2010-01-16 09:53:50 -0800169
Jason Evans1e0a6362010-03-13 13:41:58 -0800170 assert(run->nfree > 0);
Jason Evans84c8eef2011-03-16 10:30:13 -0700171 assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
Jason Evanse476f8a2010-01-16 09:53:50 -0800172
Jason Evans84c8eef2011-03-16 10:30:13 -0700173 regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
174 ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
Jason Evans122449b2012-04-06 00:35:09 -0700175 (uintptr_t)(bin_info->reg_interval * regind));
Jason Evans1e0a6362010-03-13 13:41:58 -0800176 run->nfree--;
Jason Evans84c8eef2011-03-16 10:30:13 -0700177 if (regind == run->nextind)
178 run->nextind++;
179 assert(regind < run->nextind);
Jason Evans1e0a6362010-03-13 13:41:58 -0800180 return (ret);
Jason Evans6109fe02010-02-10 10:37:56 -0800181}
182
183static inline void
Jason Evans1e0a6362010-03-13 13:41:58 -0800184arena_run_reg_dalloc(arena_run_t *run, void *ptr)
Jason Evans6109fe02010-02-10 10:37:56 -0800185{
Jason Evans49f7e8f2011-03-15 13:59:15 -0700186 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans203484e2012-05-02 00:30:36 -0700187 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
188 size_t mapbits = arena_mapbits_get(chunk, pageind);
Jason Evans80737c32012-05-02 16:11:03 -0700189 size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
Jason Evans49f7e8f2011-03-15 13:59:15 -0700190 arena_bin_info_t *bin_info = &arena_bin_info[binind];
Jason Evans84c8eef2011-03-16 10:30:13 -0700191 unsigned regind = arena_run_regind(run, bin_info, ptr);
192 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
193 (uintptr_t)bin_info->bitmap_offset);
194
Jason Evans49f7e8f2011-03-15 13:59:15 -0700195 assert(run->nfree < bin_info->nregs);
Jason Evans1e0a6362010-03-13 13:41:58 -0800196 /* Freeing an interior pointer can cause assertion failure. */
197 assert(((uintptr_t)ptr - ((uintptr_t)run +
Jason Evans122449b2012-04-06 00:35:09 -0700198 (uintptr_t)bin_info->reg0_offset)) %
199 (uintptr_t)bin_info->reg_interval == 0);
Jason Evans21fb95b2010-10-18 17:45:40 -0700200 assert((uintptr_t)ptr >= (uintptr_t)run +
Jason Evans49f7e8f2011-03-15 13:59:15 -0700201 (uintptr_t)bin_info->reg0_offset);
Jason Evans84c8eef2011-03-16 10:30:13 -0700202 /* Freeing an unallocated pointer can cause assertion failure. */
203 assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
Jason Evanse476f8a2010-01-16 09:53:50 -0800204
Jason Evans84c8eef2011-03-16 10:30:13 -0700205 bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
Jason Evans1e0a6362010-03-13 13:41:58 -0800206 run->nfree++;
Jason Evanse476f8a2010-01-16 09:53:50 -0800207}
208
Jason Evans21fb95b2010-10-18 17:45:40 -0700209static inline void
Jason Evans38067482013-01-21 20:04:42 -0800210arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
211{
212
Jason Evansbd87b012014-04-15 16:35:08 -0700213 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
214 (run_ind << LG_PAGE)), (npages << LG_PAGE));
Jason Evans38067482013-01-21 20:04:42 -0800215 memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
216 (npages << LG_PAGE));
Jason Evans38067482013-01-21 20:04:42 -0800217}
218
219static inline void
Jason Evansdda90f52013-10-19 23:48:40 -0700220arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
221{
222
Jason Evansbd87b012014-04-15 16:35:08 -0700223 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
224 << LG_PAGE)), PAGE);
Jason Evansdda90f52013-10-19 23:48:40 -0700225}
226
227static inline void
Jason Evans38067482013-01-21 20:04:42 -0800228arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
Jason Evans21fb95b2010-10-18 17:45:40 -0700229{
Jason Evansd4bab212010-10-24 20:08:37 -0700230 size_t i;
Jason Evansae4c7b42012-04-02 07:04:34 -0700231 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
Jason Evansd4bab212010-10-24 20:08:37 -0700232
Jason Evansdda90f52013-10-19 23:48:40 -0700233 arena_run_page_mark_zeroed(chunk, run_ind);
Jason Evansae4c7b42012-04-02 07:04:34 -0700234 for (i = 0; i < PAGE / sizeof(size_t); i++)
Jason Evans21fb95b2010-10-18 17:45:40 -0700235 assert(p[i] == 0);
236}
Jason Evans21fb95b2010-10-18 17:45:40 -0700237
Jason Evanse476f8a2010-01-16 09:53:50 -0800238static void
Jason Evansaa5113b2014-01-14 16:23:03 -0800239arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
240{
241
242 if (config_stats) {
Jason Evans15229372014-08-06 23:38:39 -0700243 ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
244 - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
245 LG_PAGE);
Jason Evansaa5113b2014-01-14 16:23:03 -0800246 if (cactive_diff != 0)
247 stats_cactive_add(cactive_diff);
248 }
249}
250
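/*
 * Remove the pages backing a new run from the arena's availability
 * bookkeeping.  Only the first need_pages of the chosen unallocated run are
 * consumed; any trailing remainder is re-tagged as unallocated (preserving
 * its dirty/unzeroed flags), reinserted into runs_avail, and appended to
 * runs_dirty when dirty, so it can back a later allocation.
 */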
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t need_pages)
{
	arena_chunk_map_t *mapelm;
	size_t total_pages, rem_pages;

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0) {
		/* If the run is dirty, it must be in the dirty list. */
		mapelm = arena_mapp_get(chunk, run_ind);
		ql_remove(&arena->runs_dirty, mapelm, dr_link);
	}
	arena_cactive_update(arena, need_pages, 0);
	arena->nactive += need_pages;

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		if (flag_dirty != 0) {
			arena_mapbits_unallocated_set(chunk,
			    run_ind+need_pages, (rem_pages << LG_PAGE),
			    flag_dirty);
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    flag_dirty);
			mapelm = arena_mapp_get(chunk, run_ind+need_pages);
			/*
			 * Append the trailing run at the end of the dirty list.
			 * We could also insert the run at the original place.
			 * Let us consider this later.
			 */
			ql_elm_new(mapelm, dr_link);
			ql_tail_insert(&arena->runs_dirty, mapelm, dr_link);
		} else {
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+need_pages));
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+total_pages-1));
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

static void
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	size_t flag_dirty, run_ind, need_pages, i;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    need_pages);
	}

	if (zero) {
		if (flag_dirty == 0) {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		} else {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
}

static void
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	arena_run_split_large_helper(arena, run, size, true, zero);
}

static void
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	arena_run_split_large_helper(arena, run, size, false, zero);
}

static void
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    size_t binind)
{
	arena_chunk_t *chunk;
	size_t flag_dirty, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);

	/*
	 * Propagate the dirty and unzeroed flags to the allocated small run,
	 * so that arena_dalloc_bin_run() has the ability to conditionally trim
	 * clean pages.
	 */
	arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
	/*
	 * The first page will always be dirtied during small run
	 * initialization, so a validation failure here would not actually
	 * cause an observable failure.
	 */
	if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
	    run_ind) == 0)
		arena_run_page_validate_zeroed(chunk, run_ind);
	for (i = 1; i < need_pages - 1; i++) {
		arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
	    binind, flag_dirty);
	if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1) == 0)
		arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxclass);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxclass);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
    bool *zero)
{
	arena_chunk_t *chunk;
	chunk_alloc_t *chunk_alloc;
	chunk_dalloc_t *chunk_dalloc;

	chunk_alloc = arena->chunk_alloc;
	chunk_dalloc = arena->chunk_dalloc;
	malloc_mutex_unlock(&arena->lock);
	chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
	    arena->ind, size, alignment, zero);
	malloc_mutex_lock(&arena->lock);
	if (config_stats && chunk != NULL)
		arena->stats.mapped += chunksize;

	return (chunk);
}

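/*
 * Allocate a chunk-aligned region for a huge allocation on behalf of this
 * arena.  Stats are updated optimistically before arena->lock is dropped so
 * that the potentially slow chunk allocation can run unlocked; on failure
 * the counters are rolled back (nrequests_huge is left alone, presumably
 * because the request itself still happened).
 */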
void *
arena_chunk_alloc_huge(arena_t *arena, size_t size, size_t alignment,
    bool *zero)
{
	void *ret;
	chunk_alloc_t *chunk_alloc;
	chunk_dalloc_t *chunk_dalloc;

	malloc_mutex_lock(&arena->lock);
	chunk_alloc = arena->chunk_alloc;
	chunk_dalloc = arena->chunk_dalloc;
	if (config_stats) {
		/* Optimistically update stats prior to unlocking. */
		arena->stats.mapped += size;
		arena->stats.allocated_huge += size;
		arena->stats.nmalloc_huge++;
		arena->stats.nrequests_huge++;
	}
	arena->nactive += (size >> LG_PAGE);
	malloc_mutex_unlock(&arena->lock);

	ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
	    size, alignment, zero);
	if (config_stats) {
		if (ret != NULL)
			stats_cactive_add(size);
		else {
			/* Revert optimistic stats updates. */
			malloc_mutex_lock(&arena->lock);
			arena->stats.mapped -= size;
			arena->stats.allocated_huge -= size;
			arena->stats.nmalloc_huge--;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	return (ret);
}

static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero;
	size_t unzeroed, i;

	assert(arena->spare == NULL);

	zero = false;
	chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero);
	if (chunk == NULL)
		return (NULL);

	chunk->arena = arena;

	/*
	 * Claim that no pages are in use, since the header is merely overhead.
	 */
	chunk->ndirty = 0;

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
	 */
	unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
	    unzeroed);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (zero == false) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_mapp_get(chunk, map_bias+1),
		    (size_t)((uintptr_t) arena_mapp_get(chunk, chunk_npages-1) -
		    (uintptr_t)arena_mapp_get(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_unzeroed_set(chunk, i, unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
		    map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
		    chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
		    map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
	    unzeroed);

	return (chunk);
}

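/*
 * Obtain a chunk for this arena, either by reusing the cached spare or by
 * mapping and initializing a fresh one, then make its single maximal run
 * available to the run allocator via runs_avail (and runs_dirty if the
 * spare's pages are still dirty).
 */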
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(arena);
		if (chunk == NULL)
			return (NULL);
	}

	/* Insert the run into the runs_avail tree. */
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
	if (arena_mapbits_dirty_get(chunk, map_bias) != 0) {
		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, map_bias);
		ql_elm_new(mapelm, dr_link);
		ql_tail_insert(&arena->runs_dirty, mapelm, dr_link);
	}

	return (chunk);
}

static void
arena_chunk_dalloc_internal(arena_t *arena, arena_chunk_t *chunk)
{
	chunk_dalloc_t *chunk_dalloc;

	chunk_dalloc = arena->chunk_dalloc;
	malloc_mutex_unlock(&arena->lock);
	chunk_dalloc((void *)chunk, chunksize, arena->ind);
	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena->stats.mapped -= chunksize;
}

void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size)
{
	chunk_dalloc_t *chunk_dalloc;

	malloc_mutex_lock(&arena->lock);
	chunk_dalloc = arena->chunk_dalloc;
	if (config_stats) {
		arena->stats.mapped -= size;
		arena->stats.allocated_huge -= size;
		arena->stats.ndalloc_huge++;
		stats_cactive_sub(size);
	}
	arena->nactive -= (size >> LG_PAGE);
	malloc_mutex_unlock(&arena->lock);
	chunk_dalloc(chunk, size, arena->ind);
}

static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxclass);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxclass);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	/*
	 * Remove run from the runs_avail tree, so that the arena does not use
	 * it.
	 */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
	if (arena_mapbits_dirty_get(chunk, map_bias) != 0) {
		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, map_bias);
		ql_remove(&arena->runs_dirty, mapelm, dr_link);
	}

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;

		arena->spare = chunk;
		arena_chunk_dalloc_internal(arena, spare);
	} else
		arena->spare = chunk;
}

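/*
 * Run allocation, large flavor.  The helper does the actual lookup: it
 * builds a CHUNK_MAP_KEY search key for the requested size, searches
 * runs_avail with arena_avail_tree_nsearch() for the lowest best fit, and
 * splits the chosen run.  The wrapper falls back to allocating a new chunk
 * and then retries the search once more, because arena->lock is dropped
 * inside arena_chunk_alloc() and another thread may have freed a usable run
 * in the meantime.
 */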
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run;
	arena_chunk_map_t *mapelm;
	arena_chunk_map_t *key;

	key = (arena_chunk_map_t *)(size | CHUNK_MAP_KEY);
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = arena_mapelm_to_pageind(mapelm);

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split_large(arena, run, size, zero);
		return (run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxclass);
	assert((size & PAGE_MASK) == 0);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
		arena_run_split_large(arena, run, size, zero);
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
{
	arena_run_t *run;
	arena_chunk_map_t *mapelm;
	arena_chunk_map_t *key;

	key = (arena_chunk_map_t *)(size | CHUNK_MAP_KEY);
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = arena_mapelm_to_pageind(mapelm);

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split_small(arena, run, size, binind);
		return (run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxclass);
	assert((size & PAGE_MASK) == 0);
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
		arena_run_split_small(arena, run, size, binind);
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

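/*
 * Purge dirty pages once they exceed a fraction of active memory: with
 * opt_lg_dirty_mult set to 3, for example, a purge is triggered when more
 * than nactive/8 pages are dirty, and a negative value disables purging
 * entirely.
 */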
static inline void
arena_maybe_purge(arena_t *arena)
{
	size_t npurgeable, threshold;

	/* Don't purge if the option is disabled. */
	if (opt_lg_dirty_mult < 0)
		return;
	npurgeable = arena->ndirty;
	threshold = (arena->nactive >> opt_lg_dirty_mult);
	/*
	 * Don't purge unless the number of purgeable pages exceeds the
	 * threshold.
	 */
	if (npurgeable <= threshold)
		return;

	arena_purge(arena, false);
}

static size_t
arena_dirty_count(arena_t *arena)
{
	size_t ndirty = 0;
	arena_chunk_map_t *mapelm;
	arena_chunk_t *chunk;
	size_t pageind, npages;

	ql_foreach(mapelm, &arena->runs_dirty, dr_link) {
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
		pageind = arena_mapelm_to_pageind(mapelm);
		assert(arena_mapbits_allocated_get(chunk, pageind) == 0);
		assert(arena_mapbits_large_get(chunk, pageind) == 0);
		assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
		npages = arena_mapbits_unallocated_size_get(chunk, pageind) >>
		    LG_PAGE;
		ndirty += npages;
	}

	return (ndirty);
}

static size_t
arena_compute_npurgatory(arena_t *arena, bool all)
{
	size_t npurgatory, npurgeable;

	/*
	 * Compute the minimum number of pages that this thread should try to
	 * purge.
	 */
	npurgeable = arena->ndirty;

	if (all == false) {
		size_t threshold = (arena->nactive >> opt_lg_dirty_mult);

		npurgatory = npurgeable - threshold;
	} else
		npurgatory = npurgeable;

	return (npurgatory);
}

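/*
 * Purging is split into three phases, implemented by the helpers below:
 * arena_stash_dirty() walks runs_dirty, temporarily allocates each dirty
 * run, and moves its map element onto a private purge list;
 * arena_purge_stashed() drops arena->lock and hands the stashed pages to
 * pages_purge(), recording the resulting unzeroed state in the chunk map;
 * arena_unstash_purged() then deallocates the runs, returning them to
 * runs_avail (and runs_dirty) via arena_run_dalloc().
 */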
static size_t
arena_stash_dirty(arena_t *arena, bool all, size_t npurgatory,
    arena_chunk_mapelms_t *mapelms)
{
	arena_chunk_map_t *mapelm;
	size_t nstashed = 0;
	arena_chunk_t *chunk;
	size_t pageind, npages, run_size;
	arena_run_t *run;

	/* Add at least npurgatory pages to purge_list. */
	for (mapelm = ql_first(&arena->runs_dirty); mapelm != NULL;
	    mapelm = ql_first(&arena->runs_dirty)) {
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
		pageind = arena_mapelm_to_pageind(mapelm);
		run_size = arena_mapbits_unallocated_size_get(chunk, pageind);
		npages = run_size >> LG_PAGE;
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
		    LG_PAGE));

		assert(pageind + npages <= chunk_npages);
		assert(arena_mapbits_dirty_get(chunk, pageind) ==
		    arena_mapbits_dirty_get(chunk, pageind+npages-1));

		/* Temporarily allocate the free dirty run. */
		arena_run_split_large(arena, run, run_size, false);
		/* Append to purge_list for later processing. */
		ql_elm_new(mapelm, dr_link);
		ql_tail_insert(mapelms, mapelm, dr_link);

		nstashed += npages;

		if (all == false && nstashed >= npurgatory)
			break;
	}

	return (nstashed);
}

static size_t
arena_purge_stashed(arena_t *arena, arena_chunk_mapelms_t *mapelms)
{
	size_t npurged, nmadvise;
	arena_chunk_map_t *mapelm;
	arena_chunk_t *chunk;
	size_t pageind, npages, run_size;

	if (config_stats)
		nmadvise = 0;
	npurged = 0;

	malloc_mutex_unlock(&arena->lock);

	ql_foreach(mapelm, mapelms, dr_link) {
		bool unzeroed;
		size_t flag_unzeroed, i;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
		pageind = arena_mapelm_to_pageind(mapelm);
		run_size = arena_mapbits_large_size_get(chunk, pageind);
		npages = run_size >> LG_PAGE;

		assert(pageind + npages <= chunk_npages);
		unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
		    LG_PAGE)), run_size);
		flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;

		/*
		 * Set the unzeroed flag for all pages, now that pages_purge()
		 * has returned whether the pages were zeroed as a side effect
		 * of purging.  This chunk map modification is safe even though
		 * the arena mutex isn't currently owned by this thread,
		 * because the run is marked as allocated, thus protecting it
		 * from being modified by any other thread.  As long as these
		 * writes don't perturb the first and last elements'
		 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
		 */
		for (i = 0; i < npages; i++) {
			arena_mapbits_unzeroed_set(chunk, pageind+i,
			    flag_unzeroed);
		}

		npurged += npages;
		if (config_stats)
			nmadvise++;
	}

	malloc_mutex_lock(&arena->lock);

	if (config_stats) {
		arena->stats.nmadvise += nmadvise;
		arena->stats.purged += npurged;
	}

	return (npurged);
}

static void
arena_unstash_purged(arena_t *arena, arena_chunk_mapelms_t *mapelms)
{
	arena_chunk_map_t *mapelm;
	arena_chunk_t *chunk;
	arena_run_t *run;
	size_t pageind;

	/* Deallocate runs. */
	for (mapelm = ql_first(mapelms); mapelm != NULL;
	    mapelm = ql_first(mapelms)) {
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
		pageind = arena_mapelm_to_pageind(mapelm);
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
		    LG_PAGE));
		ql_remove(mapelms, mapelm, dr_link);
		arena_run_dalloc(arena, run, false, true);
	}
}

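/*
 * Drive one purge pass: compute how many pages this thread should try to
 * purge, stash at least that many dirty pages, purge them with arena->lock
 * dropped, and finally release the stashed runs.  Callers are expected to
 * hold arena->lock (arena_purge_all() acquires it explicitly).
 */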
void
arena_purge(arena_t *arena, bool all)
{
	size_t npurgatory, npurgeable, npurged;
	arena_chunk_mapelms_t purge_list;

	if (config_debug) {
		size_t ndirty = arena_dirty_count(arena);
		assert(ndirty == arena->ndirty);
	}
	assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty || all);

	if (config_stats)
		arena->stats.npurge++;

	/*
	 * Add the minimum number of pages this thread should try to purge to
	 * arena->npurgatory.  This will keep multiple threads from racing to
	 * reduce ndirty below the threshold.
	 */
	npurgatory = arena_compute_npurgatory(arena, all);

	ql_new(&purge_list);

	npurgeable = arena_stash_dirty(arena, all, npurgatory, &purge_list);
	assert(npurgeable >= npurgatory);

	npurged = arena_purge_stashed(arena, &purge_list);
	assert(npurged == npurgeable);

	arena_unstash_purged(arena, &purge_list);
}

void
arena_purge_all(arena_t *arena)
{

	malloc_mutex_lock(&arena->lock);
	arena_purge(arena, true);
	malloc_mutex_unlock(&arena->lock);
}

static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
{
	size_t size = *p_size;
	size_t run_ind = *p_run_ind;
	size_t run_pages = *p_run_pages;

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages);
		size_t nrun_pages = nrun_size >> LG_PAGE;

		/*
		 * Remove successor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == nrun_size);
		assert(arena_mapbits_dirty_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);

		/* If the successor is dirty, remove it from runs_dirty. */
		if (flag_dirty != 0) {
			arena_chunk_map_t *mapelm = arena_mapp_get(chunk,
			    run_ind+run_pages);
			ql_remove(&arena->runs_dirty, mapelm, dr_link);
		}

		size += nrun_size;
		run_pages += nrun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Try to coalesce backward. */
	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
	    run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
	    flag_dirty) {
		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind-1);
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		/*
		 * Remove predecessor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
		    prun_size);
		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
		arena_avail_remove(arena, chunk, run_ind, prun_pages);

		/* If the predecessor is dirty, remove it from runs_dirty. */
		if (flag_dirty != 0) {
			arena_chunk_map_t *mapelm = arena_mapp_get(chunk,
			    run_ind);
			ql_remove(&arena->runs_dirty, mapelm, dr_link);
		}

		size += prun_size;
		run_pages += prun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	*p_size = size;
	*p_run_ind = run_ind;
	*p_run_pages = run_pages;
}

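/*
 * Return a run to the arena.  The run is marked unallocated (dirty if the
 * caller dirtied it, or if it was dirty before allocation and has not been
 * cleaned), coalesced with any adjacent unallocated runs of the same dirty
 * state, and inserted into runs_avail (and runs_dirty when dirty).  If the
 * resulting run spans the entire chunk, the chunk itself is retired via
 * arena_chunk_dalloc(), and dirty pages may trigger a purge afterwards.
 */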
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
	arena_chunk_t *chunk;
	arena_chunk_map_t *mapelm;
	size_t size, run_ind, run_pages, flag_dirty;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);
	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
		size = arena_mapbits_large_size_get(chunk, run_ind);
		assert(size == PAGE ||
		    arena_mapbits_large_size_get(chunk,
		    run_ind+(size>>LG_PAGE)-1) == 0);
	} else {
		size_t binind = arena_bin_index(arena, run->bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];
		size = bin_info->run_size;
	}
	run_pages = (size >> LG_PAGE);
	arena_cactive_update(arena, 0, run_pages);
	arena->nactive -= run_pages;

	/*
	 * The run is dirty if the caller claims to have dirtied it, as well as
	 * if it was already dirty before being allocated and the caller
	 * doesn't claim to have cleaned it.
	 */
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
		dirty = true;
	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;

	/* Mark pages as unallocated in the chunk map. */
	if (dirty) {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    CHUNK_MAP_DIRTY);
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    CHUNK_MAP_DIRTY);
	} else {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind));
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
	}

	arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
	    flag_dirty);

	/* Insert into runs_avail, now that coalescing is complete. */
	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	arena_avail_insert(arena, chunk, run_ind, run_pages);

	if (dirty) {
		/* Insert into runs_dirty list. */
		mapelm = arena_mapp_get(chunk, run_ind);
		ql_elm_new(mapelm, dr_link);
		ql_tail_insert(&arena->runs_dirty, mapelm, dr_link);
	}

	/* Deallocate chunk if it is now completely unused. */
	if (size == arena_maxclass) {
		assert(run_ind == map_bias);
		assert(run_pages == (arena_maxclass >> LG_PAGE));
		arena_chunk_dalloc(arena, chunk);
	}

	/*
	 * It is okay to do dirty page processing here even if the chunk was
	 * deallocated above, since in that case it is the spare.  Waiting
	 * until after possible chunk deallocation to do dirty processing
	 * allows for an old spare to be fully deallocated, thus decreasing the
	 * chances of spuriously crossing the dirty page purging threshold.
	 */
	if (dirty)
		arena_maybe_purge(arena);
}

static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * leading run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);

	if (config_debug) {
		UNUSED size_t tail_npages = newsize >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
	    flag_dirty);

	arena_run_dalloc(arena, run, false, false);
}

static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = newsize >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * trailing run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);

	if (config_debug) {
		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
	    flag_dirty);

	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
	    dirty, false);
}

static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
	arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
	if (mapelm != NULL) {
		arena_chunk_t *chunk;
		size_t pageind;
		arena_run_t *run;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
		pageind = arena_mapelm_to_pageind(mapelm);
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
		    arena_mapbits_small_runind_get(chunk, pageind)) <<
		    LG_PAGE));
		return (run);
	}

	return (NULL);
}

static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

	assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);

	arena_run_tree_insert(&bin->runs, mapelm);
}

static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

	assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);

	arena_run_tree_remove(&bin->runs, mapelm);
}

static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
	arena_run_t *run = arena_bin_runs_first(bin);
	if (run != NULL) {
		arena_bin_runs_remove(bin, run);
		if (config_stats)
			bin->stats.reruns++;
	}
	return (run);
}

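/*
 * Find or create a run with free regions for the given bin.  bin->lock is
 * dropped while a new run is allocated under arena->lock, so by the time
 * bin->lock is reacquired another thread may already have supplied a run or
 * refilled bin->runcur; the retry logic here and in arena_bin_malloc_hard()
 * below exists to cope with that race.
 */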
static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
	arena_run_t *run;
	size_t binind;
	arena_bin_info_t *bin_info;

	/* Look for a usable run. */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);
	/* No existing runs have any space available. */

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];

	/* Allocate a new run. */
	malloc_mutex_unlock(&bin->lock);
	/******************************/
	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc_small(arena, bin_info->run_size, binind);
	if (run != NULL) {
		bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
		    (uintptr_t)bin_info->bitmap_offset);

		/* Initialize run internals. */
		run->bin = bin;
		run->nextind = 0;
		run->nfree = bin_info->nregs;
		bitmap_init(bitmap, &bin_info->bitmap_info);
	}
	malloc_mutex_unlock(&arena->lock);
	/********************************/
	malloc_mutex_lock(&bin->lock);
	if (run != NULL) {
		if (config_stats) {
			bin->stats.nruns++;
			bin->stats.curruns++;
		}
		return (run);
	}

	/*
	 * arena_run_alloc_small() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);

	return (NULL);
}

/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
	void *ret;
	size_t binind;
	arena_bin_info_t *bin_info;
	arena_run_t *run;

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];
	bin->runcur = NULL;
	run = arena_bin_nonfull_run_get(arena, bin);
	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
		/*
		 * Another thread updated runcur while this one ran without the
		 * bin lock in arena_bin_nonfull_run_get().
		 */
		assert(bin->runcur->nfree > 0);
		ret = arena_run_reg_alloc(bin->runcur, bin_info);
		if (run != NULL) {
			arena_chunk_t *chunk;

			/*
			 * arena_run_alloc_small() may have allocated run, or
			 * it may have pulled run from the bin's run tree.
			 * Therefore it is unsafe to make any assumptions about
			 * how run has previously been used, and
1341 * arena_bin_lower_run() must be called, as if a region
1342 * were just deallocated from the run.
Jason Evans940a2e02010-10-17 17:51:37 -07001343 */
1344 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001345 if (run->nfree == bin_info->nregs)
Jason Evans8de6a022010-10-17 20:57:30 -07001346 arena_dalloc_bin_run(arena, chunk, run, bin);
1347 else
1348 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07001349 }
1350 return (ret);
1351 }
1352
1353 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001354 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07001355
1356 bin->runcur = run;
1357
Jason Evanse476f8a2010-01-16 09:53:50 -08001358 assert(bin->runcur->nfree > 0);
1359
Jason Evans49f7e8f2011-03-15 13:59:15 -07001360 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08001361}
1362
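/*
 * Fill a thread cache bin directly from the arena.  The number of regions
 * filled is ncached_max >> lg_fill_div rather than the full capacity, and
 * the regions are written into tbin->avail back to front so that the lowest
 * regions are handed out first.  Bin statistics, including the tcache's
 * accumulated request count, are folded in while bin->lock is held.
 */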
Jason Evans86815df2010-03-13 20:32:56 -08001363void
Jason Evans7372b152012-02-10 20:22:09 -08001364arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
1365 uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08001366{
1367 unsigned i, nfill;
1368 arena_bin_t *bin;
1369 arena_run_t *run;
1370 void *ptr;
1371
1372 assert(tbin->ncached == 0);
1373
Jason Evans88c222c2013-02-06 11:59:30 -08001374 if (config_prof && arena_prof_accum(arena, prof_accumbytes))
1375 prof_idump();
Jason Evanse69bee02010-03-15 22:25:23 -07001376 bin = &arena->bins[binind];
1377 malloc_mutex_lock(&bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07001378 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1379 tbin->lg_fill_div); i < nfill; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001380 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001381 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001382 else
1383 ptr = arena_bin_malloc_hard(arena, bin);
Jason Evans3fa9a2f2010-03-07 15:34:14 -08001384 if (ptr == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001385 break;
Jason Evans122449b2012-04-06 00:35:09 -07001386 if (config_fill && opt_junk) {
1387 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
1388 true);
1389 }
Jason Evans9c43c132011-03-18 10:53:15 -07001390 /* Insert such that low regions get used first. */
1391 tbin->avail[nfill - 1 - i] = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08001392 }
Jason Evans7372b152012-02-10 20:22:09 -08001393 if (config_stats) {
1394 bin->stats.allocated += i * arena_bin_info[binind].reg_size;
1395 bin->stats.nmalloc += i;
1396 bin->stats.nrequests += tbin->tstats.nrequests;
1397 bin->stats.nfills++;
1398 tbin->tstats.nrequests = 0;
1399 }
Jason Evans86815df2010-03-13 20:32:56 -08001400 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001401 tbin->ncached = i;
Jason Evanse476f8a2010-01-16 09:53:50 -08001402}
Jason Evanse476f8a2010-01-16 09:53:50 -08001403
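/*
 * Junk filling for small regions uses two byte patterns: 0xa5 marks freshly
 * allocated memory and redzones, and 0x5a marks deallocated memory.  Each
 * region occupies reg_interval bytes laid out as
 *
 *   [redzone_size][reg_size usable bytes][redzone_size]
 *
 * so the non-zeroed case below paints the whole interval with 0xa5, while
 * the zeroed case paints only the two redzones and leaves the usable bytes
 * untouched (they are zero-filled separately).
 */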
Jason Evans122449b2012-04-06 00:35:09 -07001404void
1405arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
1406{
1407
1408 if (zero) {
1409 size_t redzone_size = bin_info->redzone_size;
1410 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
1411 redzone_size);
1412 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
1413 redzone_size);
1414 } else {
1415 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
1416 bin_info->reg_interval);
1417 }
1418}
1419
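/*
 * The JEMALLOC_JET preprocessor dance below compiles the implementation
 * under an *_impl name and exports it through a same-named, replaceable
 * function pointer so that white-box tests can interpose their own hook;
 * without JEMALLOC_JET the renaming is skipped and no pointer indirection
 * exists.
 */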
Jason Evans0d6c5d82013-12-17 15:14:36 -08001420#ifdef JEMALLOC_JET
1421#undef arena_redzone_corruption
1422#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
1423#endif
1424static void
1425arena_redzone_corruption(void *ptr, size_t usize, bool after,
1426 size_t offset, uint8_t byte)
1427{
1428
1429 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
1430 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
1431 after ? "after" : "before", ptr, usize, byte);
1432}
1433#ifdef JEMALLOC_JET
Jason Evans0d6c5d82013-12-17 15:14:36 -08001434#undef arena_redzone_corruption
Jason Evans6b694c42014-01-07 16:47:56 -08001435#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
1436arena_redzone_corruption_t *arena_redzone_corruption =
1437 JEMALLOC_N(arena_redzone_corruption_impl);
Jason Evans0d6c5d82013-12-17 15:14:36 -08001438#endif
1439
1440static void
1441arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07001442{
1443 size_t size = bin_info->reg_size;
1444 size_t redzone_size = bin_info->redzone_size;
1445 size_t i;
1446 bool error = false;
1447
1448 for (i = 1; i <= redzone_size; i++) {
Jason Evans0d6c5d82013-12-17 15:14:36 -08001449 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
1450 if (*byte != 0xa5) {
Jason Evans122449b2012-04-06 00:35:09 -07001451 error = true;
Jason Evans0d6c5d82013-12-17 15:14:36 -08001452 arena_redzone_corruption(ptr, size, false, i, *byte);
1453 if (reset)
1454 *byte = 0xa5;
Jason Evans122449b2012-04-06 00:35:09 -07001455 }
1456 }
1457 for (i = 0; i < redzone_size; i++) {
Jason Evans0d6c5d82013-12-17 15:14:36 -08001458 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
1459 if (*byte != 0xa5) {
Jason Evans122449b2012-04-06 00:35:09 -07001460 error = true;
Jason Evans0d6c5d82013-12-17 15:14:36 -08001461 arena_redzone_corruption(ptr, size, true, i, *byte);
1462 if (reset)
1463 *byte = 0xa5;
Jason Evans122449b2012-04-06 00:35:09 -07001464 }
1465 }
1466 if (opt_abort && error)
1467 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08001468}
Jason Evans122449b2012-04-06 00:35:09 -07001469
Jason Evans6b694c42014-01-07 16:47:56 -08001470#ifdef JEMALLOC_JET
1471#undef arena_dalloc_junk_small
1472#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
1473#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08001474void
1475arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1476{
1477 size_t redzone_size = bin_info->redzone_size;
1478
1479 arena_redzones_validate(ptr, bin_info, false);
Jason Evans122449b2012-04-06 00:35:09 -07001480 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1481 bin_info->reg_interval);
1482}
Jason Evans6b694c42014-01-07 16:47:56 -08001483#ifdef JEMALLOC_JET
1484#undef arena_dalloc_junk_small
1485#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
1486arena_dalloc_junk_small_t *arena_dalloc_junk_small =
1487 JEMALLOC_N(arena_dalloc_junk_small_impl);
1488#endif
Jason Evans122449b2012-04-06 00:35:09 -07001489
Jason Evans0d6c5d82013-12-17 15:14:36 -08001490void
1491arena_quarantine_junk_small(void *ptr, size_t usize)
1492{
1493 size_t binind;
1494 arena_bin_info_t *bin_info;
1495 cassert(config_fill);
1496 assert(opt_junk);
1497 assert(opt_quarantine);
1498 assert(usize <= SMALL_MAXCLASS);
1499
Jason Evans3541a902014-04-16 17:14:33 -07001500 binind = small_size2bin(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08001501 bin_info = &arena_bin_info[binind];
1502 arena_redzones_validate(ptr, bin_info, true);
1503}
1504
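/*
 * Small allocation fast path: map the request to a bin with
 * small_size2bin(), round the size up to that bin's class with
 * small_bin2size(), and try the bin's current run before falling back to
 * arena_bin_malloc_hard().  All bin state and statistics are protected by
 * bin->lock; the arena lock is only touched indirectly, on the slow path.
 */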
Jason Evanse476f8a2010-01-16 09:53:50 -08001505void *
1506arena_malloc_small(arena_t *arena, size_t size, bool zero)
1507{
1508 void *ret;
1509 arena_bin_t *bin;
1510 arena_run_t *run;
1511 size_t binind;
1512
Jason Evans3541a902014-04-16 17:14:33 -07001513 binind = small_size2bin(size);
Jason Evansb1726102012-02-28 16:50:47 -08001514 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08001515 bin = &arena->bins[binind];
Jason Evans3541a902014-04-16 17:14:33 -07001516 size = small_bin2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001517
Jason Evans86815df2010-03-13 20:32:56 -08001518 malloc_mutex_lock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001519 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001520 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001521 else
1522 ret = arena_bin_malloc_hard(arena, bin);
1523
1524 if (ret == NULL) {
Jason Evans86815df2010-03-13 20:32:56 -08001525 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001526 return (NULL);
1527 }
1528
Jason Evans7372b152012-02-10 20:22:09 -08001529 if (config_stats) {
1530 bin->stats.allocated += size;
1531 bin->stats.nmalloc++;
1532 bin->stats.nrequests++;
1533 }
Jason Evans86815df2010-03-13 20:32:56 -08001534 malloc_mutex_unlock(&bin->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08001535 if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
1536 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08001537
1538 if (zero == false) {
Jason Evans7372b152012-02-10 20:22:09 -08001539 if (config_fill) {
Jason Evans122449b2012-04-06 00:35:09 -07001540 if (opt_junk) {
1541 arena_alloc_junk_small(ret,
1542 &arena_bin_info[binind], false);
1543 } else if (opt_zero)
Jason Evans7372b152012-02-10 20:22:09 -08001544 memset(ret, 0, size);
1545 }
Jason Evansbd87b012014-04-15 16:35:08 -07001546 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evans122449b2012-04-06 00:35:09 -07001547 } else {
1548 if (config_fill && opt_junk) {
1549 arena_alloc_junk_small(ret, &arena_bin_info[binind],
1550 true);
1551 }
Jason Evansbd87b012014-04-15 16:35:08 -07001552 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001553 memset(ret, 0, size);
Jason Evans122449b2012-04-06 00:35:09 -07001554 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001555
1556 return (ret);
1557}
1558
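/*
 * Large allocations are whole-page runs carved directly from a chunk under
 * arena->lock.  The lstats array is indexed by page count minus one, i.e.
 * (size >> LG_PAGE) - 1, which is why the same index expression appears for
 * nmalloc, nrequests, and curruns below.
 */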
1559void *
Jason Evanse476f8a2010-01-16 09:53:50 -08001560arena_malloc_large(arena_t *arena, size_t size, bool zero)
1561{
1562 void *ret;
Jason Evans88c222c2013-02-06 11:59:30 -08001563 UNUSED bool idump;
Jason Evanse476f8a2010-01-16 09:53:50 -08001564
1565 /* Large allocation. */
1566 size = PAGE_CEILING(size);
1567 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001568 ret = (void *)arena_run_alloc_large(arena, size, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08001569 if (ret == NULL) {
1570 malloc_mutex_unlock(&arena->lock);
1571 return (NULL);
1572 }
Jason Evans7372b152012-02-10 20:22:09 -08001573 if (config_stats) {
1574 arena->stats.nmalloc_large++;
1575 arena->stats.nrequests_large++;
1576 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001577 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1578 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1579 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001580 }
Jason Evans7372b152012-02-10 20:22:09 -08001581 if (config_prof)
Jason Evans88c222c2013-02-06 11:59:30 -08001582 idump = arena_prof_accum_locked(arena, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001583 malloc_mutex_unlock(&arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08001584 if (config_prof && idump)
1585 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08001586
1587 if (zero == false) {
Jason Evans7372b152012-02-10 20:22:09 -08001588 if (config_fill) {
1589 if (opt_junk)
1590 memset(ret, 0xa5, size);
1591 else if (opt_zero)
1592 memset(ret, 0, size);
1593 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001594 }
1595
1596 return (ret);
1597}
1598
Jason Evanse476f8a2010-01-16 09:53:50 -08001599/* Only handles large allocations that require more than page alignment. */
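/*
 * The run is over-allocated by (alignment - PAGE) bytes so that an aligned
 * region of the requested size is guaranteed to fit somewhere inside it; the
 * unused head and tail are then trimmed off.  For illustration, assuming
 * 4 KiB pages: a 16 KiB request with 8 KiB alignment allocates a 20 KiB run.
 * If the run happens to start 4 KiB past an 8 KiB boundary, leadsize is
 * 4 KiB and trailsize is 0; if it starts on an 8 KiB boundary, leadsize is 0
 * and trailsize is 4 KiB.
 */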
1600void *
Jason Evans5ff709c2012-04-11 18:13:45 -07001601arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001602{
1603 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07001604 size_t alloc_size, leadsize, trailsize;
1605 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001606 arena_chunk_t *chunk;
1607
1608 assert((size & PAGE_MASK) == 0);
Jason Evans93443682010-10-20 17:39:18 -07001609
1610 alignment = PAGE_CEILING(alignment);
Jason Evans5ff709c2012-04-11 18:13:45 -07001611 alloc_size = size + alignment - PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001612
1613 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001614 run = arena_run_alloc_large(arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07001615 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001616 malloc_mutex_unlock(&arena->lock);
1617 return (NULL);
1618 }
Jason Evans5ff709c2012-04-11 18:13:45 -07001619 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evanse476f8a2010-01-16 09:53:50 -08001620
Jason Evans5ff709c2012-04-11 18:13:45 -07001621 leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
1622 (uintptr_t)run;
1623 assert(alloc_size >= leadsize + size);
1624 trailsize = alloc_size - leadsize - size;
1625 ret = (void *)((uintptr_t)run + leadsize);
1626 if (leadsize != 0) {
1627 arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
1628 leadsize);
1629 }
1630 if (trailsize != 0) {
1631 arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
1632 false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001633 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001634 arena_run_init_large(arena, (arena_run_t *)ret, size, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08001635
Jason Evans7372b152012-02-10 20:22:09 -08001636 if (config_stats) {
1637 arena->stats.nmalloc_large++;
1638 arena->stats.nrequests_large++;
1639 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001640 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1641 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1642 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001643 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001644 malloc_mutex_unlock(&arena->lock);
1645
Jason Evans7372b152012-02-10 20:22:09 -08001646 if (config_fill && zero == false) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001647 if (opt_junk)
1648 memset(ret, 0xa5, size);
1649 else if (opt_zero)
1650 memset(ret, 0, size);
1651 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001652 return (ret);
1653}
1654
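/*
 * Used by the heap profiler: a sampled small request is actually backed by a
 * page-sized large run, and this function records the original small size
 * class in the chunk map so that isalloc(ptr, true) reports the requested
 * size while isalloc(ptr, false) continues to report PAGE, as the assertions
 * below check.
 */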
Jason Evans0b270a92010-03-31 16:45:04 -07001655void
1656arena_prof_promoted(const void *ptr, size_t size)
1657{
1658 arena_chunk_t *chunk;
1659 size_t pageind, binind;
1660
Jason Evans78f73522012-04-18 13:38:40 -07001661 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07001662 assert(ptr != NULL);
1663 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evans122449b2012-04-06 00:35:09 -07001664 assert(isalloc(ptr, false) == PAGE);
1665 assert(isalloc(ptr, true) == PAGE);
Jason Evansb1726102012-02-28 16:50:47 -08001666 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07001667
1668 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07001669 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans3541a902014-04-16 17:14:33 -07001670 binind = small_size2bin(size);
Jason Evansb1726102012-02-28 16:50:47 -08001671 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07001672 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07001673
Jason Evans122449b2012-04-06 00:35:09 -07001674 assert(isalloc(ptr, false) == PAGE);
1675 assert(isalloc(ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07001676}
Jason Evans6109fe02010-02-10 10:37:56 -08001677
Jason Evanse476f8a2010-01-16 09:53:50 -08001678static void
Jason Evans088e6a02010-10-18 00:04:44 -07001679arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08001680 arena_bin_t *bin)
1681{
Jason Evanse476f8a2010-01-16 09:53:50 -08001682
Jason Evans19b3d612010-03-18 20:36:40 -07001683 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08001684 if (run == bin->runcur)
1685 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001686 else {
1687 size_t binind = arena_bin_index(chunk->arena, bin);
1688 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1689
1690 if (bin_info->nregs != 1) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07001691 /*
1692 * This block's conditional is necessary because if the
1693 * run only contains one region, then it never gets
1694 * inserted into the non-full runs tree.
1695 */
Jason Evanse7a10582012-02-13 17:36:52 -08001696 arena_bin_runs_remove(bin, run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001697 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001698 }
Jason Evans088e6a02010-10-18 00:04:44 -07001699}
1700
1701static void
1702arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1703 arena_bin_t *bin)
1704{
Jason Evans49f7e8f2011-03-15 13:59:15 -07001705 size_t binind;
1706 arena_bin_info_t *bin_info;
Jason Evans088e6a02010-10-18 00:04:44 -07001707 size_t npages, run_ind, past;
1708
1709 assert(run != bin->runcur);
Jason Evans203484e2012-05-02 00:30:36 -07001710 assert(arena_run_tree_search(&bin->runs,
1711 arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
1712 == NULL);
Jason Evans86815df2010-03-13 20:32:56 -08001713
Jason Evans49f7e8f2011-03-15 13:59:15 -07001714 binind = arena_bin_index(chunk->arena, run->bin);
1715 bin_info = &arena_bin_info[binind];
1716
Jason Evanse00572b2010-03-14 19:43:56 -07001717 malloc_mutex_unlock(&bin->lock);
1718 /******************************/
Jason Evansae4c7b42012-04-02 07:04:34 -07001719 npages = bin_info->run_size >> LG_PAGE;
1720 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
Jason Evans84c8eef2011-03-16 10:30:13 -07001721 past = (size_t)(PAGE_CEILING((uintptr_t)run +
1722 (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
Jason Evans122449b2012-04-06 00:35:09 -07001723 bin_info->reg_interval - bin_info->redzone_size) -
1724 (uintptr_t)chunk) >> LG_PAGE);
Jason Evans86815df2010-03-13 20:32:56 -08001725 malloc_mutex_lock(&arena->lock);
Jason Evans19b3d612010-03-18 20:36:40 -07001726
1727 /*
1728 * If the run was originally clean, and some pages were never touched,
1729 * trim the clean pages before deallocating the dirty portion of the
1730 * run.
1731 */
Jason Evans30fe12b2012-05-10 17:09:17 -07001732 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1733 arena_mapbits_dirty_get(chunk, run_ind+npages-1));
Jason Evans203484e2012-05-02 00:30:36 -07001734 if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
1735 npages) {
Jason Evans30fe12b2012-05-10 17:09:17 -07001736 /* Trim clean pages. Convert to large run beforehand. */
1737 assert(npages > 0);
Jason Evansd8ceef62012-05-10 20:59:39 -07001738 arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
1739 arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
Jason Evansae4c7b42012-04-02 07:04:34 -07001740 arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
1741 ((past - run_ind) << LG_PAGE), false);
Jason Evans940a2e02010-10-17 17:51:37 -07001742 /* npages = past - run_ind; */
Jason Evans1e0a6362010-03-13 13:41:58 -08001743 }
Jason Evanse3d13062012-10-30 15:42:37 -07001744 arena_run_dalloc(arena, run, true, false);
Jason Evans86815df2010-03-13 20:32:56 -08001745 malloc_mutex_unlock(&arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07001746 /****************************/
1747 malloc_mutex_lock(&bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08001748 if (config_stats)
1749 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08001750}
1751
Jason Evans940a2e02010-10-17 17:51:37 -07001752static void
1753arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1754 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08001755{
Jason Evanse476f8a2010-01-16 09:53:50 -08001756
Jason Evans8de6a022010-10-17 20:57:30 -07001757 /*
Jason Evanse7a10582012-02-13 17:36:52 -08001758 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
1759 * non-full run. It is okay to NULL runcur out rather than proactively
1760 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07001761 */
Jason Evanse7a10582012-02-13 17:36:52 -08001762 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07001763 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08001764 if (bin->runcur->nfree > 0)
1765 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07001766 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08001767 if (config_stats)
1768 bin->stats.reruns++;
1769 } else
1770 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07001771}
1772
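/*
 * Small deallocation with bin->lock already held: recover the run from the
 * page's small runind, optionally junk-fill the region, and return it to the
 * run.  If the run becomes completely free it is dissociated from the bin
 * and released; if it just transitioned from full to having one free region
 * (and is not runcur), it is reinserted as an available run via
 * arena_bin_lower_run().
 */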
1773void
Jason Evans203484e2012-05-02 00:30:36 -07001774arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans940a2e02010-10-17 17:51:37 -07001775 arena_chunk_map_t *mapelm)
1776{
1777 size_t pageind;
1778 arena_run_t *run;
1779 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02001780 arena_bin_info_t *bin_info;
1781 size_t size, binind;
Jason Evans940a2e02010-10-17 17:51:37 -07001782
Jason Evansae4c7b42012-04-02 07:04:34 -07001783 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07001784 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
Jason Evans203484e2012-05-02 00:30:36 -07001785 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
Jason Evans940a2e02010-10-17 17:51:37 -07001786 bin = run->bin;
Ben Maurerf9ff6032014-04-06 13:24:16 -07001787 binind = arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, pageind));
Mike Hommey8b499712012-04-24 23:22:02 +02001788 bin_info = &arena_bin_info[binind];
Jason Evans7372b152012-02-10 20:22:09 -08001789 if (config_fill || config_stats)
1790 size = bin_info->reg_size;
Jason Evans940a2e02010-10-17 17:51:37 -07001791
Jason Evans7372b152012-02-10 20:22:09 -08001792 if (config_fill && opt_junk)
Jason Evans122449b2012-04-06 00:35:09 -07001793 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07001794
1795 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001796 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07001797 arena_dissociate_bin_run(chunk, run, bin);
Jason Evans8de6a022010-10-17 20:57:30 -07001798 arena_dalloc_bin_run(arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07001799 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07001800 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08001801
Jason Evans7372b152012-02-10 20:22:09 -08001802 if (config_stats) {
1803 bin->stats.allocated -= size;
1804 bin->stats.ndalloc++;
1805 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001806}
1807
Jason Evanse476f8a2010-01-16 09:53:50 -08001808void
Jason Evans203484e2012-05-02 00:30:36 -07001809arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1810 size_t pageind, arena_chunk_map_t *mapelm)
1811{
1812 arena_run_t *run;
1813 arena_bin_t *bin;
1814
1815 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1816 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1817 bin = run->bin;
1818 malloc_mutex_lock(&bin->lock);
1819 arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
1820 malloc_mutex_unlock(&bin->lock);
1821}
1822
1823void
1824arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1825 size_t pageind)
1826{
1827 arena_chunk_map_t *mapelm;
1828
1829 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07001830 /* arena_ptr_small_binind_get() does extra sanity checking. */
1831 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
1832 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07001833 }
1834 mapelm = arena_mapp_get(chunk, pageind);
1835 arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
1836}
Jason Evanse476f8a2010-01-16 09:53:50 -08001837
Jason Evans6b694c42014-01-07 16:47:56 -08001838#ifdef JEMALLOC_JET
1839#undef arena_dalloc_junk_large
1840#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
1841#endif
1842static void
1843arena_dalloc_junk_large(void *ptr, size_t usize)
1844{
1845
1846 if (config_fill && opt_junk)
1847 memset(ptr, 0x5a, usize);
1848}
1849#ifdef JEMALLOC_JET
1850#undef arena_dalloc_junk_large
1851#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
1852arena_dalloc_junk_large_t *arena_dalloc_junk_large =
1853 JEMALLOC_N(arena_dalloc_junk_large_impl);
1854#endif
1855
Jason Evanse476f8a2010-01-16 09:53:50 -08001856void
Jason Evans203484e2012-05-02 00:30:36 -07001857arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
Jason Evanse476f8a2010-01-16 09:53:50 -08001858{
Jason Evans13668262010-01-31 03:57:29 -08001859
Jason Evans7372b152012-02-10 20:22:09 -08001860 if (config_fill || config_stats) {
Jason Evansae4c7b42012-04-02 07:04:34 -07001861 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans6b694c42014-01-07 16:47:56 -08001862 size_t usize = arena_mapbits_large_size_get(chunk, pageind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001863
Jason Evans6b694c42014-01-07 16:47:56 -08001864 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08001865 if (config_stats) {
1866 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08001867 arena->stats.allocated_large -= usize;
1868 arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
1869 arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08001870 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001871 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001872
Jason Evanse3d13062012-10-30 15:42:37 -07001873 arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001874}
1875
Jason Evans203484e2012-05-02 00:30:36 -07001876void
1877arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1878{
1879
1880 malloc_mutex_lock(&arena->lock);
1881 arena_dalloc_large_locked(arena, chunk, ptr);
1882 malloc_mutex_unlock(&arena->lock);
1883}
1884
Jason Evanse476f8a2010-01-16 09:53:50 -08001885static void
1886arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001887 size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08001888{
1889
1890 assert(size < oldsize);
1891
1892 /*
1893 * Shrink the run, and make trailing pages available for other
1894 * allocations.
1895 */
1896 malloc_mutex_lock(&arena->lock);
1897 arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
1898 true);
Jason Evans7372b152012-02-10 20:22:09 -08001899 if (config_stats) {
1900 arena->stats.ndalloc_large++;
1901 arena->stats.allocated_large -= oldsize;
Jason Evansae4c7b42012-04-02 07:04:34 -07001902 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1903 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08001904
Jason Evans7372b152012-02-10 20:22:09 -08001905 arena->stats.nmalloc_large++;
1906 arena->stats.nrequests_large++;
1907 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001908 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1909 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1910 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001911 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001912 malloc_mutex_unlock(&arena->lock);
1913}
1914
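/*
 * In-place growth is possible only when the pages immediately following the
 * allocation are unallocated and large enough to cover at least size -
 * oldsize.  splitsize then takes either the entire following free run or
 * just enough to reach size + extra, whichever is smaller.  For
 * illustration, with a 2-page allocation, a 5-page target (size + extra),
 * and an 8-page free run following it, splitsize is 3 pages and the result
 * is a 5-page run.
 */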
1915static bool
1916arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001917 size_t oldsize, size_t size, size_t extra, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001918{
Jason Evansae4c7b42012-04-02 07:04:34 -07001919 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1920 size_t npages = oldsize >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001921 size_t followsize;
Jason Evanse476f8a2010-01-16 09:53:50 -08001922
Jason Evans203484e2012-05-02 00:30:36 -07001923 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
Jason Evanse476f8a2010-01-16 09:53:50 -08001924
1925 /* Try to extend the run. */
Jason Evans8e3c3c62010-09-17 15:46:18 -07001926 assert(size + extra > oldsize);
Jason Evanse476f8a2010-01-16 09:53:50 -08001927 malloc_mutex_lock(&arena->lock);
Jason Evans7393f442010-10-01 17:35:43 -07001928 if (pageind + npages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07001929 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
1930 (followsize = arena_mapbits_unallocated_size_get(chunk,
1931 pageind+npages)) >= size - oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001932 /*
1933 * The next run is available and sufficiently large. Split the
1934 * following run, then merge the first part with the existing
1935 * allocation.
1936 */
Jason Evans940a2e02010-10-17 17:51:37 -07001937 size_t flag_dirty;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001938 size_t splitsize = (oldsize + followsize <= size + extra)
1939 ? followsize : size + extra - oldsize;
Jason Evansaa5113b2014-01-14 16:23:03 -08001940 arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk +
1941 ((pageind+npages) << LG_PAGE)), splitsize, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08001942
Jason Evans088e6a02010-10-18 00:04:44 -07001943 size = oldsize + splitsize;
Jason Evansae4c7b42012-04-02 07:04:34 -07001944 npages = size >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07001945
1946 /*
1947 * Mark the extended run as dirty if either portion of the run
1948 * was dirty before allocation. This is rather pedantic,
1949 * because there's not actually any sequence of events that
1950 * could cause the resulting run to be passed to
1951 * arena_run_dalloc() with the dirty argument set to false
1952 * (which is when dirty flag consistency would really matter).
1953 */
Jason Evans203484e2012-05-02 00:30:36 -07001954 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
1955 arena_mapbits_dirty_get(chunk, pageind+npages-1);
1956 arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
1957 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08001958
Jason Evans7372b152012-02-10 20:22:09 -08001959 if (config_stats) {
1960 arena->stats.ndalloc_large++;
1961 arena->stats.allocated_large -= oldsize;
Jason Evans203484e2012-05-02 00:30:36 -07001962 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1963 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08001964
Jason Evans7372b152012-02-10 20:22:09 -08001965 arena->stats.nmalloc_large++;
1966 arena->stats.nrequests_large++;
1967 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001968 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
Jason Evans203484e2012-05-02 00:30:36 -07001969 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
Jason Evansae4c7b42012-04-02 07:04:34 -07001970 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07001971 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001972 malloc_mutex_unlock(&arena->lock);
1973 return (false);
1974 }
1975 malloc_mutex_unlock(&arena->lock);
1976
1977 return (true);
1978}
1979
Jason Evans6b694c42014-01-07 16:47:56 -08001980#ifdef JEMALLOC_JET
1981#undef arena_ralloc_junk_large
1982#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
1983#endif
1984static void
1985arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
1986{
1987
1988 if (config_fill && opt_junk) {
1989 memset((void *)((uintptr_t)ptr + usize), 0x5a,
1990 old_usize - usize);
1991 }
1992}
1993#ifdef JEMALLOC_JET
1994#undef arena_ralloc_junk_large
1995#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
1996arena_ralloc_junk_large_t *arena_ralloc_junk_large =
1997 JEMALLOC_N(arena_ralloc_junk_large_impl);
1998#endif
1999
Jason Evanse476f8a2010-01-16 09:53:50 -08002000/*
2001 * Try to resize a large allocation, in order to avoid copying. This will
 2002 * always fail if growing an object and the following run is already in use.
2003 */
2004static bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07002005arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
2006 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002007{
2008 size_t psize;
2009
Jason Evans8e3c3c62010-09-17 15:46:18 -07002010 psize = PAGE_CEILING(size + extra);
Jason Evanse476f8a2010-01-16 09:53:50 -08002011 if (psize == oldsize) {
2012 /* Same size class. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002013 return (false);
2014 } else {
2015 arena_chunk_t *chunk;
2016 arena_t *arena;
2017
2018 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2019 arena = chunk->arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08002020
2021 if (psize < oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002022 /* Fill before shrinking in order to avoid a race. */
Jason Evans6b694c42014-01-07 16:47:56 -08002023 arena_ralloc_junk_large(ptr, oldsize, psize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002024 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
2025 psize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002026 return (false);
2027 } else {
2028 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07002029 oldsize, PAGE_CEILING(size),
2030 psize - PAGE_CEILING(size), zero);
Jason Evans6b694c42014-01-07 16:47:56 -08002031 if (config_fill && ret == false && zero == false) {
2032 if (opt_junk) {
2033 memset((void *)((uintptr_t)ptr +
2034 oldsize), 0xa5, isalloc(ptr,
2035 config_prof) - oldsize);
2036 } else if (opt_zero) {
2037 memset((void *)((uintptr_t)ptr +
2038 oldsize), 0, isalloc(ptr,
2039 config_prof) - oldsize);
2040 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002041 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002042 return (ret);
2043 }
2044 }
2045}
2046
Jason Evansb2c31662014-01-12 15:05:44 -08002047bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07002048arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
2049 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002050{
Jason Evanse476f8a2010-01-16 09:53:50 -08002051
Jason Evans8e3c3c62010-09-17 15:46:18 -07002052 /*
2053 * Avoid moving the allocation if the size class can be left the same.
2054 */
Jason Evanse476f8a2010-01-16 09:53:50 -08002055 if (oldsize <= arena_maxclass) {
Jason Evansb1726102012-02-28 16:50:47 -08002056 if (oldsize <= SMALL_MAXCLASS) {
Jason Evans3541a902014-04-16 17:14:33 -07002057 assert(arena_bin_info[small_size2bin(oldsize)].reg_size
Jason Evans49f7e8f2011-03-15 13:59:15 -07002058 == oldsize);
Jason Evansb1726102012-02-28 16:50:47 -08002059 if ((size + extra <= SMALL_MAXCLASS &&
Jason Evans3541a902014-04-16 17:14:33 -07002060 small_size2bin(size + extra) ==
2061 small_size2bin(oldsize)) || (size <= oldsize &&
Jason Evans6e629842013-12-15 21:49:40 -08002062 size + extra >= oldsize))
Jason Evansb2c31662014-01-12 15:05:44 -08002063 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002064 } else {
2065 assert(size <= arena_maxclass);
Jason Evansb1726102012-02-28 16:50:47 -08002066 if (size + extra > SMALL_MAXCLASS) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07002067 if (arena_ralloc_large(ptr, oldsize, size,
2068 extra, zero) == false)
Jason Evansb2c31662014-01-12 15:05:44 -08002069 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002070 }
2071 }
2072 }
2073
Jason Evans8e3c3c62010-09-17 15:46:18 -07002074 /* Reallocation would require a move. */
Jason Evansb2c31662014-01-12 15:05:44 -08002075 return (true);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002076}
Jason Evanse476f8a2010-01-16 09:53:50 -08002077
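/*
 * Moving reallocation path: if arena_ralloc_no_move() cannot satisfy the
 * request in place, allocate a new object (via ipalloct() when an alignment
 * is required, otherwise arena_malloc()), first with extra and then without
 * it if that fails, copy min(size, oldsize) bytes, and free the old object
 * through iqalloct().
 */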
Jason Evans8e3c3c62010-09-17 15:46:18 -07002078void *
Jason Evans609ae592012-10-11 13:53:15 -07002079arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
2080 size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
2081 bool try_tcache_dalloc)
Jason Evans8e3c3c62010-09-17 15:46:18 -07002082{
2083 void *ret;
2084 size_t copysize;
2085
2086 /* Try to avoid moving the allocation. */
Jason Evansb2c31662014-01-12 15:05:44 -08002087 if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
2088 return (ptr);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002089
Jason Evans8e3c3c62010-09-17 15:46:18 -07002090 /*
2091 * size and oldsize are different enough that we need to move the
2092 * object. In that case, fall back to allocating new space and
2093 * copying.
2094 */
Jason Evans38d92102011-03-23 00:37:29 -07002095 if (alignment != 0) {
Jason Evans5ff709c2012-04-11 18:13:45 -07002096 size_t usize = sa2u(size + extra, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002097 if (usize == 0)
2098 return (NULL);
Jason Evansd82a5e62013-12-12 22:35:52 -08002099 ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
Jason Evans38d92102011-03-23 00:37:29 -07002100 } else
Jason Evans609ae592012-10-11 13:53:15 -07002101 ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002102
2103 if (ret == NULL) {
2104 if (extra == 0)
2105 return (NULL);
2106 /* Try again, this time without extra. */
Jason Evans38d92102011-03-23 00:37:29 -07002107 if (alignment != 0) {
Jason Evans5ff709c2012-04-11 18:13:45 -07002108 size_t usize = sa2u(size, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002109 if (usize == 0)
2110 return (NULL);
Jason Evansd82a5e62013-12-12 22:35:52 -08002111 ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
Jason Evans609ae592012-10-11 13:53:15 -07002112 arena);
Jason Evans38d92102011-03-23 00:37:29 -07002113 } else
Jason Evans609ae592012-10-11 13:53:15 -07002114 ret = arena_malloc(arena, size, zero, try_tcache_alloc);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002115
2116 if (ret == NULL)
2117 return (NULL);
2118 }
2119
2120 /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
2121
2122 /*
2123 * Copy at most size bytes (not size+extra), since the caller has no
2124 * expectation that the extra bytes will be reliably preserved.
2125 */
Jason Evanse476f8a2010-01-16 09:53:50 -08002126 copysize = (size < oldsize) ? size : oldsize;
Jason Evansbd87b012014-04-15 16:35:08 -07002127 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002128 memcpy(ret, ptr, copysize);
Jason Evansd82a5e62013-12-12 22:35:52 -08002129 iqalloct(ptr, try_tcache_dalloc);
Jason Evanse476f8a2010-01-16 09:53:50 -08002130 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08002131}
2132
Jason Evans609ae592012-10-11 13:53:15 -07002133dss_prec_t
2134arena_dss_prec_get(arena_t *arena)
2135{
2136 dss_prec_t ret;
2137
2138 malloc_mutex_lock(&arena->lock);
2139 ret = arena->dss_prec;
2140 malloc_mutex_unlock(&arena->lock);
2141 return (ret);
2142}
2143
Jason Evans4d434ad2014-04-15 12:09:48 -07002144bool
Jason Evans609ae592012-10-11 13:53:15 -07002145arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2146{
2147
Jason Evans4d434ad2014-04-15 12:09:48 -07002148 if (have_dss == false)
2149 return (dss_prec != dss_prec_disabled);
Jason Evans609ae592012-10-11 13:53:15 -07002150 malloc_mutex_lock(&arena->lock);
2151 arena->dss_prec = dss_prec;
2152 malloc_mutex_unlock(&arena->lock);
Jason Evans4d434ad2014-04-15 12:09:48 -07002153 return (false);
Jason Evans609ae592012-10-11 13:53:15 -07002154}
2155
2156void
2157arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
2158 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
2159 malloc_large_stats_t *lstats)
2160{
2161 unsigned i;
2162
2163 malloc_mutex_lock(&arena->lock);
2164 *dss = dss_prec_names[arena->dss_prec];
2165 *nactive += arena->nactive;
2166 *ndirty += arena->ndirty;
2167
2168 astats->mapped += arena->stats.mapped;
2169 astats->npurge += arena->stats.npurge;
2170 astats->nmadvise += arena->stats.nmadvise;
2171 astats->purged += arena->stats.purged;
2172 astats->allocated_large += arena->stats.allocated_large;
2173 astats->nmalloc_large += arena->stats.nmalloc_large;
2174 astats->ndalloc_large += arena->stats.ndalloc_large;
2175 astats->nrequests_large += arena->stats.nrequests_large;
Jason Evanse2deab72014-05-15 22:22:27 -07002176 astats->allocated_huge += arena->stats.allocated_huge;
2177 astats->nmalloc_huge += arena->stats.nmalloc_huge;
2178 astats->ndalloc_huge += arena->stats.ndalloc_huge;
2179 astats->nrequests_huge += arena->stats.nrequests_huge;
Jason Evans609ae592012-10-11 13:53:15 -07002180
2181 for (i = 0; i < nlclasses; i++) {
2182 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
2183 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
2184 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
2185 lstats[i].curruns += arena->stats.lstats[i].curruns;
2186 }
2187 malloc_mutex_unlock(&arena->lock);
2188
2189 for (i = 0; i < NBINS; i++) {
2190 arena_bin_t *bin = &arena->bins[i];
2191
2192 malloc_mutex_lock(&bin->lock);
2193 bstats[i].allocated += bin->stats.allocated;
2194 bstats[i].nmalloc += bin->stats.nmalloc;
2195 bstats[i].ndalloc += bin->stats.ndalloc;
2196 bstats[i].nrequests += bin->stats.nrequests;
2197 if (config_tcache) {
2198 bstats[i].nfills += bin->stats.nfills;
2199 bstats[i].nflushes += bin->stats.nflushes;
2200 }
2201 bstats[i].nruns += bin->stats.nruns;
2202 bstats[i].reruns += bin->stats.reruns;
2203 bstats[i].curruns += bin->stats.curruns;
2204 malloc_mutex_unlock(&bin->lock);
2205 }
2206}
2207
Jason Evanse476f8a2010-01-16 09:53:50 -08002208bool
2209arena_new(arena_t *arena, unsigned ind)
2210{
2211 unsigned i;
2212 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002213
Jason Evans6109fe02010-02-10 10:37:56 -08002214 arena->ind = ind;
Jason Evans597632b2011-03-18 13:41:33 -07002215 arena->nthreads = 0;
aravindfb7fe502014-05-05 15:16:56 -07002216 arena->chunk_alloc = chunk_alloc_default;
Jason Evanse2deab72014-05-15 22:22:27 -07002217 arena->chunk_dalloc = chunk_dalloc_default;
Jason Evans6109fe02010-02-10 10:37:56 -08002218
Jason Evanse476f8a2010-01-16 09:53:50 -08002219 if (malloc_mutex_init(&arena->lock))
2220 return (true);
2221
Jason Evans7372b152012-02-10 20:22:09 -08002222 if (config_stats) {
2223 memset(&arena->stats, 0, sizeof(arena_stats_t));
2224 arena->stats.lstats =
2225 (malloc_large_stats_t *)base_alloc(nlclasses *
2226 sizeof(malloc_large_stats_t));
2227 if (arena->stats.lstats == NULL)
2228 return (true);
2229 memset(arena->stats.lstats, 0, nlclasses *
2230 sizeof(malloc_large_stats_t));
2231 if (config_tcache)
2232 ql_new(&arena->tcache_ql);
2233 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002234
Jason Evans7372b152012-02-10 20:22:09 -08002235 if (config_prof)
2236 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08002237
Jason Evans609ae592012-10-11 13:53:15 -07002238 arena->dss_prec = chunk_dss_prec_get();
2239
Jason Evanse476f8a2010-01-16 09:53:50 -08002240 /* Initialize chunks. */
Qinfan Wu04d60a12014-07-18 14:21:17 -07002241 ql_new(&arena->runs_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08002242 arena->spare = NULL;
2243
2244 arena->nactive = 0;
2245 arena->ndirty = 0;
2246
Jason Evanse3d13062012-10-30 15:42:37 -07002247 arena_avail_tree_new(&arena->runs_avail);
Jason Evanse476f8a2010-01-16 09:53:50 -08002248
2249 /* Initialize bins. */
Jason Evansb1726102012-02-28 16:50:47 -08002250 for (i = 0; i < NBINS; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002251 bin = &arena->bins[i];
Jason Evans86815df2010-03-13 20:32:56 -08002252 if (malloc_mutex_init(&bin->lock))
2253 return (true);
Jason Evanse476f8a2010-01-16 09:53:50 -08002254 bin->runcur = NULL;
2255 arena_run_tree_new(&bin->runs);
Jason Evans7372b152012-02-10 20:22:09 -08002256 if (config_stats)
2257 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
Jason Evanse476f8a2010-01-16 09:53:50 -08002258 }
2259
Jason Evanse476f8a2010-01-16 09:53:50 -08002260 return (false);
2261}
2262
Jason Evans49f7e8f2011-03-15 13:59:15 -07002263/*
2264 * Calculate bin_info->run_size such that it meets the following constraints:
2265 *
2266 * *) bin_info->run_size >= min_run_size
2267 * *) bin_info->run_size <= arena_maxclass
2268 * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
Jason Evans47e57f92011-03-22 09:00:56 -07002269 * *) bin_info->nregs <= RUN_MAXREGS
Jason Evans49f7e8f2011-03-15 13:59:15 -07002270 *
Jason Evans84c8eef2011-03-16 10:30:13 -07002271 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
2272 * calculated here, since these settings are all interdependent.
Jason Evans49f7e8f2011-03-15 13:59:15 -07002273 */
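/*
 * The expansion loop below grows the candidate run size one page at a time
 * while the space in front of region 0 (run header, bitmap, and padding),
 * taken as a binary fixed-point fraction of the run size with RUN_BFP
 * fractional bits, still exceeds RUN_MAX_OVRHD.  Growth also stops once the
 * run would exceed arena_maxclass, RUN_MAXREGS is reached, or the regions
 * are large enough that the relaxed bound (RUN_MAX_OVRHD_RELAX) applies.
 */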
2274static size_t
2275bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
2276{
Jason Evans122449b2012-04-06 00:35:09 -07002277 size_t pad_size;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002278 size_t try_run_size, good_run_size;
2279 uint32_t try_nregs, good_nregs;
2280 uint32_t try_hdr_size, good_hdr_size;
Jason Evans84c8eef2011-03-16 10:30:13 -07002281 uint32_t try_bitmap_offset, good_bitmap_offset;
Jason Evans122449b2012-04-06 00:35:09 -07002282 uint32_t try_redzone0_offset, good_redzone0_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002283
Jason Evansae4c7b42012-04-02 07:04:34 -07002284 assert(min_run_size >= PAGE);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002285 assert(min_run_size <= arena_maxclass);
2286
2287 /*
Jason Evans122449b2012-04-06 00:35:09 -07002288 * Determine redzone size based on minimum alignment and minimum
2289 * redzone size. Add padding to the end of the run if it is needed to
2290 * align the regions. The padding allows each redzone to be half the
2291 * minimum alignment; without the padding, each redzone would have to
2292 * be twice as large in order to maintain alignment.
2293 */
2294 if (config_fill && opt_redzone) {
Richard Diamond9c3a10f2014-05-28 21:37:02 -05002295 size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - 1);
Jason Evans122449b2012-04-06 00:35:09 -07002296 if (align_min <= REDZONE_MINSIZE) {
2297 bin_info->redzone_size = REDZONE_MINSIZE;
2298 pad_size = 0;
2299 } else {
2300 bin_info->redzone_size = align_min >> 1;
2301 pad_size = bin_info->redzone_size;
2302 }
2303 } else {
2304 bin_info->redzone_size = 0;
2305 pad_size = 0;
2306 }
2307 bin_info->reg_interval = bin_info->reg_size +
2308 (bin_info->redzone_size << 1);
2309
2310 /*
Jason Evans49f7e8f2011-03-15 13:59:15 -07002311 * Calculate known-valid settings before entering the run_size
2312 * expansion loop, so that the first part of the loop always copies
2313 * valid settings.
2314 *
2315 * The do..while loop iteratively reduces the number of regions until
2316 * the run header and the regions no longer overlap. A closed formula
2317 * would be quite messy, since there is an interdependency between the
2318 * header's mask length and the number of regions.
2319 */
2320 try_run_size = min_run_size;
Jason Evans122449b2012-04-06 00:35:09 -07002321 try_nregs = ((try_run_size - sizeof(arena_run_t)) /
2322 bin_info->reg_interval)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002323 + 1; /* Counter-act try_nregs-- in loop. */
Jason Evans47e57f92011-03-22 09:00:56 -07002324 if (try_nregs > RUN_MAXREGS) {
2325 try_nregs = RUN_MAXREGS
2326 + 1; /* Counter-act try_nregs-- in loop. */
2327 }
Jason Evans49f7e8f2011-03-15 13:59:15 -07002328 do {
2329 try_nregs--;
2330 try_hdr_size = sizeof(arena_run_t);
Jason Evans84c8eef2011-03-16 10:30:13 -07002331 /* Pad to a long boundary. */
2332 try_hdr_size = LONG_CEILING(try_hdr_size);
2333 try_bitmap_offset = try_hdr_size;
2334 /* Add space for bitmap. */
2335 try_hdr_size += bitmap_size(try_nregs);
Jason Evans122449b2012-04-06 00:35:09 -07002336 try_redzone0_offset = try_run_size - (try_nregs *
2337 bin_info->reg_interval) - pad_size;
2338 } while (try_hdr_size > try_redzone0_offset);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002339
2340 /* run_size expansion loop. */
2341 do {
2342 /*
2343 * Copy valid settings before trying more aggressive settings.
2344 */
2345 good_run_size = try_run_size;
2346 good_nregs = try_nregs;
2347 good_hdr_size = try_hdr_size;
Jason Evans84c8eef2011-03-16 10:30:13 -07002348 good_bitmap_offset = try_bitmap_offset;
Jason Evans122449b2012-04-06 00:35:09 -07002349 good_redzone0_offset = try_redzone0_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002350
2351 /* Try more aggressive settings. */
Jason Evansae4c7b42012-04-02 07:04:34 -07002352 try_run_size += PAGE;
Jason Evans122449b2012-04-06 00:35:09 -07002353 try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
2354 bin_info->reg_interval)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002355 + 1; /* Counter-act try_nregs-- in loop. */
Jason Evans47e57f92011-03-22 09:00:56 -07002356 if (try_nregs > RUN_MAXREGS) {
2357 try_nregs = RUN_MAXREGS
2358 + 1; /* Counter-act try_nregs-- in loop. */
2359 }
Jason Evans49f7e8f2011-03-15 13:59:15 -07002360 do {
2361 try_nregs--;
2362 try_hdr_size = sizeof(arena_run_t);
Jason Evans84c8eef2011-03-16 10:30:13 -07002363 /* Pad to a long boundary. */
2364 try_hdr_size = LONG_CEILING(try_hdr_size);
2365 try_bitmap_offset = try_hdr_size;
2366 /* Add space for bitmap. */
2367 try_hdr_size += bitmap_size(try_nregs);
Jason Evans122449b2012-04-06 00:35:09 -07002368 try_redzone0_offset = try_run_size - (try_nregs *
2369 bin_info->reg_interval) - pad_size;
2370 } while (try_hdr_size > try_redzone0_offset);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002371 } while (try_run_size <= arena_maxclass
Jason Evans122449b2012-04-06 00:35:09 -07002372 && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
2373 RUN_MAX_OVRHD_RELAX
2374 && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
Jason Evans47e57f92011-03-22 09:00:56 -07002375 && try_nregs < RUN_MAXREGS);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002376
Jason Evans122449b2012-04-06 00:35:09 -07002377 assert(good_hdr_size <= good_redzone0_offset);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002378
2379 /* Copy final settings. */
2380 bin_info->run_size = good_run_size;
2381 bin_info->nregs = good_nregs;
Jason Evans84c8eef2011-03-16 10:30:13 -07002382 bin_info->bitmap_offset = good_bitmap_offset;
Jason Evans122449b2012-04-06 00:35:09 -07002383 bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
2384
2385 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
2386 * bin_info->reg_interval) + pad_size == bin_info->run_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002387
2388 return (good_run_size);
2389}
2390
Jason Evansb1726102012-02-28 16:50:47 -08002391static void
Jason Evans49f7e8f2011-03-15 13:59:15 -07002392bin_info_init(void)
2393{
2394 arena_bin_info_t *bin_info;
Jason Evansae4c7b42012-04-02 07:04:34 -07002395 size_t prev_run_size = PAGE;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002396
Jason Evansd04047c2014-05-28 16:11:55 -07002397#define BIN_INFO_INIT_bin_yes(index, size) \
2398 bin_info = &arena_bin_info[index]; \
Jason Evansb1726102012-02-28 16:50:47 -08002399 bin_info->reg_size = size; \
2400 prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
2401 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
Jason Evansd04047c2014-05-28 16:11:55 -07002402#define BIN_INFO_INIT_bin_no(index, size)
2403#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
2404 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
Jason Evansb1726102012-02-28 16:50:47 -08002405 SIZE_CLASSES
Jason Evansd04047c2014-05-28 16:11:55 -07002406#undef BIN_INFO_INIT_bin_yes
2407#undef BIN_INFO_INIT_bin_no
2408#undef SC
Jason Evans49f7e8f2011-03-15 13:59:15 -07002409}
2410
Jason Evansb1726102012-02-28 16:50:47 -08002411void
Jason Evansa0bf2422010-01-29 14:30:41 -08002412arena_boot(void)
Jason Evanse476f8a2010-01-16 09:53:50 -08002413{
Jason Evansa0bf2422010-01-29 14:30:41 -08002414 size_t header_size;
Jason Evans7393f442010-10-01 17:35:43 -07002415 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08002416
Jason Evanse476f8a2010-01-16 09:53:50 -08002417 /*
2418 * Compute the header size such that it is large enough to contain the
Jason Evans7393f442010-10-01 17:35:43 -07002419 * page map. The page map is biased to omit entries for the header
2420 * itself, so some iteration is necessary to compute the map bias.
2421 *
2422 * 1) Compute safe header_size and map_bias values that include enough
2423 * space for an unbiased page map.
2424 * 2) Refine map_bias based on (1) to omit the header pages in the page
2425 * map. The resulting map_bias may be one too small.
2426 * 3) Refine map_bias based on (2). The result will be >= the result
2427 * from (2), and will always be correct.
Jason Evanse476f8a2010-01-16 09:53:50 -08002428 */
Jason Evans7393f442010-10-01 17:35:43 -07002429 map_bias = 0;
2430 for (i = 0; i < 3; i++) {
Jason Evansae4c7b42012-04-02 07:04:34 -07002431 header_size = offsetof(arena_chunk_t, map) +
2432 (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
2433 map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
2434 != 0);
Jason Evans7393f442010-10-01 17:35:43 -07002435 }
2436 assert(map_bias > 0);
2437
Jason Evansae4c7b42012-04-02 07:04:34 -07002438 arena_maxclass = chunksize - (map_bias << LG_PAGE);
Jason Evansa0bf2422010-01-29 14:30:41 -08002439
Jason Evansb1726102012-02-28 16:50:47 -08002440 bin_info_init();
Jason Evanse476f8a2010-01-16 09:53:50 -08002441}
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002442
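/*
 * Fork handling: prefork acquires the arena lock and every bin lock so that
 * no other thread holds them at the moment of fork(), and the postfork hooks
 * release them again in the parent and in the child, in the reverse order of
 * acquisition.
 */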
2443void
2444arena_prefork(arena_t *arena)
2445{
2446 unsigned i;
2447
2448 malloc_mutex_prefork(&arena->lock);
2449 for (i = 0; i < NBINS; i++)
2450 malloc_mutex_prefork(&arena->bins[i].lock);
2451}
2452
2453void
2454arena_postfork_parent(arena_t *arena)
2455{
2456 unsigned i;
2457
2458 for (i = 0; i < NBINS; i++)
2459 malloc_mutex_postfork_parent(&arena->bins[i].lock);
2460 malloc_mutex_postfork_parent(&arena->lock);
2461}
2462
2463void
2464arena_postfork_child(arena_t *arena)
2465{
2466 unsigned i;
2467
2468 for (i = 0; i < NBINS; i++)
2469 malloc_mutex_postfork_child(&arena->bins[i].lock);
2470 malloc_mutex_postfork_child(&arena->lock);
2471}