#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t arena_bin_info[NBINS];

size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t arena_maxclass; /* Max size class for arenas. */
unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

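/*
 * Descriptive note (sketch, inferred from the code below): translate a misc
 * map element to the map bits of the page it describes, by deriving the
 * owning chunk from the element's address and looking up that page's bits.
 */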
JEMALLOC_INLINE_C size_t
arena_miscelm_to_bits(arena_chunk_map_misc_t *miscelm)
{
    arena_chunk_t *chunk = CHUNK_ADDR2BASE(miscelm);
    size_t pageind = arena_miscelm_to_pageind(miscelm);

    return (arena_mapbits_get(chunk, pageind));
}

JEMALLOC_INLINE_C int
arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
    uintptr_t a_miscelm = (uintptr_t)a;
    uintptr_t b_miscelm = (uintptr_t)b;

    assert(a != NULL);
    assert(b != NULL);

    return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
    rb_link, arena_run_comp)

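/*
 * Descriptive note (sketch, inferred from the code below): order runs_avail
 * elements primarily by run size and secondarily by element address, so that
 * an nsearch() on a size-only key finds the lowest-addressed best fit.  A key
 * is encoded as (size | CHUNK_MAP_KEY) rather than pointing at a real map
 * element, and keys compare lower than any element of equal size.  For
 * example (illustrative sizes only), a key built for a 2-page request matches
 * the smallest available run of at least 2 pages.
 */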
JEMALLOC_INLINE_C int
arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
    int ret;
    size_t a_size;
    size_t b_size = arena_miscelm_to_bits(b) & ~PAGE_MASK;
    uintptr_t a_miscelm = (uintptr_t)a;
    uintptr_t b_miscelm = (uintptr_t)b;

    if (a_miscelm & CHUNK_MAP_KEY)
        a_size = a_miscelm & ~PAGE_MASK;
    else
        a_size = arena_miscelm_to_bits(a) & ~PAGE_MASK;

    ret = (a_size > b_size) - (a_size < b_size);
    if (ret == 0) {
        if (!(a_miscelm & CHUNK_MAP_KEY))
            ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
        else {
            /*
             * Treat keys as if they are lower than anything else.
             */
            ret = -1;
        }
    }

    return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
    arena_chunk_map_misc_t, rb_link, arena_avail_comp)

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
        pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
        pageind));
}

static void
arena_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
    arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
    assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
        CHUNK_MAP_DIRTY);
    ql_elm_new(miscelm, dr_link);
    ql_tail_insert(&arena->runs_dirty, miscelm, dr_link);
    arena->ndirty += npages;
}

static void
arena_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
    arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
    assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
        CHUNK_MAP_DIRTY);
    ql_remove(&arena->runs_dirty, miscelm, dr_link);
    arena->ndirty -= npages;
}

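/*
 * Descriptive note (sketch, inferred from the code below): allocate a region
 * from a small run.  The region address is computed as
 *
 *   rpages + reg0_offset + reg_interval * regind
 *
 * where regind is the first free bit in the run's bitmap.  For example
 * (illustrative values only, not taken from arena_bin_info), with
 * reg0_offset == 32 and reg_interval == 64, regind 3 maps to byte offset
 * 32 + 64*3 == 224 into the run's pages.
 */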
JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
    void *ret;
    unsigned regind;
    arena_chunk_map_misc_t *miscelm;
    void *rpages;

    assert(run->nfree > 0);
    assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

    regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
    miscelm = arena_run_to_miscelm(run);
    rpages = arena_miscelm_to_rpages(miscelm);
    ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
        (uintptr_t)(bin_info->reg_interval * regind));
    run->nfree--;
    return (ret);
}

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    size_t mapbits = arena_mapbits_get(chunk, pageind);
    index_t binind = arena_ptr_small_binind_get(ptr, mapbits);
    arena_bin_info_t *bin_info = &arena_bin_info[binind];
    unsigned regind = arena_run_regind(run, bin_info, ptr);

    assert(run->nfree < bin_info->nregs);
    /* Freeing an interior pointer can cause assertion failure. */
    assert(((uintptr_t)ptr -
        ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
        (uintptr_t)bin_info->reg0_offset)) %
        (uintptr_t)bin_info->reg_interval == 0);
    assert((uintptr_t)ptr >=
        (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
        (uintptr_t)bin_info->reg0_offset);
    /* Freeing an unallocated pointer can cause assertion failure. */
    assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

    bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
    run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
        (run_ind << LG_PAGE)), (npages << LG_PAGE));
    memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
        (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

    JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
        << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
    size_t i;
    UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

    arena_run_page_mark_zeroed(chunk, run_ind);
    for (i = 0; i < PAGE / sizeof(size_t); i++)
        assert(p[i] == 0);
}

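/*
 * Descriptive note (sketch, inferred from the code below): cactive is tracked
 * in chunk-rounded units, so the diff is the change in CHUNK_CEILING() of the
 * active byte count and only moves when the active set crosses a chunk
 * boundary.  For example (illustrative, assuming 4 KiB pages and 4 MiB
 * chunks), growing nactive from 1000 to 1020 pages stays under the 1024-page
 * chunk ceiling and leaves cactive unchanged, while growing to 1030 pages
 * crosses it and adds one chunk.
 */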
static void
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{

    if (config_stats) {
        ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
            - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
            LG_PAGE);
        if (cactive_diff != 0)
            stats_cactive_add(cactive_diff);
    }
}

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t need_pages)
{
    size_t total_pages, rem_pages;

    total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
        LG_PAGE;
    assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
        flag_dirty);
    assert(need_pages <= total_pages);
    rem_pages = total_pages - need_pages;

    arena_avail_remove(arena, chunk, run_ind, total_pages);
    if (flag_dirty != 0)
        arena_dirty_remove(arena, chunk, run_ind, total_pages);
    arena_cactive_update(arena, need_pages, 0);
    arena->nactive += need_pages;

    /* Keep track of trailing unused pages for later use. */
    if (rem_pages > 0) {
        if (flag_dirty != 0) {
            arena_mapbits_unallocated_set(chunk,
                run_ind+need_pages, (rem_pages << LG_PAGE),
                flag_dirty);
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                flag_dirty);
            arena_dirty_insert(arena, chunk, run_ind+need_pages,
                rem_pages);
        } else {
            arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
                (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+need_pages));
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+total_pages-1));
        }
        arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
    }
}

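/*
 * Descriptive note (sketch, inferred from the code below): common body for
 * arena_run_split_large() and arena_run_init_large().  When "remove" is true
 * the run is taken off runs_avail/runs_dirty; when "zero" is requested, a
 * clean run only zeroes the pages whose unzeroed flag is set, while a dirty
 * run is zeroed in full.
 */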
static void
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    size_t flag_dirty, run_ind, need_pages, i;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    run_ind = arena_miscelm_to_pageind(miscelm);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);

    if (remove) {
        arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
            need_pages);
    }

    if (zero) {
        if (flag_dirty == 0) {
            /*
             * The run is clean, so some pages may be zeroed (i.e.
             * never before touched).
             */
            for (i = 0; i < need_pages; i++) {
                if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
                    != 0)
                    arena_run_zero(chunk, run_ind+i, 1);
                else if (config_debug) {
                    arena_run_page_validate_zeroed(chunk,
                        run_ind+i);
                } else {
                    arena_run_page_mark_zeroed(chunk,
                        run_ind+i);
                }
            }
        } else {
            /* The run is dirty, so all pages must be zeroed. */
            arena_run_zero(chunk, run_ind, need_pages);
        }
    } else {
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
            (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
    }

    /*
     * Set the last element first, in case the run only contains one page
     * (i.e. both statements set the same element).
     */
    arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
    arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
}

static void
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

    arena_run_split_large_helper(arena, run, size, true, zero);
}

static void
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

    arena_run_split_large_helper(arena, run, size, false, zero);
}

static void
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    index_t binind)
{
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    size_t flag_dirty, run_ind, need_pages, i;

    assert(binind != BININD_INVALID);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    run_ind = arena_miscelm_to_pageind(miscelm);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);

    arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);

    for (i = 0; i < need_pages; i++) {
        arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
        if (config_debug && flag_dirty == 0 &&
            arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
            arena_run_page_validate_zeroed(chunk, run_ind+i);
    }
    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
        (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
    arena_chunk_t *chunk;

    assert(arena->spare != NULL);

    chunk = arena->spare;
    arena->spare = NULL;

    assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
        arena_maxrun);
    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
        arena_maxrun);
    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
        arena_mapbits_dirty_get(chunk, chunk_npages-1));

    return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
    bool *zero)
{
    arena_chunk_t *chunk;
    chunk_alloc_t *chunk_alloc;
    chunk_dalloc_t *chunk_dalloc;

    chunk_alloc = arena->chunk_alloc;
    chunk_dalloc = arena->chunk_dalloc;
    malloc_mutex_unlock(&arena->lock);
    chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
        arena->ind, NULL, size, alignment, zero);
    malloc_mutex_lock(&arena->lock);
    if (config_stats && chunk != NULL)
        arena->stats.mapped += chunksize;

    return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
    arena_chunk_t *chunk;
    bool zero;
    size_t unzeroed, i;

    assert(arena->spare == NULL);

    zero = false;
    chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero);
    if (chunk == NULL)
        return (NULL);

    chunk->arena = arena;

    /*
     * Initialize the map to contain one maximal free untouched run.  Mark
     * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
     */
    unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
    arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, unzeroed);
    /*
     * There is no need to initialize the internal page map entries unless
     * the chunk is not zeroed.
     */
    if (!zero) {
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
            (void *)arena_bitselm_get(chunk, map_bias+1),
            (size_t)((uintptr_t) arena_bitselm_get(chunk,
            chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
            map_bias+1)));
        for (i = map_bias+1; i < chunk_npages-1; i++)
            arena_mapbits_unzeroed_set(chunk, i, unzeroed);
    } else {
        JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
            *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
            arena_bitselm_get(chunk, chunk_npages-1) -
            (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
        if (config_debug) {
            for (i = map_bias+1; i < chunk_npages-1; i++) {
                assert(arena_mapbits_unzeroed_get(chunk, i) ==
                    unzeroed);
            }
        }
    }
    arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
        unzeroed);

    return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
    arena_chunk_t *chunk;

    if (arena->spare != NULL)
        chunk = arena_chunk_init_spare(arena);
    else {
        chunk = arena_chunk_init_hard(arena);
        if (chunk == NULL)
            return (NULL);
    }

    /* Insert the run into the runs_avail tree. */
    arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

    return (chunk);
}

static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{

    assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
        arena_maxrun);
    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
        arena_maxrun);
    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
        arena_mapbits_dirty_get(chunk, chunk_npages-1));

    /*
     * Remove run from the runs_avail tree, so that the arena does not use
     * it.
     */
    arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

    if (arena->spare != NULL) {
        arena_chunk_t *spare = arena->spare;
        chunk_dalloc_t *chunk_dalloc;

        arena->spare = chunk;
        if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
            arena_dirty_remove(arena, spare, map_bias,
                chunk_npages-map_bias);
        }
        chunk_dalloc = arena->chunk_dalloc;
        malloc_mutex_unlock(&arena->lock);
        chunk_dalloc((void *)spare, chunksize, arena->ind);
        malloc_mutex_lock(&arena->lock);
        if (config_stats)
            arena->stats.mapped -= chunksize;
    } else
        arena->spare = chunk;
}

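/*
 * Descriptive note (sketch, inferred from the code below): huge allocation
 * statistics helpers.  The hstats index is the huge size class's offset past
 * the small (NBINS) and large (nlclasses) classes, i.e.
 * index = size2index(usize) - nlclasses - NBINS.  The *_undo variants revert
 * an optimistic update after a failed allocation.
 */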
static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.nmalloc_huge++;
    arena->stats.allocated_huge += usize;
    arena->stats.hstats[index].nmalloc++;
    arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.nmalloc_huge--;
    arena->stats.allocated_huge -= usize;
    arena->stats.hstats[index].nmalloc--;
    arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.ndalloc_huge++;
    arena->stats.allocated_huge -= usize;
    arena->stats.hstats[index].ndalloc++;
    arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.ndalloc_huge--;
    arena->stats.allocated_huge += usize;
    arena->stats.hstats[index].ndalloc--;
    arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

    arena_huge_dalloc_stats_update(arena, oldsize);
    arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

    arena_huge_dalloc_stats_update_undo(arena, oldsize);
    arena_huge_malloc_stats_update_undo(arena, usize);
}

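/*
 * Descriptive note (sketch, inferred from the code below): huge chunk
 * allocation drops arena->lock around the chunk_alloc_arena() call, so stats
 * and nactive are updated optimistically first and rolled back with the
 * *_undo helpers if the allocation fails.
 */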
void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
    bool *zero)
{
    void *ret;
    chunk_alloc_t *chunk_alloc;
    chunk_dalloc_t *chunk_dalloc;
    size_t csize = CHUNK_CEILING(usize);

    malloc_mutex_lock(&arena->lock);
    chunk_alloc = arena->chunk_alloc;
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        /* Optimistically update stats prior to unlocking. */
        arena_huge_malloc_stats_update(arena, usize);
        arena->stats.mapped += usize;
    }
    arena->nactive += (usize >> LG_PAGE);
    malloc_mutex_unlock(&arena->lock);

    ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, NULL,
        csize, alignment, zero);
    if (ret == NULL) {
        /* Revert optimistic stats updates. */
        malloc_mutex_lock(&arena->lock);
        if (config_stats) {
            arena_huge_malloc_stats_update_undo(arena, usize);
            arena->stats.mapped -= usize;
        }
        arena->nactive -= (usize >> LG_PAGE);
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }

    if (config_stats)
        stats_cactive_add(usize);

    return (ret);
}

void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
    chunk_dalloc_t *chunk_dalloc;

    malloc_mutex_lock(&arena->lock);
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        arena_huge_dalloc_stats_update(arena, usize);
        arena->stats.mapped -= usize;
        stats_cactive_sub(usize);
    }
    arena->nactive -= (usize >> LG_PAGE);
    malloc_mutex_unlock(&arena->lock);
    chunk_dalloc(chunk, CHUNK_CEILING(usize), arena->ind);
}

void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{

    assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
    assert(oldsize != usize);

    malloc_mutex_lock(&arena->lock);
    if (config_stats)
        arena_huge_ralloc_stats_update(arena, oldsize, usize);
    if (oldsize < usize) {
        size_t udiff = usize - oldsize;
        arena->nactive += udiff >> LG_PAGE;
        if (config_stats)
            stats_cactive_add(udiff);
    } else {
        size_t udiff = oldsize - usize;
        arena->nactive -= udiff >> LG_PAGE;
        if (config_stats)
            stats_cactive_sub(udiff);
    }
    malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{
    chunk_dalloc_t *chunk_dalloc;
    size_t udiff = oldsize - usize;
    size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

    malloc_mutex_lock(&arena->lock);
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        arena_huge_ralloc_stats_update(arena, oldsize, usize);
        if (cdiff != 0) {
            arena->stats.mapped -= cdiff;
            stats_cactive_sub(udiff);
        }
    }
    arena->nactive -= udiff >> LG_PAGE;
    malloc_mutex_unlock(&arena->lock);
    if (cdiff != 0) {
        chunk_dalloc((void *)((uintptr_t)chunk + CHUNK_CEILING(usize)),
            cdiff, arena->ind);
    }
}

bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize, bool *zero)
{
    chunk_alloc_t *chunk_alloc;
    chunk_dalloc_t *chunk_dalloc;
    size_t udiff = usize - oldsize;
    size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

    malloc_mutex_lock(&arena->lock);
    chunk_alloc = arena->chunk_alloc;
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        /* Optimistically update stats prior to unlocking. */
        arena_huge_ralloc_stats_update(arena, oldsize, usize);
        arena->stats.mapped += cdiff;
    }
    arena->nactive += (udiff >> LG_PAGE);
    malloc_mutex_unlock(&arena->lock);

    if (chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
        (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)), cdiff,
        chunksize, zero) == NULL) {
        /* Revert optimistic stats updates. */
        malloc_mutex_lock(&arena->lock);
        if (config_stats) {
            arena_huge_ralloc_stats_update_undo(arena,
                oldsize, usize);
            arena->stats.mapped -= cdiff;
        }
        arena->nactive -= (udiff >> LG_PAGE);
        malloc_mutex_unlock(&arena->lock);
        return (true);
    }

    if (config_stats)
        stats_cactive_add(udiff);

    return (false);
}

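/*
 * Descriptive note (sketch, inferred from the code below): the
 * arena_run_alloc_{large,small}_helper() functions perform a best-fit search
 * of runs_avail using a size-encoded key (see arena_avail_comp()); the
 * non-helper variants fall back to allocating a new chunk, then retry the
 * search in case another thread freed memory while arena->lock was dropped.
 */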
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
    arena_chunk_map_misc_t *miscelm;
    arena_chunk_map_misc_t *key;

    key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
    miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
    if (miscelm != NULL) {
        arena_run_t *run = &miscelm->run;
        arena_run_split_large(arena, &miscelm->run, size, zero);
        return (run);
    }

    return (NULL);
}

static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxrun);
    assert((size & PAGE_MASK) == 0);

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_large_helper(arena, size, zero);
    if (run != NULL)
        return (run);

    /*
     * No usable runs.  Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = &arena_miscelm_get(chunk, map_bias)->run;
        arena_run_split_large(arena, run, size, zero);
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
{
    arena_run_t *run;
    arena_chunk_map_misc_t *miscelm;
    arena_chunk_map_misc_t *key;

    key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
    miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
    if (miscelm != NULL) {
        run = &miscelm->run;
        arena_run_split_small(arena, run, size, binind);
        return (run);
    }

    return (NULL);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxrun);
    assert((size & PAGE_MASK) == 0);
    assert(binind != BININD_INVALID);

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_small_helper(arena, size, binind);
    if (run != NULL)
        return (run);

    /*
     * No usable runs.  Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = &arena_miscelm_get(chunk, map_bias)->run;
        arena_run_split_small(arena, run, size, binind);
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    return (arena_run_alloc_small_helper(arena, size, binind));
}

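/*
 * Descriptive note (sketch, inferred from the code below): purge only once
 * dirty pages exceed nactive >> opt_lg_dirty_mult.  For example (illustrative
 * values), with opt_lg_dirty_mult == 3 and arena->nactive == 800 pages, the
 * threshold is 800 >> 3 == 100, so purging starts once arena->ndirty exceeds
 * 100 pages.
 */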
JEMALLOC_INLINE_C void
arena_maybe_purge(arena_t *arena)
{
    size_t threshold;

    /* Don't purge if the option is disabled. */
    if (opt_lg_dirty_mult < 0)
        return;
    threshold = (arena->nactive >> opt_lg_dirty_mult);
    /*
     * Don't purge unless the number of purgeable pages exceeds the
     * threshold.
     */
    if (arena->ndirty <= threshold)
        return;

    arena_purge(arena, false);
}

static size_t
arena_dirty_count(arena_t *arena)
{
    size_t ndirty = 0;
    arena_chunk_map_misc_t *miscelm;
    arena_chunk_t *chunk;
    size_t pageind, npages;

    ql_foreach(miscelm, &arena->runs_dirty, dr_link) {
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
        pageind = arena_miscelm_to_pageind(miscelm);
        assert(arena_mapbits_allocated_get(chunk, pageind) == 0);
        assert(arena_mapbits_large_get(chunk, pageind) == 0);
        assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
        npages = arena_mapbits_unallocated_size_get(chunk, pageind) >>
            LG_PAGE;
        ndirty += npages;
    }

    return (ndirty);
}

static size_t
arena_compute_npurge(arena_t *arena, bool all)
{
    size_t npurge;

    /*
     * Compute the minimum number of pages that this thread should try to
     * purge.
     */
    if (!all) {
        size_t threshold = (arena->nactive >> opt_lg_dirty_mult);

        npurge = arena->ndirty - threshold;
    } else
        npurge = arena->ndirty;

    return (npurge);
}

static size_t
arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
    arena_chunk_miscelms_t *miscelms)
{
    arena_chunk_map_misc_t *miscelm;
    size_t nstashed = 0;

    /* Add at least npurge pages to purge_list. */
    for (miscelm = ql_first(&arena->runs_dirty); miscelm != NULL;
        miscelm = ql_first(&arena->runs_dirty)) {
        arena_chunk_t *chunk =
            (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
        size_t pageind = arena_miscelm_to_pageind(miscelm);
        size_t run_size = arena_mapbits_unallocated_size_get(chunk,
            pageind);
        size_t npages = run_size >> LG_PAGE;
        arena_run_t *run = &miscelm->run;

        assert(pageind + npages <= chunk_npages);
        assert(arena_mapbits_dirty_get(chunk, pageind) ==
            arena_mapbits_dirty_get(chunk, pageind+npages-1));

        /*
         * If purging the spare chunk's run, make it available prior to
         * allocation.
         */
        if (chunk == arena->spare)
            arena_chunk_alloc(arena);

        /* Temporarily allocate the free dirty run. */
        arena_run_split_large(arena, run, run_size, false);
        /* Append to purge_list for later processing. */
        ql_elm_new(miscelm, dr_link);
        ql_tail_insert(miscelms, miscelm, dr_link);

        nstashed += npages;

        if (!all && nstashed >= npurge)
            break;
    }

    return (nstashed);
}

static size_t
arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms)
{
    size_t npurged, nmadvise;
    arena_chunk_map_misc_t *miscelm;

    if (config_stats)
        nmadvise = 0;
    npurged = 0;

    malloc_mutex_unlock(&arena->lock);

    ql_foreach(miscelm, miscelms, dr_link) {
        arena_chunk_t *chunk;
        size_t pageind, run_size, npages, flag_unzeroed, i;
        bool unzeroed;

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
        pageind = arena_miscelm_to_pageind(miscelm);
        run_size = arena_mapbits_large_size_get(chunk, pageind);
        npages = run_size >> LG_PAGE;

        assert(pageind + npages <= chunk_npages);
        unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
            LG_PAGE)), run_size);
        flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;

        /*
         * Set the unzeroed flag for all pages, now that pages_purge()
         * has returned whether the pages were zeroed as a side effect
         * of purging.  This chunk map modification is safe even though
         * the arena mutex isn't currently owned by this thread,
         * because the run is marked as allocated, thus protecting it
         * from being modified by any other thread.  As long as these
         * writes don't perturb the first and last elements'
         * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
         */
        for (i = 0; i < npages; i++) {
            arena_mapbits_unzeroed_set(chunk, pageind+i,
                flag_unzeroed);
        }

        npurged += npages;
        if (config_stats)
            nmadvise++;
    }

    malloc_mutex_lock(&arena->lock);

    if (config_stats) {
        arena->stats.nmadvise += nmadvise;
        arena->stats.purged += npurged;
    }

    return (npurged);
}

static void
arena_unstash_purged(arena_t *arena, arena_chunk_miscelms_t *miscelms)
{
    arena_chunk_map_misc_t *miscelm;

    /* Deallocate runs. */
    for (miscelm = ql_first(miscelms); miscelm != NULL;
        miscelm = ql_first(miscelms)) {
        arena_run_t *run = &miscelm->run;
        ql_remove(miscelms, miscelm, dr_link);
        arena_run_dalloc(arena, run, false, true);
    }
}

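/*
 * Descriptive note (sketch, inferred from the code below): the purging
 * pipeline is arena_compute_npurge() to size the effort, arena_stash_dirty()
 * to temporarily allocate dirty runs and move them onto a local purge list,
 * arena_purge_stashed() to call pages_purge() with the arena lock dropped,
 * and arena_unstash_purged() to deallocate the runs, returning them to
 * runs_avail.
 */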
void
arena_purge(arena_t *arena, bool all)
{
    size_t npurge, npurgeable, npurged;
    arena_chunk_miscelms_t purge_list;

    /*
     * Calls to arena_dirty_count() are disabled even for debug builds
     * because overhead grows nonlinearly as memory usage increases.
     */
    if (false && config_debug) {
        size_t ndirty = arena_dirty_count(arena);
        assert(ndirty == arena->ndirty);
    }
    assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty || all);

    if (config_stats)
        arena->stats.npurge++;

    npurge = arena_compute_npurge(arena, all);
    ql_new(&purge_list);
    npurgeable = arena_stash_dirty(arena, all, npurge, &purge_list);
    assert(npurgeable >= npurge);
    npurged = arena_purge_stashed(arena, &purge_list);
    assert(npurged == npurgeable);
    arena_unstash_purged(arena, &purge_list);
}

void
arena_purge_all(arena_t *arena)
{

    malloc_mutex_lock(&arena->lock);
    arena_purge(arena, true);
    malloc_mutex_unlock(&arena->lock);
}

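/*
 * Descriptive note (sketch, inferred from the code below): coalesce a run
 * being deallocated with neighboring unallocated runs of the same dirtiness,
 * updating *p_size, *p_run_ind, and *p_run_pages in place; the caller
 * reinserts the resulting run into runs_avail (and runs_dirty if dirty).
 */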
static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
{
    size_t size = *p_size;
    size_t run_ind = *p_run_ind;
    size_t run_pages = *p_run_pages;

    /* Try to coalesce forward. */
    if (run_ind + run_pages < chunk_npages &&
        arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
        arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
        size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
            run_ind+run_pages);
        size_t nrun_pages = nrun_size >> LG_PAGE;

        /*
         * Remove successor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert(arena_mapbits_unallocated_size_get(chunk,
            run_ind+run_pages+nrun_pages-1) == nrun_size);
        assert(arena_mapbits_dirty_get(chunk,
            run_ind+run_pages+nrun_pages-1) == flag_dirty);
        arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);

        /* If the successor is dirty, remove it from runs_dirty. */
        if (flag_dirty != 0) {
            arena_dirty_remove(arena, chunk, run_ind+run_pages,
                nrun_pages);
        }

        size += nrun_size;
        run_pages += nrun_pages;

        arena_mapbits_unallocated_size_set(chunk, run_ind, size);
        arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
            size);
    }

    /* Try to coalesce backward. */
    if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
        run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
        flag_dirty) {
        size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
            run_ind-1);
        size_t prun_pages = prun_size >> LG_PAGE;

        run_ind -= prun_pages;

        /*
         * Remove predecessor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
            prun_size);
        assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
        arena_avail_remove(arena, chunk, run_ind, prun_pages);

        /* If the predecessor is dirty, remove it from runs_dirty. */
        if (flag_dirty != 0)
            arena_dirty_remove(arena, chunk, run_ind, prun_pages);

        size += prun_size;
        run_pages += prun_pages;

        arena_mapbits_unallocated_size_set(chunk, run_ind, size);
        arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
            size);
    }

    *p_size = size;
    *p_run_ind = run_ind;
    *p_run_pages = run_pages;
}

static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    size_t size, run_ind, run_pages, flag_dirty;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    run_ind = arena_miscelm_to_pageind(miscelm);
    assert(run_ind >= map_bias);
    assert(run_ind < chunk_npages);
    if (arena_mapbits_large_get(chunk, run_ind) != 0) {
        size = arena_mapbits_large_size_get(chunk, run_ind);
        assert(size == PAGE ||
            arena_mapbits_large_size_get(chunk,
            run_ind+(size>>LG_PAGE)-1) == 0);
    } else {
        arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
        size = bin_info->run_size;
    }
    run_pages = (size >> LG_PAGE);
    arena_cactive_update(arena, 0, run_pages);
    arena->nactive -= run_pages;

    /*
     * The run is dirty if the caller claims to have dirtied it, as well as
     * if it was already dirty before being allocated and the caller
     * doesn't claim to have cleaned it.
     */
    assert(arena_mapbits_dirty_get(chunk, run_ind) ==
        arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
    if (!cleaned && arena_mapbits_dirty_get(chunk, run_ind) != 0)
        dirty = true;
    flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;

    /* Mark pages as unallocated in the chunk map. */
    if (dirty) {
        arena_mapbits_unallocated_set(chunk, run_ind, size,
            CHUNK_MAP_DIRTY);
        arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
            CHUNK_MAP_DIRTY);
    } else {
        arena_mapbits_unallocated_set(chunk, run_ind, size,
            arena_mapbits_unzeroed_get(chunk, run_ind));
        arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
            arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
    }

    arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
        flag_dirty);

    /* Insert into runs_avail, now that coalescing is complete. */
    assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
        arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
    assert(arena_mapbits_dirty_get(chunk, run_ind) ==
        arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
    arena_avail_insert(arena, chunk, run_ind, run_pages);

    if (dirty)
        arena_dirty_insert(arena, chunk, run_ind, run_pages);

    /* Deallocate chunk if it is now completely unused. */
    if (size == arena_maxrun) {
        assert(run_ind == map_bias);
        assert(run_pages == (arena_maxrun >> LG_PAGE));
        arena_chunk_dalloc(arena, chunk);
    }

    /*
     * It is okay to do dirty page processing here even if the chunk was
     * deallocated above, since in that case it is the spare.  Waiting
     * until after possible chunk deallocation to do dirty processing
     * allows for an old spare to be fully deallocated, thus decreasing the
     * chances of spuriously crossing the dirty page purging threshold.
     */
    if (dirty)
        arena_maybe_purge(arena);
}

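/*
 * Descriptive note (sketch, inferred from the code below):
 * arena_run_trim_head() and arena_run_trim_tail() shrink a large run by
 * re-marking the unwanted leading/trailing pages as a separate run and
 * handing that run to arena_run_dalloc().
 */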
1207static void
1208arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1209 size_t oldsize, size_t newsize)
1210{
Jason Evans0c5dd032014-09-29 01:31:39 -07001211 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1212 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07001213 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001214 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001215
1216 assert(oldsize > newsize);
1217
1218 /*
1219 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07001220 * leading run as separately allocated. Set the last element of each
1221 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08001222 */
Jason Evans203484e2012-05-02 00:30:36 -07001223 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evansd8ceef62012-05-10 20:59:39 -07001224 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
1225 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001226
Jason Evans7372b152012-02-10 20:22:09 -08001227 if (config_debug) {
Jason Evansae4c7b42012-04-02 07:04:34 -07001228 UNUSED size_t tail_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001229 assert(arena_mapbits_large_size_get(chunk,
1230 pageind+head_npages+tail_npages-1) == 0);
1231 assert(arena_mapbits_dirty_get(chunk,
1232 pageind+head_npages+tail_npages-1) == flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001233 }
Jason Evansd8ceef62012-05-10 20:59:39 -07001234 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
1235 flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08001236
Jason Evanse3d13062012-10-30 15:42:37 -07001237 arena_run_dalloc(arena, run, false, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001238}
1239
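/*
 * Counterpart to arena_run_trim_head(): keep the leading newsize bytes and
 * deallocate the trailing (oldsize - newsize) bytes as a separate run.  The
 * dirty argument controls whether the trailing pages are treated as dirty
 * when they are returned to the arena.
 */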
1240static void
1241arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1242 size_t oldsize, size_t newsize, bool dirty)
1243{
Jason Evans0c5dd032014-09-29 01:31:39 -07001244 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1245 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07001246 size_t head_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001247 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07001248 arena_chunk_map_misc_t *tail_miscelm;
1249 arena_run_t *tail_run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001250
1251 assert(oldsize > newsize);
1252
1253 /*
1254 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07001255 * trailing run as separately allocated. Set the last element of each
1256 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08001257 */
Jason Evans203484e2012-05-02 00:30:36 -07001258 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evansd8ceef62012-05-10 20:59:39 -07001259 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
1260 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001261
Jason Evans203484e2012-05-02 00:30:36 -07001262 if (config_debug) {
1263 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
1264 assert(arena_mapbits_large_size_get(chunk,
1265 pageind+head_npages+tail_npages-1) == 0);
1266 assert(arena_mapbits_dirty_get(chunk,
1267 pageind+head_npages+tail_npages-1) == flag_dirty);
1268 }
1269 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
Jason Evansd8ceef62012-05-10 20:59:39 -07001270 flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08001271
Jason Evans0c5dd032014-09-29 01:31:39 -07001272 tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
1273 tail_run = &tail_miscelm->run;
1274 arena_run_dalloc(arena, tail_run, dirty, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001275}
1276
1277static arena_run_t *
Jason Evanse7a10582012-02-13 17:36:52 -08001278arena_bin_runs_first(arena_bin_t *bin)
1279{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001280 arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
Jason Evans0c5dd032014-09-29 01:31:39 -07001281 if (miscelm != NULL)
1282 return (&miscelm->run);
Jason Evanse7a10582012-02-13 17:36:52 -08001283
1284 return (NULL);
1285}
1286
1287static void
1288arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
1289{
Jason Evans0c5dd032014-09-29 01:31:39 -07001290 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08001291
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001292 assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
Jason Evanse7a10582012-02-13 17:36:52 -08001293
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001294 arena_run_tree_insert(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08001295}
1296
1297static void
1298arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
1299{
Jason Evans0c5dd032014-09-29 01:31:39 -07001300 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08001301
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001302 assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
Jason Evanse7a10582012-02-13 17:36:52 -08001303
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001304 arena_run_tree_remove(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08001305}
1306
1307static arena_run_t *
1308arena_bin_nonfull_run_tryget(arena_bin_t *bin)
1309{
1310 arena_run_t *run = arena_bin_runs_first(bin);
1311 if (run != NULL) {
1312 arena_bin_runs_remove(bin, run);
1313 if (config_stats)
1314 bin->stats.reruns++;
1315 }
1316 return (run);
1317}
1318
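/*
 * Find a non-full run for bin, allocating a new run from the arena if none is
 * available.  bin->lock is dropped while arena->lock is held for the run
 * allocation, so callers must re-check bin->runcur afterwards (see
 * arena_bin_malloc_hard()).
 */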
1319static arena_run_t *
Jason Evanse476f8a2010-01-16 09:53:50 -08001320arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1321{
Jason Evanse476f8a2010-01-16 09:53:50 -08001322 arena_run_t *run;
Jason Evans155bfa72014-10-05 17:54:10 -07001323 index_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001324 arena_bin_info_t *bin_info;
Jason Evanse476f8a2010-01-16 09:53:50 -08001325
1326 /* Look for a usable run. */
Jason Evanse7a10582012-02-13 17:36:52 -08001327 run = arena_bin_nonfull_run_tryget(bin);
1328 if (run != NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001329 return (run);
Jason Evanse476f8a2010-01-16 09:53:50 -08001330 /* No existing runs have any space available. */
1331
Jason Evans49f7e8f2011-03-15 13:59:15 -07001332 binind = arena_bin_index(arena, bin);
1333 bin_info = &arena_bin_info[binind];
1334
Jason Evanse476f8a2010-01-16 09:53:50 -08001335 /* Allocate a new run. */
Jason Evanse00572b2010-03-14 19:43:56 -07001336 malloc_mutex_unlock(&bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07001337 /******************************/
Jason Evans86815df2010-03-13 20:32:56 -08001338 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001339 run = arena_run_alloc_small(arena, bin_info->run_size, binind);
Jason Evanse00572b2010-03-14 19:43:56 -07001340 if (run != NULL) {
1341 /* Initialize run internals. */
Jason Evans381c23d2014-10-10 23:01:03 -07001342 run->binind = binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001343 run->nfree = bin_info->nregs;
Jason Evans0c5dd032014-09-29 01:31:39 -07001344 bitmap_init(run->bitmap, &bin_info->bitmap_info);
Jason Evanse69bee02010-03-15 22:25:23 -07001345 }
1346 malloc_mutex_unlock(&arena->lock);
1347 /********************************/
1348 malloc_mutex_lock(&bin->lock);
1349 if (run != NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001350 if (config_stats) {
1351 bin->stats.nruns++;
1352 bin->stats.curruns++;
Jason Evans7372b152012-02-10 20:22:09 -08001353 }
Jason Evanse00572b2010-03-14 19:43:56 -07001354 return (run);
1355 }
1356
1357 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08001358 * arena_run_alloc_small() failed, but another thread may have made
Jason Evans940a2e02010-10-17 17:51:37 -07001359 * sufficient memory available while this one dropped bin->lock above,
Jason Evanse00572b2010-03-14 19:43:56 -07001360 * so search one more time.
1361 */
Jason Evanse7a10582012-02-13 17:36:52 -08001362 run = arena_bin_nonfull_run_tryget(bin);
1363 if (run != NULL)
Jason Evanse00572b2010-03-14 19:43:56 -07001364 return (run);
Jason Evanse00572b2010-03-14 19:43:56 -07001365
1366 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08001367}
1368
Jason Evans1e0a6362010-03-13 13:41:58 -08001369/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08001370static void *
1371arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1372{
Jason Evanse00572b2010-03-14 19:43:56 -07001373 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07001374 index_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001375 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07001376 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001377
Jason Evans49f7e8f2011-03-15 13:59:15 -07001378 binind = arena_bin_index(arena, bin);
1379 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07001380 bin->runcur = NULL;
1381 run = arena_bin_nonfull_run_get(arena, bin);
1382 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1383 /*
1384 * Another thread updated runcur while this one ran without the
1385 * bin lock in arena_bin_nonfull_run_get().
1386 */
Jason Evanse00572b2010-03-14 19:43:56 -07001387 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001388 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07001389 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07001390 arena_chunk_t *chunk;
1391
1392 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08001393 * arena_run_alloc_small() may have allocated run, or
1394 * it may have pulled run from the bin's run tree.
1395 * Therefore it is unsafe to make any assumptions about
1396 * how run has previously been used, and
1397 * arena_bin_lower_run() must be called, as if a region
1398 * were just deallocated from the run.
Jason Evans940a2e02010-10-17 17:51:37 -07001399 */
1400 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001401 if (run->nfree == bin_info->nregs)
Jason Evans8de6a022010-10-17 20:57:30 -07001402 arena_dalloc_bin_run(arena, chunk, run, bin);
1403 else
1404 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07001405 }
1406 return (ret);
1407 }
1408
1409 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001410 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07001411
1412 bin->runcur = run;
1413
Jason Evanse476f8a2010-01-16 09:53:50 -08001414 assert(bin->runcur->nfree > 0);
1415
Jason Evans49f7e8f2011-03-15 13:59:15 -07001416 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08001417}
1418
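/*
 * Fill a tcache bin with small regions from this arena.  Up to
 * (ncached_max >> lg_fill_div) regions are allocated and stored into
 * tbin->avail in reverse order, so that low regions get used first.
 */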
Jason Evans86815df2010-03-13 20:32:56 -08001419void
Jason Evans155bfa72014-10-05 17:54:10 -07001420arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind,
Jason Evans7372b152012-02-10 20:22:09 -08001421 uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08001422{
1423 unsigned i, nfill;
1424 arena_bin_t *bin;
1425 arena_run_t *run;
1426 void *ptr;
1427
1428 assert(tbin->ncached == 0);
1429
Jason Evans88c222c2013-02-06 11:59:30 -08001430 if (config_prof && arena_prof_accum(arena, prof_accumbytes))
1431 prof_idump();
Jason Evanse69bee02010-03-15 22:25:23 -07001432 bin = &arena->bins[binind];
1433 malloc_mutex_lock(&bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07001434 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1435 tbin->lg_fill_div); i < nfill; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001436 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001437 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001438 else
1439 ptr = arena_bin_malloc_hard(arena, bin);
Jason Evansf11a6772014-10-05 13:05:10 -07001440 if (ptr == NULL) {
1441 /*
1442 * OOM. tbin->avail isn't yet filled down to its first
1443 * element, so the successful allocations (if any) must
1444 * be moved to the base of tbin->avail before bailing
1445 * out.
1446 */
1447 if (i > 0) {
1448 memmove(tbin->avail, &tbin->avail[nfill - i],
1449 i * sizeof(void *));
1450 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001451 break;
Jason Evansf11a6772014-10-05 13:05:10 -07001452 }
Jason Evans9c640bf2014-09-11 16:20:44 -07001453 if (config_fill && unlikely(opt_junk)) {
Jason Evans122449b2012-04-06 00:35:09 -07001454 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
1455 true);
1456 }
Jason Evans9c43c132011-03-18 10:53:15 -07001457 /* Insert such that low regions get used first. */
1458 tbin->avail[nfill - 1 - i] = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08001459 }
Jason Evans7372b152012-02-10 20:22:09 -08001460 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08001461 bin->stats.nmalloc += i;
1462 bin->stats.nrequests += tbin->tstats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07001463 bin->stats.curregs += i;
Jason Evans7372b152012-02-10 20:22:09 -08001464 bin->stats.nfills++;
1465 tbin->tstats.nrequests = 0;
1466 }
Jason Evans86815df2010-03-13 20:32:56 -08001467 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001468 tbin->ncached = i;
Jason Evanse476f8a2010-01-16 09:53:50 -08001469}
Jason Evanse476f8a2010-01-16 09:53:50 -08001470
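/*
 * Junk filling for small regions with redzones.  On allocation the redzones
 * are filled with 0xa5 (and, when the region need not be zeroed, the region
 * itself as well); on deallocation the fill pattern is 0x5a.  A region with
 * redzones roughly looks like:
 *
 *   | redzone | region (reg_size bytes) | redzone |
 *
 * arena_redzones_validate() reports any redzone byte that no longer contains
 * the expected 0xa5 pattern.
 */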
Jason Evans122449b2012-04-06 00:35:09 -07001471void
1472arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
1473{
1474
1475 if (zero) {
1476 size_t redzone_size = bin_info->redzone_size;
1477 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
1478 redzone_size);
1479 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
1480 redzone_size);
1481 } else {
1482 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
1483 bin_info->reg_interval);
1484 }
1485}
1486
Jason Evans0d6c5d82013-12-17 15:14:36 -08001487#ifdef JEMALLOC_JET
1488#undef arena_redzone_corruption
1489#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
1490#endif
1491static void
1492arena_redzone_corruption(void *ptr, size_t usize, bool after,
1493 size_t offset, uint8_t byte)
1494{
1495
1496 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
1497 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
1498 after ? "after" : "before", ptr, usize, byte);
1499}
1500#ifdef JEMALLOC_JET
Jason Evans0d6c5d82013-12-17 15:14:36 -08001501#undef arena_redzone_corruption
Jason Evans6b694c42014-01-07 16:47:56 -08001502#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
1503arena_redzone_corruption_t *arena_redzone_corruption =
1504 JEMALLOC_N(arena_redzone_corruption_impl);
Jason Evans0d6c5d82013-12-17 15:14:36 -08001505#endif
1506
1507static void
1508arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07001509{
1510 size_t size = bin_info->reg_size;
1511 size_t redzone_size = bin_info->redzone_size;
1512 size_t i;
1513 bool error = false;
1514
1515 for (i = 1; i <= redzone_size; i++) {
Jason Evans0d6c5d82013-12-17 15:14:36 -08001516 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
1517 if (*byte != 0xa5) {
Jason Evans122449b2012-04-06 00:35:09 -07001518 error = true;
Jason Evans0d6c5d82013-12-17 15:14:36 -08001519 arena_redzone_corruption(ptr, size, false, i, *byte);
1520 if (reset)
1521 *byte = 0xa5;
Jason Evans122449b2012-04-06 00:35:09 -07001522 }
1523 }
1524 for (i = 0; i < redzone_size; i++) {
Jason Evans0d6c5d82013-12-17 15:14:36 -08001525 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
1526 if (*byte != 0xa5) {
Jason Evans122449b2012-04-06 00:35:09 -07001527 error = true;
Jason Evans0d6c5d82013-12-17 15:14:36 -08001528 arena_redzone_corruption(ptr, size, true, i, *byte);
1529 if (reset)
1530 *byte = 0xa5;
Jason Evans122449b2012-04-06 00:35:09 -07001531 }
1532 }
1533 if (opt_abort && error)
1534 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08001535}
Jason Evans122449b2012-04-06 00:35:09 -07001536
Jason Evans6b694c42014-01-07 16:47:56 -08001537#ifdef JEMALLOC_JET
1538#undef arena_dalloc_junk_small
1539#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
1540#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08001541void
1542arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1543{
1544 size_t redzone_size = bin_info->redzone_size;
1545
1546 arena_redzones_validate(ptr, bin_info, false);
Jason Evans122449b2012-04-06 00:35:09 -07001547 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1548 bin_info->reg_interval);
1549}
Jason Evans6b694c42014-01-07 16:47:56 -08001550#ifdef JEMALLOC_JET
1551#undef arena_dalloc_junk_small
1552#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
1553arena_dalloc_junk_small_t *arena_dalloc_junk_small =
1554 JEMALLOC_N(arena_dalloc_junk_small_impl);
1555#endif
Jason Evans122449b2012-04-06 00:35:09 -07001556
Jason Evans0d6c5d82013-12-17 15:14:36 -08001557void
1558arena_quarantine_junk_small(void *ptr, size_t usize)
1559{
Jason Evans155bfa72014-10-05 17:54:10 -07001560 index_t binind;
Jason Evans0d6c5d82013-12-17 15:14:36 -08001561 arena_bin_info_t *bin_info;
1562 cassert(config_fill);
1563 assert(opt_junk);
1564 assert(opt_quarantine);
1565 assert(usize <= SMALL_MAXCLASS);
1566
Jason Evans155bfa72014-10-05 17:54:10 -07001567 binind = size2index(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08001568 bin_info = &arena_bin_info[binind];
1569 arena_redzones_validate(ptr, bin_info, true);
1570}
1571
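/*
 * Allocate a small region: map the request to its size class, then serve it
 * from bin->runcur under bin->lock, falling back to arena_bin_malloc_hard()
 * when the current run is full or missing.
 */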
Jason Evanse476f8a2010-01-16 09:53:50 -08001572void *
1573arena_malloc_small(arena_t *arena, size_t size, bool zero)
1574{
1575 void *ret;
1576 arena_bin_t *bin;
1577 arena_run_t *run;
Jason Evans155bfa72014-10-05 17:54:10 -07001578 index_t binind;
Jason Evanse476f8a2010-01-16 09:53:50 -08001579
Jason Evans155bfa72014-10-05 17:54:10 -07001580 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08001581 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08001582 bin = &arena->bins[binind];
Jason Evans155bfa72014-10-05 17:54:10 -07001583 size = index2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001584
Jason Evans86815df2010-03-13 20:32:56 -08001585 malloc_mutex_lock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001586 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001587 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001588 else
1589 ret = arena_bin_malloc_hard(arena, bin);
1590
1591 if (ret == NULL) {
Jason Evans86815df2010-03-13 20:32:56 -08001592 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001593 return (NULL);
1594 }
1595
Jason Evans7372b152012-02-10 20:22:09 -08001596 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08001597 bin->stats.nmalloc++;
1598 bin->stats.nrequests++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07001599 bin->stats.curregs++;
Jason Evans7372b152012-02-10 20:22:09 -08001600 }
Jason Evans86815df2010-03-13 20:32:56 -08001601 malloc_mutex_unlock(&bin->lock);
Jason Evans551ebc42014-10-03 10:16:09 -07001602 if (config_prof && !isthreaded && arena_prof_accum(arena, size))
Jason Evans88c222c2013-02-06 11:59:30 -08001603 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08001604
Jason Evans551ebc42014-10-03 10:16:09 -07001605 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08001606 if (config_fill) {
Jason Evans9c640bf2014-09-11 16:20:44 -07001607 if (unlikely(opt_junk)) {
Jason Evans122449b2012-04-06 00:35:09 -07001608 arena_alloc_junk_small(ret,
1609 &arena_bin_info[binind], false);
Jason Evans9c640bf2014-09-11 16:20:44 -07001610 } else if (unlikely(opt_zero))
Jason Evans7372b152012-02-10 20:22:09 -08001611 memset(ret, 0, size);
1612 }
Jason Evansbd87b012014-04-15 16:35:08 -07001613 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evans122449b2012-04-06 00:35:09 -07001614 } else {
Jason Evans9c640bf2014-09-11 16:20:44 -07001615 if (config_fill && unlikely(opt_junk)) {
Jason Evans122449b2012-04-06 00:35:09 -07001616 arena_alloc_junk_small(ret, &arena_bin_info[binind],
1617 true);
1618 }
Jason Evansbd87b012014-04-15 16:35:08 -07001619 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001620 memset(ret, 0, size);
Jason Evans122449b2012-04-06 00:35:09 -07001621 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001622
1623 return (ret);
1624}
1625
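/*
 * Allocate a large run directly from the arena.  The usable pages are
 * obtained via arena_miscelm_to_rpages(), and large allocation stats are
 * updated under arena->lock.
 */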
1626void *
Jason Evanse476f8a2010-01-16 09:53:50 -08001627arena_malloc_large(arena_t *arena, size_t size, bool zero)
1628{
1629 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07001630 size_t usize;
Jason Evans0c5dd032014-09-29 01:31:39 -07001631 arena_run_t *run;
1632 arena_chunk_map_misc_t *miscelm;
Jason Evans88c222c2013-02-06 11:59:30 -08001633 UNUSED bool idump;
Jason Evanse476f8a2010-01-16 09:53:50 -08001634
1635 /* Large allocation. */
Jason Evans155bfa72014-10-05 17:54:10 -07001636 usize = s2u(size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001637 malloc_mutex_lock(&arena->lock);
Jason Evans155bfa72014-10-05 17:54:10 -07001638 run = arena_run_alloc_large(arena, usize, zero);
Jason Evans0c5dd032014-09-29 01:31:39 -07001639 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001640 malloc_mutex_unlock(&arena->lock);
1641 return (NULL);
1642 }
Jason Evans0c5dd032014-09-29 01:31:39 -07001643 miscelm = arena_run_to_miscelm(run);
1644 ret = arena_miscelm_to_rpages(miscelm);
Jason Evans7372b152012-02-10 20:22:09 -08001645 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07001646 index_t index = size2index(usize) - NBINS;
1647
Jason Evans7372b152012-02-10 20:22:09 -08001648 arena->stats.nmalloc_large++;
1649 arena->stats.nrequests_large++;
Jason Evans155bfa72014-10-05 17:54:10 -07001650 arena->stats.allocated_large += usize;
1651 arena->stats.lstats[index].nmalloc++;
1652 arena->stats.lstats[index].nrequests++;
1653 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001654 }
Jason Evans7372b152012-02-10 20:22:09 -08001655 if (config_prof)
Jason Evans155bfa72014-10-05 17:54:10 -07001656 idump = arena_prof_accum_locked(arena, usize);
Jason Evanse476f8a2010-01-16 09:53:50 -08001657 malloc_mutex_unlock(&arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08001658 if (config_prof && idump)
1659 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08001660
Jason Evans551ebc42014-10-03 10:16:09 -07001661 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08001662 if (config_fill) {
Jason Evans9c640bf2014-09-11 16:20:44 -07001663 if (unlikely(opt_junk))
Jason Evans155bfa72014-10-05 17:54:10 -07001664 memset(ret, 0xa5, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07001665 else if (unlikely(opt_zero))
Jason Evans155bfa72014-10-05 17:54:10 -07001666 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08001667 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001668 }
1669
1670 return (ret);
1671}
1672
Jason Evanse476f8a2010-01-16 09:53:50 -08001673/* Only handles large allocations that require more than page alignment. */
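/*
 * Alignment is achieved by over-allocating (size + alignment - PAGE) bytes
 * and then trimming the leading and trailing pages that fall outside the
 * aligned region via arena_run_trim_head()/arena_run_trim_tail().
 */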
1674void *
Jason Evans5ff709c2012-04-11 18:13:45 -07001675arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001676{
1677 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07001678 size_t alloc_size, leadsize, trailsize;
1679 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001680 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07001681 arena_chunk_map_misc_t *miscelm;
1682 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001683
1684 assert((size & PAGE_MASK) == 0);
Jason Evans93443682010-10-20 17:39:18 -07001685
1686 alignment = PAGE_CEILING(alignment);
Jason Evans5ff709c2012-04-11 18:13:45 -07001687 alloc_size = size + alignment - PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001688
1689 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001690 run = arena_run_alloc_large(arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07001691 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001692 malloc_mutex_unlock(&arena->lock);
1693 return (NULL);
1694 }
Jason Evans5ff709c2012-04-11 18:13:45 -07001695 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07001696 miscelm = arena_run_to_miscelm(run);
1697 rpages = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08001698
Jason Evans0c5dd032014-09-29 01:31:39 -07001699 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
1700 (uintptr_t)rpages;
Jason Evans5ff709c2012-04-11 18:13:45 -07001701 assert(alloc_size >= leadsize + size);
1702 trailsize = alloc_size - leadsize - size;
Jason Evans5ff709c2012-04-11 18:13:45 -07001703 if (leadsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07001704 arena_chunk_map_misc_t *head_miscelm = miscelm;
1705 arena_run_t *head_run = run;
1706
1707 miscelm = arena_miscelm_get(chunk,
1708 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
1709 LG_PAGE));
1710 run = &miscelm->run;
1711
1712 arena_run_trim_head(arena, chunk, head_run, alloc_size,
1713 alloc_size - leadsize);
Jason Evans5ff709c2012-04-11 18:13:45 -07001714 }
1715 if (trailsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07001716 arena_run_trim_tail(arena, chunk, run, size + trailsize, size,
Jason Evans5ff709c2012-04-11 18:13:45 -07001717 false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001718 }
Jason Evans0c5dd032014-09-29 01:31:39 -07001719 arena_run_init_large(arena, run, size, zero);
1720 ret = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08001721
Jason Evans7372b152012-02-10 20:22:09 -08001722 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07001723 index_t index = size2index(size) - NBINS;
1724
Jason Evans7372b152012-02-10 20:22:09 -08001725 arena->stats.nmalloc_large++;
1726 arena->stats.nrequests_large++;
1727 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07001728 arena->stats.lstats[index].nmalloc++;
1729 arena->stats.lstats[index].nrequests++;
1730 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001731 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001732 malloc_mutex_unlock(&arena->lock);
1733
Jason Evans551ebc42014-10-03 10:16:09 -07001734 if (config_fill && !zero) {
Jason Evans9c640bf2014-09-11 16:20:44 -07001735 if (unlikely(opt_junk))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001736 memset(ret, 0xa5, size);
Jason Evans9c640bf2014-09-11 16:20:44 -07001737 else if (unlikely(opt_zero))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001738 memset(ret, 0, size);
1739 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001740 return (ret);
1741}
1742
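/*
 * Called when a small request was promoted to a LARGE_MINCLASS run for
 * profiling purposes; records the small size class in the chunk map so that
 * isalloc(ptr, true) reports the original small size.
 */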
Jason Evans0b270a92010-03-31 16:45:04 -07001743void
1744arena_prof_promoted(const void *ptr, size_t size)
1745{
1746 arena_chunk_t *chunk;
Jason Evans155bfa72014-10-05 17:54:10 -07001747 size_t pageind;
1748 index_t binind;
Jason Evans0b270a92010-03-31 16:45:04 -07001749
Jason Evans78f73522012-04-18 13:38:40 -07001750 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07001751 assert(ptr != NULL);
1752 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evans155bfa72014-10-05 17:54:10 -07001753 assert(isalloc(ptr, false) == LARGE_MINCLASS);
1754 assert(isalloc(ptr, true) == LARGE_MINCLASS);
Jason Evansb1726102012-02-28 16:50:47 -08001755 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07001756
1757 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07001758 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans155bfa72014-10-05 17:54:10 -07001759 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08001760 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07001761 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07001762
Jason Evans155bfa72014-10-05 17:54:10 -07001763 assert(isalloc(ptr, false) == LARGE_MINCLASS);
Jason Evans122449b2012-04-06 00:35:09 -07001764 assert(isalloc(ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07001765}
Jason Evans6109fe02010-02-10 10:37:56 -08001766
Jason Evanse476f8a2010-01-16 09:53:50 -08001767static void
Jason Evans088e6a02010-10-18 00:04:44 -07001768arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08001769 arena_bin_t *bin)
1770{
Jason Evanse476f8a2010-01-16 09:53:50 -08001771
Jason Evans19b3d612010-03-18 20:36:40 -07001772 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08001773 if (run == bin->runcur)
1774 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001775 else {
Jason Evans155bfa72014-10-05 17:54:10 -07001776 index_t binind = arena_bin_index(chunk->arena, bin);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001777 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1778
1779 if (bin_info->nregs != 1) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07001780 /*
1781 * This block's conditional is necessary because if the
1782 * run only contains one region, then it never gets
1783 * inserted into the non-full runs tree.
1784 */
Jason Evanse7a10582012-02-13 17:36:52 -08001785 arena_bin_runs_remove(bin, run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001786 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001787 }
Jason Evans088e6a02010-10-18 00:04:44 -07001788}
1789
1790static void
1791arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1792 arena_bin_t *bin)
1793{
Jason Evans088e6a02010-10-18 00:04:44 -07001794
1795 assert(run != bin->runcur);
Jason Evans0c5dd032014-09-29 01:31:39 -07001796 assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
1797 NULL);
Jason Evans86815df2010-03-13 20:32:56 -08001798
Jason Evanse00572b2010-03-14 19:43:56 -07001799 malloc_mutex_unlock(&bin->lock);
1800 /******************************/
Jason Evans86815df2010-03-13 20:32:56 -08001801 malloc_mutex_lock(&arena->lock);
Jason Evans381c23d2014-10-10 23:01:03 -07001802 arena_run_dalloc(arena, run, true, false);
Jason Evans86815df2010-03-13 20:32:56 -08001803 malloc_mutex_unlock(&arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07001804 /****************************/
1805 malloc_mutex_lock(&bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08001806 if (config_stats)
1807 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08001808}
1809
Jason Evans940a2e02010-10-17 17:51:37 -07001810static void
1811arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1812 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08001813{
Jason Evanse476f8a2010-01-16 09:53:50 -08001814
Jason Evans8de6a022010-10-17 20:57:30 -07001815 /*
Jason Evanse7a10582012-02-13 17:36:52 -08001816 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
1817 * non-full run. It is okay to NULL runcur out rather than proactively
1818 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07001819 */
Jason Evanse7a10582012-02-13 17:36:52 -08001820 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07001821 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08001822 if (bin->runcur->nfree > 0)
1823 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07001824 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08001825 if (config_stats)
1826 bin->stats.reruns++;
1827 } else
1828 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07001829}
1830
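/*
 * Return a small region to its run.  If the run becomes completely free it is
 * dissociated from the bin and returned to the arena; if it just transitioned
 * from full to non-full it is made available for future allocations via
 * arena_bin_lower_run() (unless it is already bin->runcur).  The junked
 * argument indicates that junk filling was already performed by the caller,
 * so it is skipped here.
 */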
Jason Evansfc0b3b72014-10-09 17:54:06 -07001831static void
1832arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1833 arena_chunk_map_bits_t *bitselm, bool junked)
Jason Evans940a2e02010-10-17 17:51:37 -07001834{
Jason Evans0c5dd032014-09-29 01:31:39 -07001835 size_t pageind, rpages_ind;
Jason Evans940a2e02010-10-17 17:51:37 -07001836 arena_run_t *run;
1837 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02001838 arena_bin_info_t *bin_info;
Jason Evans155bfa72014-10-05 17:54:10 -07001839 index_t binind;
Jason Evans940a2e02010-10-17 17:51:37 -07001840
Jason Evansae4c7b42012-04-02 07:04:34 -07001841 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07001842 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
1843 run = &arena_miscelm_get(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07001844 binind = run->binind;
1845 bin = &arena->bins[binind];
Mike Hommey8b499712012-04-24 23:22:02 +02001846 bin_info = &arena_bin_info[binind];
Jason Evans940a2e02010-10-17 17:51:37 -07001847
Jason Evansfc0b3b72014-10-09 17:54:06 -07001848 if (!junked && config_fill && unlikely(opt_junk))
Jason Evans122449b2012-04-06 00:35:09 -07001849 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07001850
1851 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001852 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07001853 arena_dissociate_bin_run(chunk, run, bin);
Jason Evans8de6a022010-10-17 20:57:30 -07001854 arena_dalloc_bin_run(arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07001855 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07001856 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08001857
Jason Evans7372b152012-02-10 20:22:09 -08001858 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08001859 bin->stats.ndalloc++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07001860 bin->stats.curregs--;
Jason Evans7372b152012-02-10 20:22:09 -08001861 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001862}
1863
Jason Evanse476f8a2010-01-16 09:53:50 -08001864void
Jason Evansfc0b3b72014-10-09 17:54:06 -07001865arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1866 arena_chunk_map_bits_t *bitselm)
1867{
1868
1869 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
1870}
1871
1872void
Jason Evans203484e2012-05-02 00:30:36 -07001873arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001874 size_t pageind, arena_chunk_map_bits_t *bitselm)
Jason Evans203484e2012-05-02 00:30:36 -07001875{
1876 arena_run_t *run;
1877 arena_bin_t *bin;
Jason Evans0c5dd032014-09-29 01:31:39 -07001878 size_t rpages_ind;
Jason Evans203484e2012-05-02 00:30:36 -07001879
Jason Evans0c5dd032014-09-29 01:31:39 -07001880 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
1881 run = &arena_miscelm_get(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07001882 bin = &arena->bins[run->binind];
Jason Evans203484e2012-05-02 00:30:36 -07001883 malloc_mutex_lock(&bin->lock);
Jason Evansfc0b3b72014-10-09 17:54:06 -07001884 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
Jason Evans203484e2012-05-02 00:30:36 -07001885 malloc_mutex_unlock(&bin->lock);
1886}
1887
1888void
1889arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1890 size_t pageind)
1891{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001892 arena_chunk_map_bits_t *bitselm;
Jason Evans203484e2012-05-02 00:30:36 -07001893
1894 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07001895 /* arena_ptr_small_binind_get() does extra sanity checking. */
1896 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
1897 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07001898 }
Qinfan Wuff6a31d2014-08-29 13:34:40 -07001899 bitselm = arena_bitselm_get(chunk, pageind);
1900 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
Jason Evans203484e2012-05-02 00:30:36 -07001901}
Jason Evanse476f8a2010-01-16 09:53:50 -08001902
Jason Evans6b694c42014-01-07 16:47:56 -08001903#ifdef JEMALLOC_JET
1904#undef arena_dalloc_junk_large
1905#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
1906#endif
Jason Evansfc0b3b72014-10-09 17:54:06 -07001907void
Jason Evans6b694c42014-01-07 16:47:56 -08001908arena_dalloc_junk_large(void *ptr, size_t usize)
1909{
1910
Jason Evans9c640bf2014-09-11 16:20:44 -07001911 if (config_fill && unlikely(opt_junk))
Jason Evans6b694c42014-01-07 16:47:56 -08001912 memset(ptr, 0x5a, usize);
1913}
1914#ifdef JEMALLOC_JET
1915#undef arena_dalloc_junk_large
1916#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
1917arena_dalloc_junk_large_t *arena_dalloc_junk_large =
1918 JEMALLOC_N(arena_dalloc_junk_large_impl);
1919#endif
1920
Jason Evanse476f8a2010-01-16 09:53:50 -08001921void
Jason Evansfc0b3b72014-10-09 17:54:06 -07001922arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
1923 void *ptr, bool junked)
Jason Evanse476f8a2010-01-16 09:53:50 -08001924{
Jason Evans0c5dd032014-09-29 01:31:39 -07001925 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1926 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
1927 arena_run_t *run = &miscelm->run;
Jason Evans13668262010-01-31 03:57:29 -08001928
Jason Evans7372b152012-02-10 20:22:09 -08001929 if (config_fill || config_stats) {
Jason Evans6b694c42014-01-07 16:47:56 -08001930 size_t usize = arena_mapbits_large_size_get(chunk, pageind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001931
Jason Evansfc0b3b72014-10-09 17:54:06 -07001932 if (!junked)
1933 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08001934 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07001935 index_t index = size2index(usize) - NBINS;
1936
Jason Evans7372b152012-02-10 20:22:09 -08001937 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08001938 arena->stats.allocated_large -= usize;
Jason Evans155bfa72014-10-05 17:54:10 -07001939 arena->stats.lstats[index].ndalloc++;
1940 arena->stats.lstats[index].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08001941 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001942 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001943
Jason Evans0c5dd032014-09-29 01:31:39 -07001944 arena_run_dalloc(arena, run, true, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001945}
1946
Jason Evans203484e2012-05-02 00:30:36 -07001947void
Jason Evansfc0b3b72014-10-09 17:54:06 -07001948arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
1949 void *ptr)
1950{
1951
1952 arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
1953}
1954
1955void
Jason Evans203484e2012-05-02 00:30:36 -07001956arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1957{
1958
1959 malloc_mutex_lock(&arena->lock);
Jason Evansfc0b3b72014-10-09 17:54:06 -07001960 arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
Jason Evans203484e2012-05-02 00:30:36 -07001961 malloc_mutex_unlock(&arena->lock);
1962}
1963
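/*
 * Shrink a large allocation in place by trimming its trailing pages, then
 * move the large allocation stats from the old size class to the new one.
 */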
Jason Evanse476f8a2010-01-16 09:53:50 -08001964static void
1965arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001966 size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08001967{
Jason Evans0c5dd032014-09-29 01:31:39 -07001968 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1969 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
1970 arena_run_t *run = &miscelm->run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001971
1972 assert(size < oldsize);
1973
1974 /*
1975 * Shrink the run, and make trailing pages available for other
1976 * allocations.
1977 */
1978 malloc_mutex_lock(&arena->lock);
Jason Evans0c5dd032014-09-29 01:31:39 -07001979 arena_run_trim_tail(arena, chunk, run, oldsize, size, true);
Jason Evans7372b152012-02-10 20:22:09 -08001980 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07001981 index_t oldindex = size2index(oldsize) - NBINS;
1982 index_t index = size2index(size) - NBINS;
1983
Jason Evans7372b152012-02-10 20:22:09 -08001984 arena->stats.ndalloc_large++;
1985 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07001986 arena->stats.lstats[oldindex].ndalloc++;
1987 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08001988
Jason Evans7372b152012-02-10 20:22:09 -08001989 arena->stats.nmalloc_large++;
1990 arena->stats.nrequests_large++;
1991 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07001992 arena->stats.lstats[index].nmalloc++;
1993 arena->stats.lstats[index].nrequests++;
1994 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001995 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001996 malloc_mutex_unlock(&arena->lock);
1997}
1998
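/*
 * Try to grow a large allocation in place by splitting pages off of the
 * immediately following run, provided that run is unallocated and large
 * enough.  Returns false on success, true if in-place growth is impossible.
 */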
1999static bool
2000arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07002001 size_t oldsize, size_t size, size_t extra, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002002{
Jason Evansae4c7b42012-04-02 07:04:34 -07002003 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2004 size_t npages = oldsize >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002005 size_t followsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002006 size_t usize_min = s2u(size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002007
Jason Evans203484e2012-05-02 00:30:36 -07002008 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
Jason Evanse476f8a2010-01-16 09:53:50 -08002009
2010 /* Try to extend the run. */
Jason Evans155bfa72014-10-05 17:54:10 -07002011 assert(usize_min > oldsize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002012 malloc_mutex_lock(&arena->lock);
Jason Evans7393f442010-10-01 17:35:43 -07002013 if (pageind + npages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07002014 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
2015 (followsize = arena_mapbits_unallocated_size_get(chunk,
Jason Evans155bfa72014-10-05 17:54:10 -07002016 pageind+npages)) >= usize_min - oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002017 /*
2018 * The next run is available and sufficiently large. Split the
2019 * following run, then merge the first part with the existing
2020 * allocation.
2021 */
Jason Evans155bfa72014-10-05 17:54:10 -07002022 size_t flag_dirty, splitsize, usize;
2023
2024 usize = s2u(size + extra);
2025 while (oldsize + followsize < usize)
2026 usize = index2size(size2index(usize)-1);
2027 assert(usize >= usize_min);
2028 splitsize = usize - oldsize;
2029
Jason Evans0c5dd032014-09-29 01:31:39 -07002030 arena_run_t *run = &arena_miscelm_get(chunk,
2031 pageind+npages)->run;
2032 arena_run_split_large(arena, run, splitsize, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08002033
Jason Evans088e6a02010-10-18 00:04:44 -07002034 size = oldsize + splitsize;
Jason Evansae4c7b42012-04-02 07:04:34 -07002035 npages = size >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07002036
2037 /*
2038 * Mark the extended run as dirty if either portion of the run
2039 * was dirty before allocation. This is rather pedantic,
2040 * because there's not actually any sequence of events that
2041 * could cause the resulting run to be passed to
2042 * arena_run_dalloc() with the dirty argument set to false
2043 * (which is when dirty flag consistency would really matter).
2044 */
Jason Evans203484e2012-05-02 00:30:36 -07002045 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
2046 arena_mapbits_dirty_get(chunk, pageind+npages-1);
2047 arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
2048 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08002049
Jason Evans7372b152012-02-10 20:22:09 -08002050 if (config_stats) {
Jason Evans155bfa72014-10-05 17:54:10 -07002051 index_t oldindex = size2index(oldsize) - NBINS;
2052 index_t index = size2index(size) - NBINS;
2053
Jason Evans7372b152012-02-10 20:22:09 -08002054 arena->stats.ndalloc_large++;
2055 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002056 arena->stats.lstats[oldindex].ndalloc++;
2057 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08002058
Jason Evans7372b152012-02-10 20:22:09 -08002059 arena->stats.nmalloc_large++;
2060 arena->stats.nrequests_large++;
2061 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07002062 arena->stats.lstats[index].nmalloc++;
2063 arena->stats.lstats[index].nrequests++;
2064 arena->stats.lstats[index].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07002065 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002066 malloc_mutex_unlock(&arena->lock);
2067 return (false);
2068 }
2069 malloc_mutex_unlock(&arena->lock);
2070
2071 return (true);
2072}
2073
Jason Evans6b694c42014-01-07 16:47:56 -08002074#ifdef JEMALLOC_JET
2075#undef arena_ralloc_junk_large
2076#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
2077#endif
2078static void
2079arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
2080{
2081
Jason Evans9c640bf2014-09-11 16:20:44 -07002082 if (config_fill && unlikely(opt_junk)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002083 memset((void *)((uintptr_t)ptr + usize), 0x5a,
2084 old_usize - usize);
2085 }
2086}
2087#ifdef JEMALLOC_JET
2088#undef arena_ralloc_junk_large
2089#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
2090arena_ralloc_junk_large_t *arena_ralloc_junk_large =
2091 JEMALLOC_N(arena_ralloc_junk_large_impl);
2092#endif
2093
Jason Evanse476f8a2010-01-16 09:53:50 -08002094/*
2095 * Try to resize a large allocation, in order to avoid copying. This will
2096 * always fail when growing an object if the following run is already in use.
2097 */
2098static bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07002099arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
2100 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002101{
Jason Evans155bfa72014-10-05 17:54:10 -07002102 size_t usize;
Jason Evanse476f8a2010-01-16 09:53:50 -08002103
Jason Evans155bfa72014-10-05 17:54:10 -07002104 /* Make sure extra can't cause size_t overflow. */
Daniel Micay809b0ac2014-10-23 10:30:52 -04002105 if (unlikely(extra >= arena_maxclass))
Jason Evans155bfa72014-10-05 17:54:10 -07002106 return (true);
2107
2108 usize = s2u(size + extra);
2109 if (usize == oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002110 /* Same size class. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002111 return (false);
2112 } else {
2113 arena_chunk_t *chunk;
2114 arena_t *arena;
2115
2116 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2117 arena = chunk->arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08002118
Jason Evans155bfa72014-10-05 17:54:10 -07002119 if (usize < oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002120			/* Fill before shrinking in order to avoid a race. */
Jason Evans155bfa72014-10-05 17:54:10 -07002121 arena_ralloc_junk_large(ptr, oldsize, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002122 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
Jason Evans155bfa72014-10-05 17:54:10 -07002123 usize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002124 return (false);
2125 } else {
2126 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
Jason Evans155bfa72014-10-05 17:54:10 -07002127 oldsize, size, extra, zero);
Jason Evans551ebc42014-10-03 10:16:09 -07002128 if (config_fill && !ret && !zero) {
Jason Evans9c640bf2014-09-11 16:20:44 -07002129 if (unlikely(opt_junk)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002130 memset((void *)((uintptr_t)ptr +
2131 oldsize), 0xa5, isalloc(ptr,
2132 config_prof) - oldsize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002133 } else if (unlikely(opt_zero)) {
Jason Evans6b694c42014-01-07 16:47:56 -08002134 memset((void *)((uintptr_t)ptr +
2135 oldsize), 0, isalloc(ptr,
2136 config_prof) - oldsize);
2137 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002138 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002139 return (ret);
2140 }
2141 }
2142}
2143
Jason Evansb2c31662014-01-12 15:05:44 -08002144bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07002145arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
2146 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002147{
Jason Evanse476f8a2010-01-16 09:53:50 -08002148
Jason Evans8e3c3c62010-09-17 15:46:18 -07002149 /*
2150 * Avoid moving the allocation if the size class can be left the same.
2151 */
Daniel Micay809b0ac2014-10-23 10:30:52 -04002152 if (likely(oldsize <= arena_maxclass)) {
Jason Evansb1726102012-02-28 16:50:47 -08002153 if (oldsize <= SMALL_MAXCLASS) {
Jason Evans155bfa72014-10-05 17:54:10 -07002154 assert(arena_bin_info[size2index(oldsize)].reg_size
Jason Evans49f7e8f2011-03-15 13:59:15 -07002155 == oldsize);
Jason Evans155bfa72014-10-05 17:54:10 -07002156 if ((size + extra <= SMALL_MAXCLASS && size2index(size +
2157 extra) == size2index(oldsize)) || (size <= oldsize
2158 && size + extra >= oldsize))
Jason Evansb2c31662014-01-12 15:05:44 -08002159 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002160 } else {
2161 assert(size <= arena_maxclass);
Jason Evansb1726102012-02-28 16:50:47 -08002162 if (size + extra > SMALL_MAXCLASS) {
Jason Evans551ebc42014-10-03 10:16:09 -07002163 if (!arena_ralloc_large(ptr, oldsize, size,
2164 extra, zero))
Jason Evansb2c31662014-01-12 15:05:44 -08002165 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002166 }
2167 }
2168 }
2169
Jason Evans8e3c3c62010-09-17 15:46:18 -07002170 /* Reallocation would require a move. */
Jason Evansb2c31662014-01-12 15:05:44 -08002171 return (true);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002172}
Jason Evanse476f8a2010-01-16 09:53:50 -08002173
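/*
 * Reallocate, moving the object only when arena_ralloc_no_move() reports that
 * a move is required.  In that case a new allocation is obtained (honoring
 * alignment if requested), at most size bytes are copied, and the old object
 * is freed via isqalloc().
 */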
Jason Evans8e3c3c62010-09-17 15:46:18 -07002174void *
Jason Evans5460aa62014-09-22 21:09:23 -07002175arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
Jason Evans609ae592012-10-11 13:53:15 -07002176 size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
2177 bool try_tcache_dalloc)
Jason Evans8e3c3c62010-09-17 15:46:18 -07002178{
2179 void *ret;
2180 size_t copysize;
2181
2182 /* Try to avoid moving the allocation. */
Jason Evans551ebc42014-10-03 10:16:09 -07002183 if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
Jason Evansb2c31662014-01-12 15:05:44 -08002184 return (ptr);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002185
Jason Evans8e3c3c62010-09-17 15:46:18 -07002186 /*
2187 * size and oldsize are different enough that we need to move the
2188 * object. In that case, fall back to allocating new space and
2189 * copying.
2190 */
Jason Evans38d92102011-03-23 00:37:29 -07002191 if (alignment != 0) {
Jason Evans5ff709c2012-04-11 18:13:45 -07002192 size_t usize = sa2u(size + extra, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002193 if (usize == 0)
2194 return (NULL);
Jason Evans5460aa62014-09-22 21:09:23 -07002195 ret = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc,
2196 arena);
2197 } else {
2198 ret = arena_malloc(tsd, arena, size + extra, zero,
2199 try_tcache_alloc);
2200 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07002201
2202 if (ret == NULL) {
2203 if (extra == 0)
2204 return (NULL);
2205 /* Try again, this time without extra. */
Jason Evans38d92102011-03-23 00:37:29 -07002206 if (alignment != 0) {
Jason Evans5ff709c2012-04-11 18:13:45 -07002207 size_t usize = sa2u(size, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002208 if (usize == 0)
2209 return (NULL);
Jason Evans5460aa62014-09-22 21:09:23 -07002210 ret = ipalloct(tsd, usize, alignment, zero,
2211 try_tcache_alloc, arena);
2212 } else {
2213 ret = arena_malloc(tsd, arena, size, zero,
2214 try_tcache_alloc);
2215 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07002216
2217 if (ret == NULL)
2218 return (NULL);
2219 }
2220
2221 /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
2222
2223 /*
2224 * Copy at most size bytes (not size+extra), since the caller has no
2225 * expectation that the extra bytes will be reliably preserved.
2226 */
Jason Evanse476f8a2010-01-16 09:53:50 -08002227 copysize = (size < oldsize) ? size : oldsize;
Jason Evansbd87b012014-04-15 16:35:08 -07002228 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002229 memcpy(ret, ptr, copysize);
Daniel Micaya9ea10d2014-10-16 15:05:02 -04002230 isqalloc(tsd, ptr, oldsize, try_tcache_dalloc);
Jason Evanse476f8a2010-01-16 09:53:50 -08002231 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08002232}
2233
Jason Evans609ae592012-10-11 13:53:15 -07002234dss_prec_t
2235arena_dss_prec_get(arena_t *arena)
2236{
2237 dss_prec_t ret;
2238
2239 malloc_mutex_lock(&arena->lock);
2240 ret = arena->dss_prec;
2241 malloc_mutex_unlock(&arena->lock);
2242 return (ret);
2243}
2244
Jason Evans4d434ad2014-04-15 12:09:48 -07002245bool
Jason Evans609ae592012-10-11 13:53:15 -07002246arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2247{
2248
Jason Evans551ebc42014-10-03 10:16:09 -07002249 if (!have_dss)
Jason Evans4d434ad2014-04-15 12:09:48 -07002250 return (dss_prec != dss_prec_disabled);
Jason Evans609ae592012-10-11 13:53:15 -07002251 malloc_mutex_lock(&arena->lock);
2252 arena->dss_prec = dss_prec;
2253 malloc_mutex_unlock(&arena->lock);
Jason Evans4d434ad2014-04-15 12:09:48 -07002254 return (false);
Jason Evans609ae592012-10-11 13:53:15 -07002255}
2256
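/*
 * Merge this arena's statistics into the caller-provided accumulators.  Arena
 * and large/huge stats are read under arena->lock; each bin's stats are read
 * under that bin's lock.
 */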
2257void
2258arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
2259 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
Jason Evans3c4d92e2014-10-12 22:53:59 -07002260 malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
Jason Evans609ae592012-10-11 13:53:15 -07002261{
2262 unsigned i;
2263
2264 malloc_mutex_lock(&arena->lock);
2265 *dss = dss_prec_names[arena->dss_prec];
2266 *nactive += arena->nactive;
2267 *ndirty += arena->ndirty;
2268
2269 astats->mapped += arena->stats.mapped;
2270 astats->npurge += arena->stats.npurge;
2271 astats->nmadvise += arena->stats.nmadvise;
2272 astats->purged += arena->stats.purged;
2273 astats->allocated_large += arena->stats.allocated_large;
2274 astats->nmalloc_large += arena->stats.nmalloc_large;
2275 astats->ndalloc_large += arena->stats.ndalloc_large;
2276 astats->nrequests_large += arena->stats.nrequests_large;
Jason Evanse2deab72014-05-15 22:22:27 -07002277 astats->allocated_huge += arena->stats.allocated_huge;
2278 astats->nmalloc_huge += arena->stats.nmalloc_huge;
2279 astats->ndalloc_huge += arena->stats.ndalloc_huge;
Jason Evans609ae592012-10-11 13:53:15 -07002280
2281 for (i = 0; i < nlclasses; i++) {
2282 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
2283 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
2284 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
2285 lstats[i].curruns += arena->stats.lstats[i].curruns;
2286 }
Jason Evans3c4d92e2014-10-12 22:53:59 -07002287
2288 for (i = 0; i < nhclasses; i++) {
2289 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
2290 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
2291 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
2292 }
Jason Evans609ae592012-10-11 13:53:15 -07002293 malloc_mutex_unlock(&arena->lock);
2294
2295 for (i = 0; i < NBINS; i++) {
2296 arena_bin_t *bin = &arena->bins[i];
2297
2298 malloc_mutex_lock(&bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07002299 bstats[i].nmalloc += bin->stats.nmalloc;
2300 bstats[i].ndalloc += bin->stats.ndalloc;
2301 bstats[i].nrequests += bin->stats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002302 bstats[i].curregs += bin->stats.curregs;
Jason Evans609ae592012-10-11 13:53:15 -07002303 if (config_tcache) {
2304 bstats[i].nfills += bin->stats.nfills;
2305 bstats[i].nflushes += bin->stats.nflushes;
2306 }
2307 bstats[i].nruns += bin->stats.nruns;
2308 bstats[i].reruns += bin->stats.reruns;
2309 bstats[i].curruns += bin->stats.curruns;
2310 malloc_mutex_unlock(&bin->lock);
2311 }
2312}
2313
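/*
 * Create and initialize an arena.  When stats are enabled, the arena header
 * and the lstats/hstats arrays are carved out of a single base allocation;
 * the offset arithmetic farther down must agree with the size passed to
 * base_alloc().
 */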
Jason Evans8bb31982014-10-07 23:14:57 -07002314arena_t *
2315arena_new(unsigned ind)
Jason Evanse476f8a2010-01-16 09:53:50 -08002316{
Jason Evans8bb31982014-10-07 23:14:57 -07002317 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08002318 unsigned i;
2319 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002320
Jason Evans8bb31982014-10-07 23:14:57 -07002321 /*
Jason Evans3c4d92e2014-10-12 22:53:59 -07002322 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
2323 * because there is no way to clean up if base_alloc() OOMs.
Jason Evans8bb31982014-10-07 23:14:57 -07002324 */
2325 if (config_stats) {
2326 arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
Jason Evans3c4d92e2014-10-12 22:53:59 -07002327		    + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)) +
2328		    nhclasses * sizeof(malloc_huge_stats_t));
Jason Evans8bb31982014-10-07 23:14:57 -07002329 } else
2330 arena = (arena_t *)base_alloc(sizeof(arena_t));
2331 if (arena == NULL)
2332 return (NULL);
2333
	arena->ind = ind;
	arena->nthreads = 0;
	arena->chunk_alloc = chunk_alloc_default;
	arena->chunk_dalloc = chunk_dalloc_default;

	if (malloc_mutex_init(&arena->lock))
		return (NULL);

	if (config_stats) {
		memset(&arena->stats, 0, sizeof(arena_stats_t));
		arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(sizeof(arena_t)));
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(sizeof(arena_t)) +
		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
		memset(arena->stats.hstats, 0, nhclasses *
		    sizeof(malloc_huge_stats_t));
		if (config_tcache)
			ql_new(&arena->tcache_ql);
	}

	if (config_prof)
		arena->prof_accumbytes = 0;

	arena->dss_prec = chunk_dss_prec_get();

	arena->spare = NULL;

	arena->nactive = 0;
	arena->ndirty = 0;

	arena_avail_tree_new(&arena->runs_avail);
	ql_new(&arena->runs_dirty);

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (NULL);
		bin->runcur = NULL;
		arena_run_tree_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (arena);
}
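
/*
 * Failure handling: the arena and its stats arrays come from base_alloc(),
 * which has no matching free, so a mutex initialization failure simply returns
 * NULL and abandons the partially initialized block rather than trying to
 * unwind it.  A minimal caller sketch, where arenas[] is a hypothetical
 * registry owned by the embedding code:
 *
 *	arenas[ind] = arena_new(ind);
 *	if (arenas[ind] == NULL)
 *		return (true);
 */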

/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size <= arena_maxrun
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
 * these settings are all interdependent.
 */
static void
bin_info_run_size_calc(arena_bin_info_t *bin_info)
{
	size_t pad_size;
	size_t try_run_size, perfect_run_size, actual_run_size;
	uint32_t try_nregs, perfect_nregs, actual_nregs;

	/*
	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed to
	 * align the regions.  The padding allows each redzone to be half the
	 * minimum alignment; without the padding, each redzone would have to
	 * be twice as large in order to maintain alignment.
	 */
	if (config_fill && unlikely(opt_redzone)) {
		size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
		    1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			pad_size = 0;
		} else {
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		}
	} else {
		bin_info->redzone_size = 0;
		pad_size = 0;
	}
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);

	/*
	 * Compute run size under ideal conditions (no redzones, no limit on
	 * run size).
	 */
	try_run_size = PAGE;
	try_nregs = try_run_size / bin_info->reg_size;
	do {
		perfect_run_size = try_run_size;
		perfect_nregs = try_nregs;

		try_run_size += PAGE;
		try_nregs = try_run_size / bin_info->reg_size;
	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
	assert(perfect_nregs <= RUN_MAXREGS);

	actual_run_size = perfect_run_size;
	actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;

	/*
	 * Redzones can require enough padding that not even a single region
	 * can fit within the number of pages that would normally be dedicated
	 * to a run for this size class.  Increase the run size until at least
	 * one region fits.
	 */
	while (actual_nregs == 0) {
		assert(config_fill && unlikely(opt_redzone));

		actual_run_size += PAGE;
		actual_nregs = (actual_run_size - pad_size) /
		    bin_info->reg_interval;
	}

	/*
	 * Make sure that the run will fit within an arena chunk.
	 */
	while (actual_run_size > arena_maxrun) {
		actual_run_size -= PAGE;
		actual_nregs = (actual_run_size - pad_size) /
		    bin_info->reg_interval;
	}
	assert(actual_nregs > 0);

	/* Copy final settings. */
	bin_info->run_size = actual_run_size;
	bin_info->nregs = actual_nregs;
	bin_info->reg0_offset = actual_run_size - (actual_nregs *
	    bin_info->reg_interval) - pad_size + bin_info->redzone_size;

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
}
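
/*
 * Illustrative walk-through of bin_info_run_size_calc(), assuming PAGE == 4096
 * and REDZONE_MINSIZE == 16 (both values are assumptions for the example, not
 * taken from a particular build):
 *
 *   reg_size == 48, redzones disabled:
 *     reg_interval == 48 and pad_size == 0.  The do/while loop stops at the
 *     smallest page multiple evenly divisible by 48, so perfect_run_size ==
 *     12288 (3 pages) and perfect_nregs == 256.  Neither adjustment loop
 *     fires, so run_size == 12288, nregs == 256, and reg0_offset == 0.
 *
 *   reg_size == 64, redzones enabled:
 *     align_min == 64 > REDZONE_MINSIZE, so redzone_size == 32, pad_size == 32,
 *     and reg_interval == 128.  perfect_run_size == 4096 with perfect_nregs ==
 *     64, but after accounting for redzones actual_nregs == (4096 - 32) / 128
 *     == 31, and reg0_offset == 4096 - 31*128 - 32 + 32 == 128, which satisfies
 *     the final consistency assert.
 */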

static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;

#define	BIN_INFO_INIT_bin_yes(index, size)				\
	bin_info = &arena_bin_info[index];				\
	bin_info->reg_size = size;					\
	bin_info_run_size_calc(bin_info);				\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define	BIN_INFO_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}
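
/*
 * What the macro machinery in bin_info_init() expands to, shown for a
 * hypothetical SIZE_CLASSES entry SC(2, 4, 4, 1, yes, 4); the real entries are
 * generated into size_classes.h and depend on the build configuration.  The
 * "yes" bin flag selects BIN_INFO_INIT_bin_yes, so this entry would contribute
 * the equivalent of:
 *
 *	bin_info = &arena_bin_info[2];
 *	bin_info->reg_size = (ZU(1)<<4) + (ZU(1)<<4);
 *	bin_info_run_size_calc(bin_info);
 *	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
 *
 * i.e. a 32-byte small size class.  Entries whose bin flag is "no" expand
 * through BIN_INFO_INIT_bin_no to nothing, so only the small (binned) size
 * classes populate arena_bin_info.
 */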

void
arena_boot(void)
{
	size_t header_size;
	unsigned i;

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		header_size = offsetof(arena_chunk_t, map_bits) +
		    ((sizeof(arena_chunk_map_bits_t) +
		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
	}
	assert(map_bias > 0);
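
	/*
	 * Example of the fixed-point iteration above, with illustrative values
	 * (chunk_npages == 512, offsetof(arena_chunk_t, map_bits) == 128, and
	 * 112 bytes of map_bits+map_misc state per page; all three numbers are
	 * assumptions for the example):
	 *
	 *   pass 1: map_bias == 0  -> header_size == 57472 -> map_bias == 15
	 *   pass 2: map_bias == 15 -> header_size == 55792 -> map_bias == 14
	 *   pass 3: map_bias == 14 -> header_size == 55904 -> map_bias == 14
	 *
	 * The third pass reproduces the second pass's result, so 14 header
	 * pages are reserved and the page map covers the remaining 498 pages.
	 */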

	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);
	arena_maxclass = index2size(size2index(chunksize)-1);
	if (arena_maxclass > arena_maxrun) {
		/*
		 * For small chunk sizes it's possible for there to be fewer
		 * non-header pages available than are necessary to serve the
		 * size classes just below chunksize.
		 */
		arena_maxclass = arena_maxrun;
	}
	assert(arena_maxclass > 0);
	nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS);
	nhclasses = NSIZES - nlclasses - NBINS;

	bin_info_init();
}
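
/*
 * Continuing the example above (chunksize == 2 MiB, LG_PAGE == 12, map_bias ==
 * 14, and assuming the default spacing of four size classes per doubling):
 * arena_maxrun == 2097152 - (14 << 12) == 2039808, and arena_maxclass ==
 * index2size(size2index(2 MiB) - 1) == 1835008 (1.75 MiB), which already fits
 * within arena_maxrun, so the clamp in the if-block never triggers.  Only for
 * very small chunk sizes do the header pages eat far enough into the chunk
 * that arena_maxclass must be clamped down to arena_maxrun.
 */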

void
arena_prefork(arena_t *arena)
{
	unsigned i;

	malloc_mutex_prefork(&arena->lock);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(&arena->bins[i].lock);
}

void
arena_postfork_parent(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(&arena->bins[i].lock);
	malloc_mutex_postfork_parent(&arena->lock);
}

void
arena_postfork_child(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(&arena->bins[i].lock);
	malloc_mutex_postfork_child(&arena->lock);
}