#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t	arena_bin_info[NBINS];

JEMALLOC_ALIGNED(CACHELINE)
const uint32_t	small_bin2size_tab[NBINS] = {
#define	B2S_bin_yes(size) \
	size,
#define	B2S_bin_no(size)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
	B2S_bin_##bin((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef B2S_bin_yes
#undef B2S_bin_no
#undef SC
};

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t	small_size2bin_tab[] = {
#define	S2B_3(i)	i,
#define	S2B_4(i)	S2B_3(i) S2B_3(i)
#define	S2B_5(i)	S2B_4(i) S2B_4(i)
#define	S2B_6(i)	S2B_5(i) S2B_5(i)
#define	S2B_7(i)	S2B_6(i) S2B_6(i)
#define	S2B_8(i)	S2B_7(i) S2B_7(i)
#define	S2B_9(i)	S2B_8(i) S2B_8(i)
#define	S2B_no(i)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
	S2B_##lg_delta_lookup(index)
	SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_no
#undef SC
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	arena_purge(arena_t *arena, bool all);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

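/*
 * arena_mapelm_to_pageind() recovers a page index from a map element's
 * address alone: the arena_chunk_map_t elements form a contiguous array
 * embedded in the chunk header, so the element's offset within the chunk,
 * minus offsetof(arena_chunk_t, map), divided by the element size, gives the
 * array slot, and adding map_bias converts that slot into a page index.
 */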
JEMALLOC_INLINE_C size_t
arena_mapelm_to_pageind(arena_chunk_map_t *mapelm)
{
	uintptr_t map_offset =
	    CHUNK_ADDR2OFFSET(mapelm) - offsetof(arena_chunk_t, map);

	return ((map_offset / sizeof(arena_chunk_map_t)) + map_bias);
}

JEMALLOC_INLINE_C size_t
arena_mapelm_to_bits(arena_chunk_map_t *mapelm)
{

	return (mapelm->bits);
}

static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
    u.rb_link, arena_run_comp)

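/*
 * arena_avail_comp() orders available runs primarily by size and secondarily
 * by map element address.  A search key tagged with CHUNK_MAP_KEY (always the
 * first argument) skips the address tie-break and therefore compares equal to
 * any run of the requested size, so an nsearch() with a size-only key yields
 * a best-fit run: a run of exactly the requested size if one exists, else the
 * smallest available run that is large enough.
 */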
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	int ret;
	size_t a_size;
	size_t b_size = arena_mapelm_to_bits(b) & ~PAGE_MASK;
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	if (a_mapelm & CHUNK_MAP_KEY)
		a_size = a_mapelm & ~PAGE_MASK;
	else
		a_size = arena_mapelm_to_bits(a) & ~PAGE_MASK;

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0 && (!(a_mapelm & CHUNK_MAP_KEY)))
		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);

	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
    u.rb_link, arena_avail_comp)

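/*
 * Note on the comparison in arena_chunk_dirty_comp() below: rather than
 * evaluating (nruns_avail - nruns_adjac) / nruns_avail for each chunk, the
 * code cross-multiplies, so comparing a_val with b_val compares the two
 * ratios exactly without integer division.  For example, a chunk with
 * nruns_avail=4, nruns_adjac=3 (ratio 1/4) compared against one with
 * nruns_avail=4, nruns_adjac=0 (ratio 1) reduces to comparing 1*4 = 4 with
 * 4*4 = 16.
 */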
static inline int
arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
{

	assert(a != NULL);
	assert(b != NULL);

	/*
	 * Short-circuit for self comparison.  The following comparison code
	 * would come to the same result, but at the cost of executing the slow
	 * path.
	 */
	if (a == b)
		return (0);

	/*
	 * Order such that chunks with higher fragmentation are "less than"
	 * those with lower fragmentation -- purging order is from "least" to
	 * "greatest".  Fragmentation is measured as:
	 *
	 *         mean current avail run size
	 *   --------------------------------
	 *   mean defragmented avail run size
	 *
	 *            navail
	 *         -----------
	 *         nruns_avail           nruns_avail-nruns_adjac
	 * = ========================= = -----------------------
	 *            navail                  nruns_avail
	 *    -----------------------
	 *    nruns_avail-nruns_adjac
	 *
	 * The following code multiplies away the denominator prior to
	 * comparison, in order to avoid division.
	 *
	 */
	{
		size_t a_val = (a->nruns_avail - a->nruns_adjac) *
		    b->nruns_avail;
		size_t b_val = (b->nruns_avail - b->nruns_adjac) *
		    a->nruns_avail;

		if (a_val < b_val)
			return (1);
		if (a_val > b_val)
			return (-1);
	}
	/*
	 * Break ties by chunk address.  For fragmented chunks, report lower
	 * addresses as "lower", so that fragmentation reduction happens first
	 * at lower addresses.  However, use the opposite ordering for
	 * unfragmented chunks, in order to increase the chances of
	 * re-allocating dirty runs.
	 */
	{
		uintptr_t a_chunk = (uintptr_t)a;
		uintptr_t b_chunk = (uintptr_t)b;
		int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
		if (a->nruns_adjac == 0) {
			assert(b->nruns_adjac == 0);
			ret = -ret;
		}
		return (ret);
	}
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
    dirty_link, arena_chunk_dirty_comp)

static inline bool
arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
{
	bool ret;

	if (pageind-1 < map_bias)
		ret = false;
	else {
		ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
		assert(ret == false || arena_mapbits_dirty_get(chunk,
		    pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
	}
	return (ret);
}

static inline bool
arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
{
	bool ret;

	if (pageind+npages == chunk_npages)
		ret = false;
	else {
		assert(pageind+npages < chunk_npages);
		ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
		assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
		    != arena_mapbits_dirty_get(chunk, pageind+npages));
	}
	return (ret);
}

static inline bool
arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
{

	return (arena_avail_adjac_pred(chunk, pageind) ||
	    arena_avail_adjac_succ(chunk, pageind, npages));
}

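/*
 * arena_avail_insert() and arena_avail_remove() keep three structures in
 * sync: the per-arena runs_avail tree of available runs, the per-chunk
 * nruns_avail/nruns_adjac counters, and the chunks_dirty tree.  Because
 * chunks_dirty is ordered by those counters, any chunk with dirty pages is
 * removed from that tree before the counters change and reinserted afterward.
 */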
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));

	/*
	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
	 * removed and reinserted even if the run to be inserted is clean.
	 */
	if (chunk->ndirty != 0)
		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
		chunk->nruns_adjac++;
	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
		chunk->nruns_adjac++;
	chunk->nruns_avail++;
	assert(chunk->nruns_avail > chunk->nruns_adjac);

	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty += npages;
		chunk->ndirty += npages;
	}
	if (chunk->ndirty != 0)
		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

	arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));

	/*
	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
	 * removed and reinserted even if the run to be removed is clean.
	 */
	if (chunk->ndirty != 0)
		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
		chunk->nruns_adjac--;
	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
		chunk->nruns_adjac--;
	chunk->nruns_avail--;
	assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
	    == 0 && chunk->nruns_adjac == 0));

	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty -= npages;
		chunk->ndirty -= npages;
	}
	if (chunk->ndirty != 0)
		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

	arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
}

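/*
 * Region allocation within a small run is bitmap driven: bitmap_sfu() finds
 * and sets the first unset bit, and the region address is computed from
 * reg0_offset plus regind * reg_interval.  Deallocation reverses this by
 * mapping the pointer back to a region index and clearing the bit.
 */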
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	unsigned regind;
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree > 0);
	assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);

	regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	if (regind == run->nextind)
		run->nextind++;
	assert(regind < run->nextind);
	return (ret);
}

static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	unsigned regind = arena_run_regind(run, bin_info, ptr);
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - ((uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

static inline void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

static inline void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

static inline void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{

	if (config_stats) {
		ssize_t cactive_diff = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive -
		    sub_pages) << LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_add(cactive_diff);
	}
}

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t need_pages)
{
	size_t total_pages, rem_pages;

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
	arena_cactive_update(arena, need_pages, 0);
	arena->nactive += need_pages;

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		if (flag_dirty != 0) {
			arena_mapbits_unallocated_set(chunk,
			    run_ind+need_pages, (rem_pages << LG_PAGE),
			    flag_dirty);
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    flag_dirty);
		} else {
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+need_pages));
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+total_pages-1));
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
		    false, true);
	}
}

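/*
 * The run splitting helpers below carve need_pages out of an available run.
 * arena_run_split_remove() pulls the run out of runs_avail, accounts for the
 * newly active pages, and re-registers any trailing remainder; the large and
 * small variants then write the appropriate chunk map bits for the allocated
 * pages, zeroing or validating pages only where required.
 */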
static void
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	size_t flag_dirty, run_ind, need_pages, i;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    need_pages);
	}

	if (zero) {
		if (flag_dirty == 0) {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		} else {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
}

static void
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	arena_run_split_large_helper(arena, run, size, true, zero);
}

static void
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	arena_run_split_large_helper(arena, run, size, false, zero);
}

static void
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    size_t binind)
{
	arena_chunk_t *chunk;
	size_t flag_dirty, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);

	/*
	 * Propagate the dirty and unzeroed flags to the allocated small run,
	 * so that arena_dalloc_bin_run() has the ability to conditionally trim
	 * clean pages.
	 */
	arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
	/*
	 * The first page will always be dirtied during small run
	 * initialization, so a validation failure here would not actually
	 * cause an observable failure.
	 */
	if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
	    run_ind) == 0)
		arena_run_page_validate_zeroed(chunk, run_ind);
	for (i = 1; i < need_pages - 1; i++) {
		arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
	    binind, flag_dirty);
	if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1) == 0)
		arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxclass);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxclass);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
    bool *zero)
{
	arena_chunk_t *chunk;
	chunk_alloc_t *chunk_alloc;
	chunk_dalloc_t *chunk_dalloc;

	chunk_alloc = arena->chunk_alloc;
	chunk_dalloc = arena->chunk_dalloc;
	malloc_mutex_unlock(&arena->lock);
	chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
	    arena->ind, size, alignment, zero);
	malloc_mutex_lock(&arena->lock);
	if (config_stats && chunk != NULL)
		arena->stats.mapped += chunksize;

	return (chunk);
}

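/*
 * Huge allocations bypass the run machinery entirely: the chunk allocation
 * hook is invoked with arena->lock released, so the stats are updated
 * optimistically beforehand and rolled back if the allocation fails.
 */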
void *
arena_chunk_alloc_huge(arena_t *arena, size_t size, size_t alignment,
    bool *zero)
{
	void *ret;
	chunk_alloc_t *chunk_alloc;
	chunk_dalloc_t *chunk_dalloc;

	malloc_mutex_lock(&arena->lock);
	chunk_alloc = arena->chunk_alloc;
	chunk_dalloc = arena->chunk_dalloc;
	if (config_stats) {
		/* Optimistically update stats prior to unlocking. */
		arena->stats.mapped += size;
		arena->stats.allocated_huge += size;
		arena->stats.nmalloc_huge++;
		arena->stats.nrequests_huge++;
	}
	arena->nactive += (size >> LG_PAGE);
	malloc_mutex_unlock(&arena->lock);

	ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
	    size, alignment, zero);
	if (config_stats) {
		if (ret != NULL)
			stats_cactive_add(size);
		else {
			/* Revert optimistic stats updates. */
			malloc_mutex_lock(&arena->lock);
			arena->stats.mapped -= size;
			arena->stats.allocated_huge -= size;
			arena->stats.nmalloc_huge--;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	return (ret);
}

static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero;
	size_t unzeroed, i;

	assert(arena->spare == NULL);

	zero = false;
	chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero);
	if (chunk == NULL)
		return (NULL);

	chunk->arena = arena;

	/*
	 * Claim that no pages are in use, since the header is merely overhead.
	 */
	chunk->ndirty = 0;

	chunk->nruns_avail = 0;
	chunk->nruns_adjac = 0;

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
	 */
	unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
	    unzeroed);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (zero == false) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_mapp_get(chunk, map_bias+1),
		    (size_t)((uintptr_t) arena_mapp_get(chunk, chunk_npages-1) -
		    (uintptr_t)arena_mapp_get(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_unzeroed_set(chunk, i, unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
		    map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
		    chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
		    map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
	    unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(arena);
		if (chunk == NULL)
			return (NULL);
	}

	/* Insert the run into the runs_avail tree. */
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
	    false, false);

	return (chunk);
}

static void
arena_chunk_dalloc_internal(arena_t *arena, arena_chunk_t *chunk)
{
	chunk_dalloc_t *chunk_dalloc;

	chunk_dalloc = arena->chunk_dalloc;
	malloc_mutex_unlock(&arena->lock);
	chunk_dalloc((void *)chunk, chunksize, arena->ind);
	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena->stats.mapped -= chunksize;
}

void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size)
{
	chunk_dalloc_t *chunk_dalloc;

	malloc_mutex_lock(&arena->lock);
	chunk_dalloc = arena->chunk_dalloc;
	if (config_stats) {
		arena->stats.mapped -= size;
		arena->stats.allocated_huge -= size;
		arena->stats.ndalloc_huge++;
		stats_cactive_sub(size);
	}
	arena->nactive -= (size >> LG_PAGE);
	malloc_mutex_unlock(&arena->lock);
	chunk_dalloc(chunk, size, arena->ind);
}

static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{
	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxclass);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxclass);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	/*
	 * Remove run from the runs_avail tree, so that the arena does not use
	 * it.
	 */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
	    false, false);

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;

		arena->spare = chunk;
		arena_chunk_dalloc_internal(arena, spare);
	} else
		arena->spare = chunk;
}

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run;
	arena_chunk_map_t *mapelm;
	arena_chunk_map_t *key;

	key = (arena_chunk_map_t *)(size | CHUNK_MAP_KEY);
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = arena_mapelm_to_pageind(mapelm);

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split_large(arena, run, size, zero);
		return (run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxclass);
	assert((size & PAGE_MASK) == 0);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
		arena_run_split_large(arena, run, size, zero);
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
{
	arena_run_t *run;
	arena_chunk_map_t *mapelm;
	arena_chunk_map_t *key;

	key = (arena_chunk_map_t *)(size | CHUNK_MAP_KEY);
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = arena_mapelm_to_pageind(mapelm);

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split_small(arena, run, size, binind);
		return (run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxclass);
	assert((size & PAGE_MASK) == 0);
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
		arena_run_split_small(arena, run, size, binind);
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

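/*
 * Purging is throttled by opt_lg_dirty_mult: the threshold computed below is
 * nactive >> opt_lg_dirty_mult, so with a value of 3, for example, dirty
 * pages may accumulate up to roughly 1/8 of the active page count before
 * arena_purge() is invoked.
 */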
static inline void
arena_maybe_purge(arena_t *arena)
{
	size_t npurgeable, threshold;

	/* Don't purge if the option is disabled. */
	if (opt_lg_dirty_mult < 0)
		return;
	/* Don't purge if all dirty pages are already being purged. */
	if (arena->ndirty <= arena->npurgatory)
		return;
	npurgeable = arena->ndirty - arena->npurgatory;
	threshold = (arena->nactive >> opt_lg_dirty_mult);
	/*
	 * Don't purge unless the number of purgeable pages exceeds the
	 * threshold.
	 */
	if (npurgeable <= threshold)
		return;

	arena_purge(arena, false);
}

static arena_chunk_t *
chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
{
	size_t *ndirty = (size_t *)arg;

	assert(chunk->ndirty != 0);
	*ndirty += chunk->ndirty;
	return (NULL);
}

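/*
 * npurgatory counts pages that some thread has already committed to purging.
 * arena_compute_npurgatory() returns how many additional pages the calling
 * thread should take responsibility for: all remaining purgeable pages when
 * all is true, otherwise just enough to bring the dirty page count back down
 * to the nactive >> opt_lg_dirty_mult threshold.
 */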
static size_t
arena_compute_npurgatory(arena_t *arena, bool all)
{
	size_t npurgatory, npurgeable;

	/*
	 * Compute the minimum number of pages that this thread should try to
	 * purge.
	 */
	npurgeable = arena->ndirty - arena->npurgatory;

	if (all == false) {
		size_t threshold = (arena->nactive >> opt_lg_dirty_mult);

		npurgatory = npurgeable - threshold;
	} else
		npurgatory = npurgeable;

	return (npurgatory);
}

static void
arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all,
    arena_chunk_mapelms_t *mapelms)
{
	size_t pageind, npages;

	/*
	 * Temporarily allocate free dirty runs within chunk.  If all is false,
	 * only operate on dirty runs that are fragments; otherwise operate on
	 * all dirty runs.
	 */
	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
		if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
			    (all || arena_avail_adjac(chunk, pageind,
			    npages))) {
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				arena_run_split_large(arena, run, run_size,
				    false);
				/* Append to list for later processing. */
				ql_elm_new(mapelm, u.ql_link);
				ql_tail_insert(mapelms, mapelm, u.ql_link);
			}
		} else {
			/* Skip run. */
			if (arena_mapbits_large_get(chunk, pageind) != 0) {
				npages = arena_mapbits_large_size_get(chunk,
				    pageind) >> LG_PAGE;
			} else {
				size_t binind;
				arena_bin_info_t *bin_info;
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				assert(arena_mapbits_small_runind_get(chunk,
				    pageind) == 0);
				binind = arena_bin_index(arena, run->bin);
				bin_info = &arena_bin_info[binind];
				npages = bin_info->run_size >> LG_PAGE;
			}
		}
	}
	assert(pageind == chunk_npages);
	assert(chunk->ndirty == 0 || all == false);
	assert(chunk->nruns_adjac == 0);
}

static size_t
arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk,
    arena_chunk_mapelms_t *mapelms)
{
	size_t npurged, pageind, npages, nmadvise;
	arena_chunk_map_t *mapelm;

	malloc_mutex_unlock(&arena->lock);
	if (config_stats)
		nmadvise = 0;
	npurged = 0;
	ql_foreach(mapelm, mapelms, u.ql_link) {
		bool unzeroed;
		size_t flag_unzeroed, i;

		pageind = arena_mapelm_to_pageind(mapelm);
		npages = arena_mapbits_large_size_get(chunk, pageind) >>
		    LG_PAGE;
		assert(pageind + npages <= chunk_npages);
		unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
		    LG_PAGE)), (npages << LG_PAGE));
		flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
		/*
		 * Set the unzeroed flag for all pages, now that pages_purge()
		 * has returned whether the pages were zeroed as a side effect
		 * of purging.  This chunk map modification is safe even though
		 * the arena mutex isn't currently owned by this thread,
		 * because the run is marked as allocated, thus protecting it
		 * from being modified by any other thread.  As long as these
		 * writes don't perturb the first and last elements'
		 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
		 */
		for (i = 0; i < npages; i++) {
			arena_mapbits_unzeroed_set(chunk, pageind+i,
			    flag_unzeroed);
		}
		npurged += npages;
		if (config_stats)
			nmadvise++;
	}
	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena->stats.nmadvise += nmadvise;

	return (npurged);
}

static void
arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk,
    arena_chunk_mapelms_t *mapelms)
{
	arena_chunk_map_t *mapelm;
	size_t pageind;

	/* Deallocate runs. */
	for (mapelm = ql_first(mapelms); mapelm != NULL;
	    mapelm = ql_first(mapelms)) {
		arena_run_t *run;

		pageind = arena_mapelm_to_pageind(mapelm);
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
		    LG_PAGE));
		ql_remove(mapelms, mapelm, u.ql_link);
		arena_run_dalloc(arena, run, false, true);
	}
}

static inline size_t
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
	size_t npurged;
	arena_chunk_mapelms_t mapelms;

	ql_new(&mapelms);

	/*
	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
	 * run is reinserted into runs_avail, and 2) so that it cannot be
	 * completely discarded by another thread while arena->lock is dropped
	 * by this thread.  Note that the arena_run_dalloc() call will
	 * implicitly deallocate the chunk, so no explicit action is required
	 * in this function to deallocate the chunk.
	 *
	 * Note that once a chunk contains dirty pages, it cannot again contain
	 * a single run unless 1) it is a dirty run, or 2) this function purges
	 * dirty pages and causes the transition to a single clean run.  Thus
	 * (chunk == arena->spare) is possible, but it is not possible for
	 * this function to be called on the spare unless it contains a dirty
	 * run.
	 */
	if (chunk == arena->spare) {
		assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
		assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);

		arena_chunk_alloc(arena);
	}

	if (config_stats)
		arena->stats.purged += chunk->ndirty;

	/*
	 * Operate on all dirty runs if there is no clean/dirty run
	 * fragmentation.
	 */
	if (chunk->nruns_adjac == 0)
		all = true;

	arena_chunk_stash_dirty(arena, chunk, all, &mapelms);
	npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms);
	arena_chunk_unstash_purged(arena, chunk, &mapelms);

	return (npurged);
}

static void
arena_purge(arena_t *arena, bool all)
{
	arena_chunk_t *chunk;
	size_t npurgatory;
	if (config_debug) {
		size_t ndirty = 0;

		arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
		    chunks_dirty_iter_cb, (void *)&ndirty);
		assert(ndirty == arena->ndirty);
	}
	assert(arena->ndirty > arena->npurgatory || all);
	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
	    arena->npurgatory) || all);

	if (config_stats)
		arena->stats.npurge++;

	/*
	 * Add the minimum number of pages this thread should try to purge to
	 * arena->npurgatory.  This will keep multiple threads from racing to
	 * reduce ndirty below the threshold.
	 */
	npurgatory = arena_compute_npurgatory(arena, all);
	arena->npurgatory += npurgatory;

	while (npurgatory > 0) {
		size_t npurgeable, npurged, nunpurged;

		/* Get next chunk with dirty pages. */
		chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
		if (chunk == NULL) {
			/*
			 * This thread was unable to purge as many pages as
			 * originally intended, due to races with other threads
			 * that either did some of the purging work, or re-used
			 * dirty pages.
			 */
			arena->npurgatory -= npurgatory;
			return;
		}
		npurgeable = chunk->ndirty;
		assert(npurgeable != 0);

		if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
			/*
			 * This thread will purge all the dirty pages in chunk,
			 * so set npurgatory to reflect this thread's intent to
			 * purge the pages.  This tends to reduce the chances
			 * of the following scenario:
			 *
			 * 1) This thread sets arena->npurgatory such that
			 *    (arena->ndirty - arena->npurgatory) is at the
			 *    threshold.
			 * 2) This thread drops arena->lock.
			 * 3) Another thread causes one or more pages to be
			 *    dirtied, and immediately determines that it must
			 *    purge dirty pages.
			 *
			 * If this scenario *does* play out, that's okay,
			 * because all of the purging work being done really
			 * needs to happen.
			 */
			arena->npurgatory += npurgeable - npurgatory;
			npurgatory = npurgeable;
		}

		/*
		 * Keep track of how many pages are purgeable, versus how many
		 * actually get purged, and adjust counters accordingly.
		 */
		arena->npurgatory -= npurgeable;
		npurgatory -= npurgeable;
		npurged = arena_chunk_purge(arena, chunk, all);
		nunpurged = npurgeable - npurged;
		arena->npurgatory += nunpurged;
		npurgatory += nunpurged;
	}
}

void
arena_purge_all(arena_t *arena)
{

	malloc_mutex_lock(&arena->lock);
	arena_purge(arena, true);
	malloc_mutex_unlock(&arena->lock);
}

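/*
 * When a run is freed, arena_run_coalesce() merges it with any unallocated
 * neighbors that share the same dirty state, updating the size, starting page
 * index, and page count in place so that the caller can insert a single
 * maximal run back into runs_avail.
 */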
Jason Evanse476f8a2010-01-16 09:53:50 -08001184static void
Jason Evansaa5113b2014-01-14 16:23:03 -08001185arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
1186 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
Jason Evanse476f8a2010-01-16 09:53:50 -08001187{
Jason Evansaa5113b2014-01-14 16:23:03 -08001188 size_t size = *p_size;
1189 size_t run_ind = *p_run_ind;
1190 size_t run_pages = *p_run_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001191
1192 /* Try to coalesce forward. */
1193 if (run_ind + run_pages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07001194 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
1195 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
1196 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1197 run_ind+run_pages);
Jason Evansae4c7b42012-04-02 07:04:34 -07001198 size_t nrun_pages = nrun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001199
1200 /*
1201 * Remove successor from runs_avail; the coalesced run is
1202 * inserted later.
1203 */
Jason Evans203484e2012-05-02 00:30:36 -07001204 assert(arena_mapbits_unallocated_size_get(chunk,
1205 run_ind+run_pages+nrun_pages-1) == nrun_size);
1206 assert(arena_mapbits_dirty_get(chunk,
1207 run_ind+run_pages+nrun_pages-1) == flag_dirty);
Jason Evanse3d13062012-10-30 15:42:37 -07001208 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
1209 false, true);
Jason Evanse476f8a2010-01-16 09:53:50 -08001210
1211 size += nrun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07001212 run_pages += nrun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001213
Jason Evans203484e2012-05-02 00:30:36 -07001214 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1215 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1216 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001217 }
1218
1219 /* Try to coalesce backward. */
Jason Evansaa5113b2014-01-14 16:23:03 -08001220 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
1221 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
1222 flag_dirty) {
Jason Evans203484e2012-05-02 00:30:36 -07001223 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
1224 run_ind-1);
Jason Evansae4c7b42012-04-02 07:04:34 -07001225 size_t prun_pages = prun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001226
Jason Evans12ca9142010-10-17 19:56:09 -07001227 run_ind -= prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001228
1229 /*
1230 * Remove predecessor from runs_avail; the coalesced run is
1231 * inserted later.
1232 */
Jason Evans203484e2012-05-02 00:30:36 -07001233 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1234 prun_size);
1235 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
Jason Evanse3d13062012-10-30 15:42:37 -07001236 arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
1237 false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001238
1239 size += prun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07001240 run_pages += prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001241
Jason Evans203484e2012-05-02 00:30:36 -07001242 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1243 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1244 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001245 }
1246
Jason Evansaa5113b2014-01-14 16:23:03 -08001247 *p_size = size;
1248 *p_run_ind = run_ind;
1249 *p_run_pages = run_pages;
1250}
1251
1252static void
1253arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
1254{
1255 arena_chunk_t *chunk;
1256 size_t size, run_ind, run_pages, flag_dirty;
1257
1258 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1259 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
1260 assert(run_ind >= map_bias);
1261 assert(run_ind < chunk_npages);
1262 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
1263 size = arena_mapbits_large_size_get(chunk, run_ind);
1264 assert(size == PAGE ||
1265 arena_mapbits_large_size_get(chunk,
1266 run_ind+(size>>LG_PAGE)-1) == 0);
1267 } else {
1268 size_t binind = arena_bin_index(arena, run->bin);
1269 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1270 size = bin_info->run_size;
1271 }
1272 run_pages = (size >> LG_PAGE);
1273 arena_cactive_update(arena, 0, run_pages);
1274 arena->nactive -= run_pages;
1275
1276 /*
1277 * The run is dirty if the caller claims to have dirtied it, as well as
1278 * if it was already dirty before being allocated and the caller
1279 * doesn't claim to have cleaned it.
1280 */
1281 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1282 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
1283 if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
1284 dirty = true;
1285 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
1286
1287 /* Mark pages as unallocated in the chunk map. */
1288 if (dirty) {
1289 arena_mapbits_unallocated_set(chunk, run_ind, size,
1290 CHUNK_MAP_DIRTY);
1291 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1292 CHUNK_MAP_DIRTY);
1293 } else {
1294 arena_mapbits_unallocated_set(chunk, run_ind, size,
1295 arena_mapbits_unzeroed_get(chunk, run_ind));
1296 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1297 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
1298 }
1299
1300 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
1301 flag_dirty);
1302
Jason Evanse476f8a2010-01-16 09:53:50 -08001303 /* Insert into runs_avail, now that coalescing is complete. */
Jason Evans203484e2012-05-02 00:30:36 -07001304 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1305 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
1306 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1307 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evanse3d13062012-10-30 15:42:37 -07001308 arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
Jason Evans8d4203c2010-04-13 20:53:21 -07001309
Jason Evans203484e2012-05-02 00:30:36 -07001310 /* Deallocate chunk if it is now completely unused. */
1311 if (size == arena_maxclass) {
1312 assert(run_ind == map_bias);
1313 assert(run_pages == (arena_maxclass >> LG_PAGE));
Jason Evanse2deab72014-05-15 22:22:27 -07001314 arena_chunk_dalloc(arena, chunk);
Jason Evans203484e2012-05-02 00:30:36 -07001315 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001316
Jason Evans4fb7f512010-01-27 18:27:09 -08001317 /*
Jason Evans8d4203c2010-04-13 20:53:21 -07001318 * It is okay to do dirty page processing here even if the chunk was
Jason Evans4fb7f512010-01-27 18:27:09 -08001319 * deallocated above, since in that case it is the spare. Waiting
1320 * until after possible chunk deallocation to do dirty processing
1321 * allows for an old spare to be fully deallocated, thus decreasing the
1322 * chances of spuriously crossing the dirty page purging threshold.
1323 */
Jason Evans8d4203c2010-04-13 20:53:21 -07001324 if (dirty)
Jason Evans05b21be2010-03-14 17:36:10 -07001325 arena_maybe_purge(arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08001326}
1327
1328static void
1329arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1330 size_t oldsize, size_t newsize)
1331{
Jason Evansae4c7b42012-04-02 07:04:34 -07001332 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1333 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001334 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001335
1336 assert(oldsize > newsize);
1337
1338 /*
1339 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07001340 * leading run as separately allocated. Set the last element of each
1341 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08001342 */
Jason Evans203484e2012-05-02 00:30:36 -07001343 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evansd8ceef62012-05-10 20:59:39 -07001344 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
1345 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001346
Jason Evans7372b152012-02-10 20:22:09 -08001347 if (config_debug) {
Jason Evansae4c7b42012-04-02 07:04:34 -07001348 UNUSED size_t tail_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001349 assert(arena_mapbits_large_size_get(chunk,
1350 pageind+head_npages+tail_npages-1) == 0);
1351 assert(arena_mapbits_dirty_get(chunk,
1352 pageind+head_npages+tail_npages-1) == flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001353 }
Jason Evansd8ceef62012-05-10 20:59:39 -07001354 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
1355 flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08001356
Jason Evanse3d13062012-10-30 15:42:37 -07001357 arena_run_dalloc(arena, run, false, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001358}
1359
1360static void
1361arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1362 size_t oldsize, size_t newsize, bool dirty)
1363{
Jason Evansae4c7b42012-04-02 07:04:34 -07001364 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1365 size_t head_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001366 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001367
1368 assert(oldsize > newsize);
1369
1370 /*
1371 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07001372 * trailing run as separately allocated. Set the last element of each
1373 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08001374 */
Jason Evans203484e2012-05-02 00:30:36 -07001375 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evansd8ceef62012-05-10 20:59:39 -07001376 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
1377 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07001378
Jason Evans203484e2012-05-02 00:30:36 -07001379 if (config_debug) {
1380 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
1381 assert(arena_mapbits_large_size_get(chunk,
1382 pageind+head_npages+tail_npages-1) == 0);
1383 assert(arena_mapbits_dirty_get(chunk,
1384 pageind+head_npages+tail_npages-1) == flag_dirty);
1385 }
1386 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
Jason Evansd8ceef62012-05-10 20:59:39 -07001387 flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08001388
1389 arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
Jason Evanse3d13062012-10-30 15:42:37 -07001390 dirty, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001391}
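/*
 * Worked example for the two trim helpers, assuming 4 KiB pages (LG_PAGE ==
 * 12) and a run of oldsize = 5 pages trimmed to newsize = 3 pages:
 *
 *	arena_run_trim_head(): head_npages = (oldsize - newsize) >> LG_PAGE = 2.
 *	    The map gains a 2-page run at pageind (size oldsize - newsize) and
 *	    a 3-page run at pageind + 2 (size newsize); the 2-page head is then
 *	    released via arena_run_dalloc().
 *	arena_run_trim_tail(): head_npages = newsize >> LG_PAGE = 3.  The map
 *	    keeps a 3-page run at pageind and gains a 2-page run at pageind + 3,
 *	    and the 2-page tail at (uintptr_t)run + newsize is released.
 */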
1392
1393static arena_run_t *
Jason Evanse7a10582012-02-13 17:36:52 -08001394arena_bin_runs_first(arena_bin_t *bin)
1395{
1396 arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
1397 if (mapelm != NULL) {
1398 arena_chunk_t *chunk;
1399 size_t pageind;
Mike Hommey8b499712012-04-24 23:22:02 +02001400 arena_run_t *run;
Jason Evanse7a10582012-02-13 17:36:52 -08001401
1402 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
Ben Maurerf9ff6032014-04-06 13:24:16 -07001403 pageind = arena_mapelm_to_pageind(mapelm);
Jason Evans203484e2012-05-02 00:30:36 -07001404 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1405 arena_mapbits_small_runind_get(chunk, pageind)) <<
Jason Evansae4c7b42012-04-02 07:04:34 -07001406 LG_PAGE));
Jason Evanse7a10582012-02-13 17:36:52 -08001407 return (run);
1408 }
1409
1410 return (NULL);
1411}
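/*
 * Note on the arithmetic above: the same (pageind - runind) << LG_PAGE
 * computation appears in the deallocation paths as well.  A map element taken
 * from bin->runs refers to a run's first page, whose stored run index is 0,
 * so here it reduces to chunk + (pageind << LG_PAGE); for an interior page
 * (e.g. in arena_dalloc_bin_locked()) the stored index walks back to the
 * run's first page.  Assuming 4 KiB pages, page 37 with run index 2 resolves
 * to chunk + (35 << LG_PAGE).
 */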
1412
1413static void
1414arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
1415{
1416 arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
Jason Evansae4c7b42012-04-02 07:04:34 -07001417 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001418 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
Jason Evanse7a10582012-02-13 17:36:52 -08001419
1420 assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
1421
1422 arena_run_tree_insert(&bin->runs, mapelm);
1423}
1424
1425static void
1426arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
1427{
1428 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evansae4c7b42012-04-02 07:04:34 -07001429 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001430 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
Jason Evanse7a10582012-02-13 17:36:52 -08001431
1432 assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
1433
1434 arena_run_tree_remove(&bin->runs, mapelm);
1435}
1436
1437static arena_run_t *
1438arena_bin_nonfull_run_tryget(arena_bin_t *bin)
1439{
1440 arena_run_t *run = arena_bin_runs_first(bin);
1441 if (run != NULL) {
1442 arena_bin_runs_remove(bin, run);
1443 if (config_stats)
1444 bin->stats.reruns++;
1445 }
1446 return (run);
1447}
1448
1449static arena_run_t *
Jason Evanse476f8a2010-01-16 09:53:50 -08001450arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1451{
Jason Evanse476f8a2010-01-16 09:53:50 -08001452 arena_run_t *run;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001453 size_t binind;
1454 arena_bin_info_t *bin_info;
Jason Evanse476f8a2010-01-16 09:53:50 -08001455
1456 /* Look for a usable run. */
Jason Evanse7a10582012-02-13 17:36:52 -08001457 run = arena_bin_nonfull_run_tryget(bin);
1458 if (run != NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001459 return (run);
Jason Evanse476f8a2010-01-16 09:53:50 -08001460 /* No existing runs have any space available. */
1461
Jason Evans49f7e8f2011-03-15 13:59:15 -07001462 binind = arena_bin_index(arena, bin);
1463 bin_info = &arena_bin_info[binind];
1464
Jason Evanse476f8a2010-01-16 09:53:50 -08001465 /* Allocate a new run. */
Jason Evanse00572b2010-03-14 19:43:56 -07001466 malloc_mutex_unlock(&bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07001467 /******************************/
Jason Evans86815df2010-03-13 20:32:56 -08001468 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001469 run = arena_run_alloc_small(arena, bin_info->run_size, binind);
Jason Evanse00572b2010-03-14 19:43:56 -07001470 if (run != NULL) {
Jason Evans84c8eef2011-03-16 10:30:13 -07001471 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
1472 (uintptr_t)bin_info->bitmap_offset);
1473
Jason Evanse00572b2010-03-14 19:43:56 -07001474 /* Initialize run internals. */
1475 run->bin = bin;
Jason Evans84c8eef2011-03-16 10:30:13 -07001476 run->nextind = 0;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001477 run->nfree = bin_info->nregs;
Jason Evans84c8eef2011-03-16 10:30:13 -07001478 bitmap_init(bitmap, &bin_info->bitmap_info);
Jason Evanse69bee02010-03-15 22:25:23 -07001479 }
1480 malloc_mutex_unlock(&arena->lock);
1481 /********************************/
1482 malloc_mutex_lock(&bin->lock);
1483 if (run != NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001484 if (config_stats) {
1485 bin->stats.nruns++;
1486 bin->stats.curruns++;
Jason Evans7372b152012-02-10 20:22:09 -08001487 }
Jason Evanse00572b2010-03-14 19:43:56 -07001488 return (run);
1489 }
1490
1491 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08001492 * arena_run_alloc_small() failed, but another thread may have made
Jason Evans940a2e02010-10-17 17:51:37 -07001493 * sufficient memory available while this one dropped bin->lock above,
Jason Evanse00572b2010-03-14 19:43:56 -07001494 * so search one more time.
1495 */
Jason Evanse7a10582012-02-13 17:36:52 -08001496 run = arena_bin_nonfull_run_tryget(bin);
1497 if (run != NULL)
Jason Evanse00572b2010-03-14 19:43:56 -07001498 return (run);
Jason Evanse00572b2010-03-14 19:43:56 -07001499
1500 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08001501}
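/*
 * Locking sketch for the function above (bin->lock is held by the caller on
 * entry and on return):
 *
 *	arena_bin_nonfull_run_tryget(bin)	under bin->lock
 *	malloc_mutex_unlock(&bin->lock)
 *	malloc_mutex_lock(&arena->lock)		arena_run_alloc_small()
 *	malloc_mutex_unlock(&arena->lock)
 *	malloc_mutex_lock(&bin->lock)
 *	arena_bin_nonfull_run_tryget(bin)	retried, because another thread
 *						may have refilled bin->runs
 *						while bin->lock was dropped
 */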
1502
Jason Evans1e0a6362010-03-13 13:41:58 -08001503/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08001504static void *
1505arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1506{
Jason Evanse00572b2010-03-14 19:43:56 -07001507 void *ret;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001508 size_t binind;
1509 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07001510 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001511
Jason Evans49f7e8f2011-03-15 13:59:15 -07001512 binind = arena_bin_index(arena, bin);
1513 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07001514 bin->runcur = NULL;
1515 run = arena_bin_nonfull_run_get(arena, bin);
1516 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1517 /*
1518 * Another thread updated runcur while this one ran without the
1519 * bin lock in arena_bin_nonfull_run_get().
1520 */
Jason Evanse00572b2010-03-14 19:43:56 -07001521 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001522 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07001523 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07001524 arena_chunk_t *chunk;
1525
1526 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08001527 * arena_run_alloc_small() may have allocated run, or
1528 * it may have pulled run from the bin's run tree.
1529 * Therefore it is unsafe to make any assumptions about
1530 * how run has previously been used, and
1531 * arena_bin_lower_run() must be called, as if a region
1532 * were just deallocated from the run.
Jason Evans940a2e02010-10-17 17:51:37 -07001533 */
1534 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001535 if (run->nfree == bin_info->nregs)
Jason Evans8de6a022010-10-17 20:57:30 -07001536 arena_dalloc_bin_run(arena, chunk, run, bin);
1537 else
1538 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07001539 }
1540 return (ret);
1541 }
1542
1543 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001544 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07001545
1546 bin->runcur = run;
1547
Jason Evanse476f8a2010-01-16 09:53:50 -08001548 assert(bin->runcur->nfree > 0);
1549
Jason Evans49f7e8f2011-03-15 13:59:15 -07001550 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08001551}
1552
Jason Evans86815df2010-03-13 20:32:56 -08001553void
Jason Evans7372b152012-02-10 20:22:09 -08001554arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
1555 uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08001556{
1557 unsigned i, nfill;
1558 arena_bin_t *bin;
1559 arena_run_t *run;
1560 void *ptr;
1561
1562 assert(tbin->ncached == 0);
1563
Jason Evans88c222c2013-02-06 11:59:30 -08001564 if (config_prof && arena_prof_accum(arena, prof_accumbytes))
1565 prof_idump();
Jason Evanse69bee02010-03-15 22:25:23 -07001566 bin = &arena->bins[binind];
1567 malloc_mutex_lock(&bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07001568 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1569 tbin->lg_fill_div); i < nfill; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001570 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001571 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001572 else
1573 ptr = arena_bin_malloc_hard(arena, bin);
Jason Evans3fa9a2f2010-03-07 15:34:14 -08001574 if (ptr == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001575 break;
Jason Evans122449b2012-04-06 00:35:09 -07001576 if (config_fill && opt_junk) {
1577 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
1578 true);
1579 }
Jason Evans9c43c132011-03-18 10:53:15 -07001580 /* Insert such that low regions get used first. */
1581 tbin->avail[nfill - 1 - i] = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08001582 }
Jason Evans7372b152012-02-10 20:22:09 -08001583 if (config_stats) {
1584 bin->stats.allocated += i * arena_bin_info[binind].reg_size;
1585 bin->stats.nmalloc += i;
1586 bin->stats.nrequests += tbin->tstats.nrequests;
1587 bin->stats.nfills++;
1588 tbin->tstats.nrequests = 0;
1589 }
Jason Evans86815df2010-03-13 20:32:56 -08001590 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001591 tbin->ncached = i;
Jason Evanse476f8a2010-01-16 09:53:50 -08001592}
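/*
 * Fill arithmetic above, with assumed tuning values: if
 * tcache_bin_info[binind].ncached_max == 64 and tbin->lg_fill_div == 1, then
 * nfill == 32, and the loop stores the first region it obtains at
 * tbin->avail[31] and the last at tbin->avail[0].  Assuming the tcache pops
 * from the high end of avail (tcache_alloc_easy() lives outside this file),
 * the lowest-address regions, which arena_run_reg_alloc() hands out first,
 * are therefore also the first to be reused.
 */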
Jason Evanse476f8a2010-01-16 09:53:50 -08001593
Jason Evans122449b2012-04-06 00:35:09 -07001594void
1595arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
1596{
1597
1598 if (zero) {
1599 size_t redzone_size = bin_info->redzone_size;
1600 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
1601 redzone_size);
1602 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
1603 redzone_size);
1604 } else {
1605 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
1606 bin_info->reg_interval);
1607 }
1608}
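/*
 * Region layout implied by the junk fill above, for a bin with redzones
 * enabled (byte values as used in this file):
 *
 *	ptr - redzone_size           ptr                  ptr + reg_size
 *	|<--- 0xa5 redzone --->|<--- user region --->|<--- 0xa5 redzone --->|
 *
 * reg_interval == reg_size + 2*redzone_size, so junking the whole interval
 * (the zero == false case) covers both redzones and the region itself, while
 * the zero == true case junks only the redzones.
 */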
1609
Jason Evans0d6c5d82013-12-17 15:14:36 -08001610#ifdef JEMALLOC_JET
1611#undef arena_redzone_corruption
1612#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
1613#endif
1614static void
1615arena_redzone_corruption(void *ptr, size_t usize, bool after,
1616 size_t offset, uint8_t byte)
1617{
1618
1619 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
1620 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
1621 after ? "after" : "before", ptr, usize, byte);
1622}
1623#ifdef JEMALLOC_JET
Jason Evans0d6c5d82013-12-17 15:14:36 -08001624#undef arena_redzone_corruption
Jason Evans6b694c42014-01-07 16:47:56 -08001625#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
1626arena_redzone_corruption_t *arena_redzone_corruption =
1627 JEMALLOC_N(arena_redzone_corruption_impl);
Jason Evans0d6c5d82013-12-17 15:14:36 -08001628#endif
1629
1630static void
1631arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07001632{
1633 size_t size = bin_info->reg_size;
1634 size_t redzone_size = bin_info->redzone_size;
1635 size_t i;
1636 bool error = false;
1637
1638 for (i = 1; i <= redzone_size; i++) {
Jason Evans0d6c5d82013-12-17 15:14:36 -08001639 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
1640 if (*byte != 0xa5) {
Jason Evans122449b2012-04-06 00:35:09 -07001641 error = true;
Jason Evans0d6c5d82013-12-17 15:14:36 -08001642 arena_redzone_corruption(ptr, size, false, i, *byte);
1643 if (reset)
1644 *byte = 0xa5;
Jason Evans122449b2012-04-06 00:35:09 -07001645 }
1646 }
1647 for (i = 0; i < redzone_size; i++) {
Jason Evans0d6c5d82013-12-17 15:14:36 -08001648 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
1649 if (*byte != 0xa5) {
Jason Evans122449b2012-04-06 00:35:09 -07001650 error = true;
Jason Evans0d6c5d82013-12-17 15:14:36 -08001651 arena_redzone_corruption(ptr, size, true, i, *byte);
1652 if (reset)
1653 *byte = 0xa5;
Jason Evans122449b2012-04-06 00:35:09 -07001654 }
1655 }
1656 if (opt_abort && error)
1657 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08001658}
Jason Evans122449b2012-04-06 00:35:09 -07001659
Jason Evans6b694c42014-01-07 16:47:56 -08001660#ifdef JEMALLOC_JET
1661#undef arena_dalloc_junk_small
1662#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
1663#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08001664void
1665arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1666{
1667 size_t redzone_size = bin_info->redzone_size;
1668
1669 arena_redzones_validate(ptr, bin_info, false);
Jason Evans122449b2012-04-06 00:35:09 -07001670 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1671 bin_info->reg_interval);
1672}
Jason Evans6b694c42014-01-07 16:47:56 -08001673#ifdef JEMALLOC_JET
1674#undef arena_dalloc_junk_small
1675#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
1676arena_dalloc_junk_small_t *arena_dalloc_junk_small =
1677 JEMALLOC_N(arena_dalloc_junk_small_impl);
1678#endif
Jason Evans122449b2012-04-06 00:35:09 -07001679
Jason Evans0d6c5d82013-12-17 15:14:36 -08001680void
1681arena_quarantine_junk_small(void *ptr, size_t usize)
1682{
1683 size_t binind;
1684 arena_bin_info_t *bin_info;
1685 cassert(config_fill);
1686 assert(opt_junk);
1687 assert(opt_quarantine);
1688 assert(usize <= SMALL_MAXCLASS);
1689
Jason Evans3541a902014-04-16 17:14:33 -07001690 binind = small_size2bin(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08001691 bin_info = &arena_bin_info[binind];
1692 arena_redzones_validate(ptr, bin_info, true);
1693}
1694
Jason Evanse476f8a2010-01-16 09:53:50 -08001695void *
1696arena_malloc_small(arena_t *arena, size_t size, bool zero)
1697{
1698 void *ret;
1699 arena_bin_t *bin;
1700 arena_run_t *run;
1701 size_t binind;
1702
Jason Evans3541a902014-04-16 17:14:33 -07001703 binind = small_size2bin(size);
Jason Evansb1726102012-02-28 16:50:47 -08001704 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08001705 bin = &arena->bins[binind];
Jason Evans3541a902014-04-16 17:14:33 -07001706 size = small_bin2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001707
Jason Evans86815df2010-03-13 20:32:56 -08001708 malloc_mutex_lock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001709 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001710 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001711 else
1712 ret = arena_bin_malloc_hard(arena, bin);
1713
1714 if (ret == NULL) {
Jason Evans86815df2010-03-13 20:32:56 -08001715 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001716 return (NULL);
1717 }
1718
Jason Evans7372b152012-02-10 20:22:09 -08001719 if (config_stats) {
1720 bin->stats.allocated += size;
1721 bin->stats.nmalloc++;
1722 bin->stats.nrequests++;
1723 }
Jason Evans86815df2010-03-13 20:32:56 -08001724 malloc_mutex_unlock(&bin->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08001725 if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
1726 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08001727
1728 if (zero == false) {
Jason Evans7372b152012-02-10 20:22:09 -08001729 if (config_fill) {
Jason Evans122449b2012-04-06 00:35:09 -07001730 if (opt_junk) {
1731 arena_alloc_junk_small(ret,
1732 &arena_bin_info[binind], false);
1733 } else if (opt_zero)
Jason Evans7372b152012-02-10 20:22:09 -08001734 memset(ret, 0, size);
1735 }
Jason Evansbd87b012014-04-15 16:35:08 -07001736 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evans122449b2012-04-06 00:35:09 -07001737 } else {
1738 if (config_fill && opt_junk) {
1739 arena_alloc_junk_small(ret, &arena_bin_info[binind],
1740 true);
1741 }
Jason Evansbd87b012014-04-15 16:35:08 -07001742 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001743 memset(ret, 0, size);
Jason Evans122449b2012-04-06 00:35:09 -07001744 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001745
1746 return (ret);
1747}
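/*
 * Size-class round trip used above, assuming the standard small size classes:
 * a 13-byte request gives binind = small_size2bin(13), the bin whose reg_size
 * is 16, and size = small_bin2size(binind) == 16, so the stats updates and
 * junk/zero fills operate on the rounded-up region size rather than the
 * original request.
 */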
1748
1749void *
Jason Evanse476f8a2010-01-16 09:53:50 -08001750arena_malloc_large(arena_t *arena, size_t size, bool zero)
1751{
1752 void *ret;
Jason Evans88c222c2013-02-06 11:59:30 -08001753 UNUSED bool idump;
Jason Evanse476f8a2010-01-16 09:53:50 -08001754
1755 /* Large allocation. */
1756 size = PAGE_CEILING(size);
1757 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001758 ret = (void *)arena_run_alloc_large(arena, size, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08001759 if (ret == NULL) {
1760 malloc_mutex_unlock(&arena->lock);
1761 return (NULL);
1762 }
Jason Evans7372b152012-02-10 20:22:09 -08001763 if (config_stats) {
1764 arena->stats.nmalloc_large++;
1765 arena->stats.nrequests_large++;
1766 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001767 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1768 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1769 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001770 }
Jason Evans7372b152012-02-10 20:22:09 -08001771 if (config_prof)
Jason Evans88c222c2013-02-06 11:59:30 -08001772 idump = arena_prof_accum_locked(arena, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001773 malloc_mutex_unlock(&arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08001774 if (config_prof && idump)
1775 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08001776
1777 if (zero == false) {
Jason Evans7372b152012-02-10 20:22:09 -08001778 if (config_fill) {
1779 if (opt_junk)
1780 memset(ret, 0xa5, size);
1781 else if (opt_zero)
1782 memset(ret, 0, size);
1783 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001784 }
1785
1786 return (ret);
1787}
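/*
 * lstats indexing above, assuming 4 KiB pages: a large request rounded to
 * size = 12288 (three pages) updates
 *
 *	arena->stats.lstats[(12288 >> LG_PAGE) - 1] == lstats[2],
 *
 * i.e. the lstats array is indexed by page count minus one, with lstats[0]
 * tracking single-page runs.
 */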
1788
Jason Evanse476f8a2010-01-16 09:53:50 -08001789/* Only handles large allocations that require more than page alignment. */
1790void *
Jason Evans5ff709c2012-04-11 18:13:45 -07001791arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001792{
1793 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07001794 size_t alloc_size, leadsize, trailsize;
1795 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001796 arena_chunk_t *chunk;
1797
1798 assert((size & PAGE_MASK) == 0);
Jason Evans93443682010-10-20 17:39:18 -07001799
1800 alignment = PAGE_CEILING(alignment);
Jason Evans5ff709c2012-04-11 18:13:45 -07001801 alloc_size = size + alignment - PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001802
1803 malloc_mutex_lock(&arena->lock);
Jason Evansaa5113b2014-01-14 16:23:03 -08001804 run = arena_run_alloc_large(arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07001805 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001806 malloc_mutex_unlock(&arena->lock);
1807 return (NULL);
1808 }
Jason Evans5ff709c2012-04-11 18:13:45 -07001809 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evanse476f8a2010-01-16 09:53:50 -08001810
Jason Evans5ff709c2012-04-11 18:13:45 -07001811 leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
1812 (uintptr_t)run;
1813 assert(alloc_size >= leadsize + size);
1814 trailsize = alloc_size - leadsize - size;
1815 ret = (void *)((uintptr_t)run + leadsize);
1816 if (leadsize != 0) {
1817 arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
1818 leadsize);
1819 }
1820 if (trailsize != 0) {
1821 arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
1822 false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001823 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001824 arena_run_init_large(arena, (arena_run_t *)ret, size, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08001825
Jason Evans7372b152012-02-10 20:22:09 -08001826 if (config_stats) {
1827 arena->stats.nmalloc_large++;
1828 arena->stats.nrequests_large++;
1829 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001830 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1831 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1832 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001833 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001834 malloc_mutex_unlock(&arena->lock);
1835
Jason Evans7372b152012-02-10 20:22:09 -08001836 if (config_fill && zero == false) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001837 if (opt_junk)
1838 memset(ret, 0xa5, size);
1839 else if (opt_zero)
1840 memset(ret, 0, size);
1841 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001842 return (ret);
1843}
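/*
 * Alignment arithmetic above, worked with assumed numbers (4 KiB pages):
 * size = 16384 and alignment = 8192 give alloc_size = 16384 + 8192 - 4096 =
 * 20480.  If arena_run_alloc_large() returns a run at, say, chunk offset
 * 0x5000 (page aligned but not 8 KiB aligned), then
 *
 *	leadsize  = ALIGNMENT_CEILING(0x5000, 0x2000) - 0x5000 = 0x1000
 *	ret       = (void *)((uintptr_t)run + 0x1000)	(now 8 KiB aligned)
 *	trailsize = 20480 - 4096 - 16384 = 0
 *
 * so only the one-page head is trimmed and returned to the arena.
 */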
1844
Jason Evans0b270a92010-03-31 16:45:04 -07001845void
1846arena_prof_promoted(const void *ptr, size_t size)
1847{
1848 arena_chunk_t *chunk;
1849 size_t pageind, binind;
1850
Jason Evans78f73522012-04-18 13:38:40 -07001851 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07001852 assert(ptr != NULL);
1853 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evans122449b2012-04-06 00:35:09 -07001854 assert(isalloc(ptr, false) == PAGE);
1855 assert(isalloc(ptr, true) == PAGE);
Jason Evansb1726102012-02-28 16:50:47 -08001856 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07001857
1858 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07001859 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans3541a902014-04-16 17:14:33 -07001860 binind = small_size2bin(size);
Jason Evansb1726102012-02-28 16:50:47 -08001861 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07001862 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07001863
Jason Evans122449b2012-04-06 00:35:09 -07001864 assert(isalloc(ptr, false) == PAGE);
1865 assert(isalloc(ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07001866}
Jason Evans6109fe02010-02-10 10:37:56 -08001867
Jason Evanse476f8a2010-01-16 09:53:50 -08001868static void
Jason Evans088e6a02010-10-18 00:04:44 -07001869arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08001870 arena_bin_t *bin)
1871{
Jason Evanse476f8a2010-01-16 09:53:50 -08001872
Jason Evans19b3d612010-03-18 20:36:40 -07001873 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08001874 if (run == bin->runcur)
1875 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001876 else {
1877 size_t binind = arena_bin_index(chunk->arena, bin);
1878 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1879
1880 if (bin_info->nregs != 1) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07001881 /*
1882 * This block's conditional is necessary because if the
1883 * run only contains one region, then it never gets
1884 * inserted into the non-full runs tree.
1885 */
Jason Evanse7a10582012-02-13 17:36:52 -08001886 arena_bin_runs_remove(bin, run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001887 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001888 }
Jason Evans088e6a02010-10-18 00:04:44 -07001889}
1890
1891static void
1892arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1893 arena_bin_t *bin)
1894{
Jason Evans49f7e8f2011-03-15 13:59:15 -07001895 size_t binind;
1896 arena_bin_info_t *bin_info;
Jason Evans088e6a02010-10-18 00:04:44 -07001897 size_t npages, run_ind, past;
1898
1899 assert(run != bin->runcur);
Jason Evans203484e2012-05-02 00:30:36 -07001900 assert(arena_run_tree_search(&bin->runs,
1901 arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
1902 == NULL);
Jason Evans86815df2010-03-13 20:32:56 -08001903
Jason Evans49f7e8f2011-03-15 13:59:15 -07001904 binind = arena_bin_index(chunk->arena, run->bin);
1905 bin_info = &arena_bin_info[binind];
1906
Jason Evanse00572b2010-03-14 19:43:56 -07001907 malloc_mutex_unlock(&bin->lock);
1908 /******************************/
Jason Evansae4c7b42012-04-02 07:04:34 -07001909 npages = bin_info->run_size >> LG_PAGE;
1910 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
Jason Evans84c8eef2011-03-16 10:30:13 -07001911 past = (size_t)(PAGE_CEILING((uintptr_t)run +
1912 (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
Jason Evans122449b2012-04-06 00:35:09 -07001913 bin_info->reg_interval - bin_info->redzone_size) -
1914 (uintptr_t)chunk) >> LG_PAGE);
Jason Evans86815df2010-03-13 20:32:56 -08001915 malloc_mutex_lock(&arena->lock);
Jason Evans19b3d612010-03-18 20:36:40 -07001916
1917 /*
1918 * If the run was originally clean, and some pages were never touched,
1919 * trim the clean pages before deallocating the dirty portion of the
1920 * run.
1921 */
Jason Evans30fe12b2012-05-10 17:09:17 -07001922 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1923 arena_mapbits_dirty_get(chunk, run_ind+npages-1));
Jason Evans203484e2012-05-02 00:30:36 -07001924 if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
1925 npages) {
Jason Evans30fe12b2012-05-10 17:09:17 -07001926 /* Trim clean pages. Convert to large run beforehand. */
1927 assert(npages > 0);
Jason Evansd8ceef62012-05-10 20:59:39 -07001928 arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
1929 arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
Jason Evansae4c7b42012-04-02 07:04:34 -07001930 arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
1931 ((past - run_ind) << LG_PAGE), false);
Jason Evans940a2e02010-10-17 17:51:37 -07001932 /* npages = past - run_ind; */
Jason Evans1e0a6362010-03-13 13:41:58 -08001933 }
Jason Evanse3d13062012-10-30 15:42:37 -07001934 arena_run_dalloc(arena, run, true, false);
Jason Evans86815df2010-03-13 20:32:56 -08001935 malloc_mutex_unlock(&arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07001936 /****************************/
1937 malloc_mutex_lock(&bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08001938 if (config_stats)
1939 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08001940}
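/*
 * Sketch of the clean-page trim above, under assumed numbers: for a never-
 * dirtied run of npages == 4 whose regions only ever occupied the first two
 * pages, past - run_ind == 2 < npages, so the run is first re-labelled as a
 * large run and arena_run_trim_tail() then returns the two untouched trailing
 * pages without marking them dirty; only the touched two-page prefix reaches
 * arena_run_dalloc() with dirty == true.
 */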
1941
Jason Evans940a2e02010-10-17 17:51:37 -07001942static void
1943arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1944 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08001945{
Jason Evanse476f8a2010-01-16 09:53:50 -08001946
Jason Evans8de6a022010-10-17 20:57:30 -07001947 /*
Jason Evanse7a10582012-02-13 17:36:52 -08001948 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
1949 * non-full run. It is okay to NULL runcur out rather than proactively
1950 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07001951 */
Jason Evanse7a10582012-02-13 17:36:52 -08001952 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07001953 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08001954 if (bin->runcur->nfree > 0)
1955 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07001956 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08001957 if (config_stats)
1958 bin->stats.reruns++;
1959 } else
1960 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07001961}
1962
1963void
Jason Evans203484e2012-05-02 00:30:36 -07001964arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans940a2e02010-10-17 17:51:37 -07001965 arena_chunk_map_t *mapelm)
1966{
1967 size_t pageind;
1968 arena_run_t *run;
1969 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02001970 arena_bin_info_t *bin_info;
1971 size_t size, binind;
Jason Evans940a2e02010-10-17 17:51:37 -07001972
Jason Evansae4c7b42012-04-02 07:04:34 -07001973 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07001974 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
Jason Evans203484e2012-05-02 00:30:36 -07001975 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
Jason Evans940a2e02010-10-17 17:51:37 -07001976 bin = run->bin;
Ben Maurerf9ff6032014-04-06 13:24:16 -07001977 binind = arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
	    pageind));
Mike Hommey8b499712012-04-24 23:22:02 +02001978 bin_info = &arena_bin_info[binind];
Jason Evans7372b152012-02-10 20:22:09 -08001979 if (config_fill || config_stats)
1980 size = bin_info->reg_size;
Jason Evans940a2e02010-10-17 17:51:37 -07001981
Jason Evans7372b152012-02-10 20:22:09 -08001982 if (config_fill && opt_junk)
Jason Evans122449b2012-04-06 00:35:09 -07001983 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07001984
1985 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001986 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07001987 arena_dissociate_bin_run(chunk, run, bin);
Jason Evans8de6a022010-10-17 20:57:30 -07001988 arena_dalloc_bin_run(arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07001989 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07001990 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08001991
Jason Evans7372b152012-02-10 20:22:09 -08001992 if (config_stats) {
1993 bin->stats.allocated -= size;
1994 bin->stats.ndalloc++;
1995 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001996}
1997
Jason Evanse476f8a2010-01-16 09:53:50 -08001998void
Jason Evans203484e2012-05-02 00:30:36 -07001999arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2000 size_t pageind, arena_chunk_map_t *mapelm)
2001{
2002 arena_run_t *run;
2003 arena_bin_t *bin;
2004
2005 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
2006 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
2007 bin = run->bin;
2008 malloc_mutex_lock(&bin->lock);
2009 arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
2010 malloc_mutex_unlock(&bin->lock);
2011}
2012
2013void
2014arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2015 size_t pageind)
2016{
2017 arena_chunk_map_t *mapelm;
2018
2019 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07002020 /* arena_ptr_small_binind_get() does extra sanity checking. */
2021 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2022 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07002023 }
2024 mapelm = arena_mapp_get(chunk, pageind);
2025 arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
2026}
Jason Evanse476f8a2010-01-16 09:53:50 -08002027
Jason Evans6b694c42014-01-07 16:47:56 -08002028#ifdef JEMALLOC_JET
2029#undef arena_dalloc_junk_large
2030#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
2031#endif
2032static void
2033arena_dalloc_junk_large(void *ptr, size_t usize)
2034{
2035
2036 if (config_fill && opt_junk)
2037 memset(ptr, 0x5a, usize);
2038}
2039#ifdef JEMALLOC_JET
2040#undef arena_dalloc_junk_large
2041#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2042arena_dalloc_junk_large_t *arena_dalloc_junk_large =
2043 JEMALLOC_N(arena_dalloc_junk_large_impl);
2044#endif
2045
Jason Evanse476f8a2010-01-16 09:53:50 -08002046void
Jason Evans203484e2012-05-02 00:30:36 -07002047arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
Jason Evanse476f8a2010-01-16 09:53:50 -08002048{
Jason Evans13668262010-01-31 03:57:29 -08002049
Jason Evans7372b152012-02-10 20:22:09 -08002050 if (config_fill || config_stats) {
Jason Evansae4c7b42012-04-02 07:04:34 -07002051 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans6b694c42014-01-07 16:47:56 -08002052 size_t usize = arena_mapbits_large_size_get(chunk, pageind);
Jason Evanse476f8a2010-01-16 09:53:50 -08002053
Jason Evans6b694c42014-01-07 16:47:56 -08002054 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002055 if (config_stats) {
2056 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08002057 arena->stats.allocated_large -= usize;
2058 arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
2059 arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08002060 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002061 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002062
Jason Evanse3d13062012-10-30 15:42:37 -07002063 arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002064}
2065
Jason Evans203484e2012-05-02 00:30:36 -07002066void
2067arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
2068{
2069
2070 malloc_mutex_lock(&arena->lock);
2071 arena_dalloc_large_locked(arena, chunk, ptr);
2072 malloc_mutex_unlock(&arena->lock);
2073}
2074
Jason Evanse476f8a2010-01-16 09:53:50 -08002075static void
2076arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07002077 size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08002078{
2079
2080 assert(size < oldsize);
2081
2082 /*
2083 * Shrink the run, and make trailing pages available for other
2084 * allocations.
2085 */
2086 malloc_mutex_lock(&arena->lock);
2087 arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
2088 true);
Jason Evans7372b152012-02-10 20:22:09 -08002089 if (config_stats) {
2090 arena->stats.ndalloc_large++;
2091 arena->stats.allocated_large -= oldsize;
Jason Evansae4c7b42012-04-02 07:04:34 -07002092 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
2093 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08002094
Jason Evans7372b152012-02-10 20:22:09 -08002095 arena->stats.nmalloc_large++;
2096 arena->stats.nrequests_large++;
2097 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07002098 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
2099 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
2100 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002101 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002102 malloc_mutex_unlock(&arena->lock);
2103}
2104
2105static bool
2106arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07002107 size_t oldsize, size_t size, size_t extra, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002108{
Jason Evansae4c7b42012-04-02 07:04:34 -07002109 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2110 size_t npages = oldsize >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002111 size_t followsize;
Jason Evanse476f8a2010-01-16 09:53:50 -08002112
Jason Evans203484e2012-05-02 00:30:36 -07002113 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
Jason Evanse476f8a2010-01-16 09:53:50 -08002114
2115 /* Try to extend the run. */
Jason Evans8e3c3c62010-09-17 15:46:18 -07002116 assert(size + extra > oldsize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002117 malloc_mutex_lock(&arena->lock);
Jason Evans7393f442010-10-01 17:35:43 -07002118 if (pageind + npages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07002119 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
2120 (followsize = arena_mapbits_unallocated_size_get(chunk,
2121 pageind+npages)) >= size - oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002122 /*
2123 * The next run is available and sufficiently large. Split the
2124 * following run, then merge the first part with the existing
2125 * allocation.
2126 */
Jason Evans940a2e02010-10-17 17:51:37 -07002127 size_t flag_dirty;
Jason Evans8e3c3c62010-09-17 15:46:18 -07002128 size_t splitsize = (oldsize + followsize <= size + extra)
2129 ? followsize : size + extra - oldsize;
Jason Evansaa5113b2014-01-14 16:23:03 -08002130 arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk +
2131 ((pageind+npages) << LG_PAGE)), splitsize, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08002132
Jason Evans088e6a02010-10-18 00:04:44 -07002133 size = oldsize + splitsize;
Jason Evansae4c7b42012-04-02 07:04:34 -07002134 npages = size >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07002135
2136 /*
2137 * Mark the extended run as dirty if either portion of the run
2138 * was dirty before allocation. This is rather pedantic,
2139 * because there's not actually any sequence of events that
2140 * could cause the resulting run to be passed to
2141 * arena_run_dalloc() with the dirty argument set to false
2142 * (which is when dirty flag consistency would really matter).
2143 */
Jason Evans203484e2012-05-02 00:30:36 -07002144 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
2145 arena_mapbits_dirty_get(chunk, pageind+npages-1);
2146 arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
2147 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08002148
Jason Evans7372b152012-02-10 20:22:09 -08002149 if (config_stats) {
2150 arena->stats.ndalloc_large++;
2151 arena->stats.allocated_large -= oldsize;
Jason Evans203484e2012-05-02 00:30:36 -07002152 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
2153 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08002154
Jason Evans7372b152012-02-10 20:22:09 -08002155 arena->stats.nmalloc_large++;
2156 arena->stats.nrequests_large++;
2157 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07002158 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
Jason Evans203484e2012-05-02 00:30:36 -07002159 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
Jason Evansae4c7b42012-04-02 07:04:34 -07002160 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07002161 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002162 malloc_mutex_unlock(&arena->lock);
2163 return (false);
2164 }
2165 malloc_mutex_unlock(&arena->lock);
2166
2167 return (true);
2168}
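/*
 * In-place grow example, with assumed sizes (4 KiB pages): oldsize = 8192,
 * size = 16384, extra = 4096.  Growing requires the next run to be
 * unallocated with followsize >= size - oldsize = 8192.  If followsize ==
 * 12288, then oldsize + followsize (20480) <= size + extra (20480), so
 * splitsize = followsize = 12288 and the merged run becomes 8192 + 12288 =
 * 20480 bytes; had followsize been larger, only size + extra - oldsize would
 * have been split off.
 */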
2169
Jason Evans6b694c42014-01-07 16:47:56 -08002170#ifdef JEMALLOC_JET
2171#undef arena_ralloc_junk_large
2172#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
2173#endif
2174static void
2175arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
2176{
2177
2178 if (config_fill && opt_junk) {
2179 memset((void *)((uintptr_t)ptr + usize), 0x5a,
2180 old_usize - usize);
2181 }
2182}
2183#ifdef JEMALLOC_JET
2184#undef arena_ralloc_junk_large
2185#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
2186arena_ralloc_junk_large_t *arena_ralloc_junk_large =
2187 JEMALLOC_N(arena_ralloc_junk_large_impl);
2188#endif
2189
Jason Evanse476f8a2010-01-16 09:53:50 -08002190/*
2191 * Try to resize a large allocation, in order to avoid copying. This will
2192 * always fail when growing an object if the following run is already in use.
2193 */
2194static bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07002195arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
2196 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002197{
2198 size_t psize;
2199
Jason Evans8e3c3c62010-09-17 15:46:18 -07002200 psize = PAGE_CEILING(size + extra);
Jason Evanse476f8a2010-01-16 09:53:50 -08002201 if (psize == oldsize) {
2202 /* Same size class. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002203 return (false);
2204 } else {
2205 arena_chunk_t *chunk;
2206 arena_t *arena;
2207
2208 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2209 arena = chunk->arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08002210
2211 if (psize < oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002212 /* Fill before shrinking in order to avoid a race. */
Jason Evans6b694c42014-01-07 16:47:56 -08002213 arena_ralloc_junk_large(ptr, oldsize, psize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002214 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
2215 psize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002216 return (false);
2217 } else {
2218 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07002219 oldsize, PAGE_CEILING(size),
2220 psize - PAGE_CEILING(size), zero);
Jason Evans6b694c42014-01-07 16:47:56 -08002221 if (config_fill && ret == false && zero == false) {
2222 if (opt_junk) {
2223 memset((void *)((uintptr_t)ptr +
2224 oldsize), 0xa5, isalloc(ptr,
2225 config_prof) - oldsize);
2226 } else if (opt_zero) {
2227 memset((void *)((uintptr_t)ptr +
2228 oldsize), 0, isalloc(ptr,
2229 config_prof) - oldsize);
2230 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002231 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002232 return (ret);
2233 }
2234 }
2235}
2236
Jason Evansb2c31662014-01-12 15:05:44 -08002237bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07002238arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
2239 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002240{
Jason Evanse476f8a2010-01-16 09:53:50 -08002241
Jason Evans8e3c3c62010-09-17 15:46:18 -07002242 /*
2243 * Avoid moving the allocation if the size class can be left the same.
2244 */
Jason Evanse476f8a2010-01-16 09:53:50 -08002245 if (oldsize <= arena_maxclass) {
Jason Evansb1726102012-02-28 16:50:47 -08002246 if (oldsize <= SMALL_MAXCLASS) {
Jason Evans3541a902014-04-16 17:14:33 -07002247 assert(arena_bin_info[small_size2bin(oldsize)].reg_size
Jason Evans49f7e8f2011-03-15 13:59:15 -07002248 == oldsize);
Jason Evansb1726102012-02-28 16:50:47 -08002249 if ((size + extra <= SMALL_MAXCLASS &&
Jason Evans3541a902014-04-16 17:14:33 -07002250 small_size2bin(size + extra) ==
2251 small_size2bin(oldsize)) || (size <= oldsize &&
Jason Evans6e629842013-12-15 21:49:40 -08002252 size + extra >= oldsize))
Jason Evansb2c31662014-01-12 15:05:44 -08002253 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002254 } else {
2255 assert(size <= arena_maxclass);
Jason Evansb1726102012-02-28 16:50:47 -08002256 if (size + extra > SMALL_MAXCLASS) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07002257 if (arena_ralloc_large(ptr, oldsize, size,
2258 extra, zero) == false)
Jason Evansb2c31662014-01-12 15:05:44 -08002259 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002260 }
2261 }
2262 }
2263
Jason Evans8e3c3c62010-09-17 15:46:18 -07002264 /* Reallocation would require a move. */
Jason Evansb2c31662014-01-12 15:05:44 -08002265 return (true);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002266}
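/*
 * Examples of the no-move test above, assuming the standard small size
 * classes: growing a 16-byte allocation to 24 bytes (extra == 0) must move,
 * since 24 maps to a larger bin; shrinking a 32-byte allocation to 24 bytes
 * stays in place, since 24 still maps to the 32-byte bin.  Independently, a
 * small request with size <= oldsize <= size + extra reuses the existing
 * region.  Large allocations defer to arena_ralloc_large().
 */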
Jason Evanse476f8a2010-01-16 09:53:50 -08002267
Jason Evans8e3c3c62010-09-17 15:46:18 -07002268void *
Jason Evans609ae592012-10-11 13:53:15 -07002269arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
2270 size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
2271 bool try_tcache_dalloc)
Jason Evans8e3c3c62010-09-17 15:46:18 -07002272{
2273 void *ret;
2274 size_t copysize;
2275
2276 /* Try to avoid moving the allocation. */
Jason Evansb2c31662014-01-12 15:05:44 -08002277 if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
2278 return (ptr);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002279
Jason Evans8e3c3c62010-09-17 15:46:18 -07002280 /*
2281 * size and oldsize are different enough that we need to move the
2282 * object. In that case, fall back to allocating new space and
2283 * copying.
2284 */
Jason Evans38d92102011-03-23 00:37:29 -07002285 if (alignment != 0) {
Jason Evans5ff709c2012-04-11 18:13:45 -07002286 size_t usize = sa2u(size + extra, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002287 if (usize == 0)
2288 return (NULL);
Jason Evansd82a5e62013-12-12 22:35:52 -08002289 ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
Jason Evans38d92102011-03-23 00:37:29 -07002290 } else
Jason Evans609ae592012-10-11 13:53:15 -07002291 ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002292
2293 if (ret == NULL) {
2294 if (extra == 0)
2295 return (NULL);
2296 /* Try again, this time without extra. */
Jason Evans38d92102011-03-23 00:37:29 -07002297 if (alignment != 0) {
Jason Evans5ff709c2012-04-11 18:13:45 -07002298 size_t usize = sa2u(size, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002299 if (usize == 0)
2300 return (NULL);
Jason Evansd82a5e62013-12-12 22:35:52 -08002301 ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
Jason Evans609ae592012-10-11 13:53:15 -07002302 arena);
Jason Evans38d92102011-03-23 00:37:29 -07002303 } else
Jason Evans609ae592012-10-11 13:53:15 -07002304 ret = arena_malloc(arena, size, zero, try_tcache_alloc);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002305
2306 if (ret == NULL)
2307 return (NULL);
2308 }
2309
2310 /* Junk/zero-filling were already done by ipalloct()/arena_malloc(). */
2311
2312 /*
2313 * Copy at most size bytes (not size+extra), since the caller has no
2314 * expectation that the extra bytes will be reliably preserved.
2315 */
Jason Evanse476f8a2010-01-16 09:53:50 -08002316 copysize = (size < oldsize) ? size : oldsize;
Jason Evansbd87b012014-04-15 16:35:08 -07002317 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002318 memcpy(ret, ptr, copysize);
Jason Evansd82a5e62013-12-12 22:35:52 -08002319 iqalloct(ptr, try_tcache_dalloc);
Jason Evanse476f8a2010-01-16 09:53:50 -08002320 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08002321}
2322
Jason Evans609ae592012-10-11 13:53:15 -07002323dss_prec_t
2324arena_dss_prec_get(arena_t *arena)
2325{
2326 dss_prec_t ret;
2327
2328 malloc_mutex_lock(&arena->lock);
2329 ret = arena->dss_prec;
2330 malloc_mutex_unlock(&arena->lock);
2331 return (ret);
2332}
2333
Jason Evans4d434ad2014-04-15 12:09:48 -07002334bool
Jason Evans609ae592012-10-11 13:53:15 -07002335arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2336{
2337
Jason Evans4d434ad2014-04-15 12:09:48 -07002338 if (have_dss == false)
2339 return (dss_prec != dss_prec_disabled);
Jason Evans609ae592012-10-11 13:53:15 -07002340 malloc_mutex_lock(&arena->lock);
2341 arena->dss_prec = dss_prec;
2342 malloc_mutex_unlock(&arena->lock);
Jason Evans4d434ad2014-04-15 12:09:48 -07002343 return (false);
Jason Evans609ae592012-10-11 13:53:15 -07002344}
2345
2346void
2347arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
2348 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
2349 malloc_large_stats_t *lstats)
2350{
2351 unsigned i;
2352
2353 malloc_mutex_lock(&arena->lock);
2354 *dss = dss_prec_names[arena->dss_prec];
2355 *nactive += arena->nactive;
2356 *ndirty += arena->ndirty;
2357
2358 astats->mapped += arena->stats.mapped;
2359 astats->npurge += arena->stats.npurge;
2360 astats->nmadvise += arena->stats.nmadvise;
2361 astats->purged += arena->stats.purged;
2362 astats->allocated_large += arena->stats.allocated_large;
2363 astats->nmalloc_large += arena->stats.nmalloc_large;
2364 astats->ndalloc_large += arena->stats.ndalloc_large;
2365 astats->nrequests_large += arena->stats.nrequests_large;
Jason Evanse2deab72014-05-15 22:22:27 -07002366 astats->allocated_huge += arena->stats.allocated_huge;
2367 astats->nmalloc_huge += arena->stats.nmalloc_huge;
2368 astats->ndalloc_huge += arena->stats.ndalloc_huge;
2369 astats->nrequests_huge += arena->stats.nrequests_huge;
Jason Evans609ae592012-10-11 13:53:15 -07002370
2371 for (i = 0; i < nlclasses; i++) {
2372 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
2373 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
2374 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
2375 lstats[i].curruns += arena->stats.lstats[i].curruns;
2376 }
2377 malloc_mutex_unlock(&arena->lock);
2378
2379 for (i = 0; i < NBINS; i++) {
2380 arena_bin_t *bin = &arena->bins[i];
2381
2382 malloc_mutex_lock(&bin->lock);
2383 bstats[i].allocated += bin->stats.allocated;
2384 bstats[i].nmalloc += bin->stats.nmalloc;
2385 bstats[i].ndalloc += bin->stats.ndalloc;
2386 bstats[i].nrequests += bin->stats.nrequests;
2387 if (config_tcache) {
2388 bstats[i].nfills += bin->stats.nfills;
2389 bstats[i].nflushes += bin->stats.nflushes;
2390 }
2391 bstats[i].nruns += bin->stats.nruns;
2392 bstats[i].reruns += bin->stats.reruns;
2393 bstats[i].curruns += bin->stats.curruns;
2394 malloc_mutex_unlock(&bin->lock);
2395 }
2396}
2397
Jason Evanse476f8a2010-01-16 09:53:50 -08002398bool
2399arena_new(arena_t *arena, unsigned ind)
2400{
2401 unsigned i;
2402 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002403
Jason Evans6109fe02010-02-10 10:37:56 -08002404 arena->ind = ind;
Jason Evans597632b2011-03-18 13:41:33 -07002405 arena->nthreads = 0;
aravindfb7fe502014-05-05 15:16:56 -07002406 arena->chunk_alloc = chunk_alloc_default;
Jason Evanse2deab72014-05-15 22:22:27 -07002407 arena->chunk_dalloc = chunk_dalloc_default;
Jason Evans6109fe02010-02-10 10:37:56 -08002408
Jason Evanse476f8a2010-01-16 09:53:50 -08002409 if (malloc_mutex_init(&arena->lock))
2410 return (true);
2411
Jason Evans7372b152012-02-10 20:22:09 -08002412 if (config_stats) {
2413 memset(&arena->stats, 0, sizeof(arena_stats_t));
2414 arena->stats.lstats =
2415 (malloc_large_stats_t *)base_alloc(nlclasses *
2416 sizeof(malloc_large_stats_t));
2417 if (arena->stats.lstats == NULL)
2418 return (true);
2419 memset(arena->stats.lstats, 0, nlclasses *
2420 sizeof(malloc_large_stats_t));
2421 if (config_tcache)
2422 ql_new(&arena->tcache_ql);
2423 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002424
Jason Evans7372b152012-02-10 20:22:09 -08002425 if (config_prof)
2426 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08002427
Jason Evans609ae592012-10-11 13:53:15 -07002428 arena->dss_prec = chunk_dss_prec_get();
2429
Jason Evanse476f8a2010-01-16 09:53:50 -08002430 /* Initialize chunks. */
Jason Evanse3d13062012-10-30 15:42:37 -07002431 arena_chunk_dirty_new(&arena->chunks_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08002432 arena->spare = NULL;
2433
2434 arena->nactive = 0;
2435 arena->ndirty = 0;
Jason Evans799ca0b2010-04-08 20:31:58 -07002436 arena->npurgatory = 0;
Jason Evanse476f8a2010-01-16 09:53:50 -08002437
Jason Evanse3d13062012-10-30 15:42:37 -07002438 arena_avail_tree_new(&arena->runs_avail);
Jason Evanse476f8a2010-01-16 09:53:50 -08002439
2440 /* Initialize bins. */
Jason Evansb1726102012-02-28 16:50:47 -08002441 for (i = 0; i < NBINS; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002442 bin = &arena->bins[i];
Jason Evans86815df2010-03-13 20:32:56 -08002443 if (malloc_mutex_init(&bin->lock))
2444 return (true);
Jason Evanse476f8a2010-01-16 09:53:50 -08002445 bin->runcur = NULL;
2446 arena_run_tree_new(&bin->runs);
Jason Evans7372b152012-02-10 20:22:09 -08002447 if (config_stats)
2448 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
Jason Evanse476f8a2010-01-16 09:53:50 -08002449 }
2450
Jason Evanse476f8a2010-01-16 09:53:50 -08002451 return (false);
2452}
2453
Jason Evans49f7e8f2011-03-15 13:59:15 -07002454/*
2455 * Calculate bin_info->run_size such that it meets the following constraints:
2456 *
2457 * *) bin_info->run_size >= min_run_size
2458 * *) bin_info->run_size <= arena_maxclass
2459 * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
Jason Evans47e57f92011-03-22 09:00:56 -07002460 * *) bin_info->nregs <= RUN_MAXREGS
Jason Evans49f7e8f2011-03-15 13:59:15 -07002461 *
Jason Evans84c8eef2011-03-16 10:30:13 -07002462 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
2463 * calculated here, since these settings are all interdependent.
Jason Evans49f7e8f2011-03-15 13:59:15 -07002464 */
2465static size_t
2466bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
2467{
Jason Evans122449b2012-04-06 00:35:09 -07002468 size_t pad_size;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002469 size_t try_run_size, good_run_size;
2470 uint32_t try_nregs, good_nregs;
2471 uint32_t try_hdr_size, good_hdr_size;
Jason Evans84c8eef2011-03-16 10:30:13 -07002472 uint32_t try_bitmap_offset, good_bitmap_offset;
Jason Evans122449b2012-04-06 00:35:09 -07002473 uint32_t try_redzone0_offset, good_redzone0_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002474
Jason Evansae4c7b42012-04-02 07:04:34 -07002475 assert(min_run_size >= PAGE);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002476 assert(min_run_size <= arena_maxclass);
2477
2478 /*
Jason Evans122449b2012-04-06 00:35:09 -07002479 * Determine redzone size based on minimum alignment and minimum
2480 * redzone size. Add padding to the end of the run if it is needed to
2481 * align the regions. The padding allows each redzone to be half the
2482 * minimum alignment; without the padding, each redzone would have to
2483 * be twice as large in order to maintain alignment.
2484 */
2485 if (config_fill && opt_redzone) {
Richard Diamond9c3a10f2014-05-28 21:37:02 -05002486 size_t align_min = ZU(1) <<
		    (jemalloc_ffs(bin_info->reg_size) - 1);
Jason Evans122449b2012-04-06 00:35:09 -07002487 if (align_min <= REDZONE_MINSIZE) {
2488 bin_info->redzone_size = REDZONE_MINSIZE;
2489 pad_size = 0;
2490 } else {
2491 bin_info->redzone_size = align_min >> 1;
2492 pad_size = bin_info->redzone_size;
2493 }
2494 } else {
2495 bin_info->redzone_size = 0;
2496 pad_size = 0;
2497 }
2498 bin_info->reg_interval = bin_info->reg_size +
2499 (bin_info->redzone_size << 1);
2500
2501 /*
Jason Evans49f7e8f2011-03-15 13:59:15 -07002502 * Calculate known-valid settings before entering the run_size
2503 * expansion loop, so that the first part of the loop always copies
2504 * valid settings.
2505 *
2506 * The do..while loop iteratively reduces the number of regions until
2507 * the run header and the regions no longer overlap. A closed formula
2508 * would be quite messy, since there is an interdependency between the
2509 * header's mask length and the number of regions.
2510 */
2511 try_run_size = min_run_size;
Jason Evans122449b2012-04-06 00:35:09 -07002512 try_nregs = ((try_run_size - sizeof(arena_run_t)) /
2513 bin_info->reg_interval)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002514 + 1; /* Counter-act try_nregs-- in loop. */
Jason Evans47e57f92011-03-22 09:00:56 -07002515 if (try_nregs > RUN_MAXREGS) {
2516 try_nregs = RUN_MAXREGS
2517 + 1; /* Counter-act try_nregs-- in loop. */
2518 }
Jason Evans49f7e8f2011-03-15 13:59:15 -07002519 do {
2520 try_nregs--;
2521 try_hdr_size = sizeof(arena_run_t);
Jason Evans84c8eef2011-03-16 10:30:13 -07002522 /* Pad to a long boundary. */
2523 try_hdr_size = LONG_CEILING(try_hdr_size);
2524 try_bitmap_offset = try_hdr_size;
2525 /* Add space for bitmap. */
2526 try_hdr_size += bitmap_size(try_nregs);
Jason Evans122449b2012-04-06 00:35:09 -07002527 try_redzone0_offset = try_run_size - (try_nregs *
2528 bin_info->reg_interval) - pad_size;
2529 } while (try_hdr_size > try_redzone0_offset);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002530
2531 /* run_size expansion loop. */
2532 do {
2533 /*
2534 * Copy valid settings before trying more aggressive settings.
2535 */
2536 good_run_size = try_run_size;
2537 good_nregs = try_nregs;
2538 good_hdr_size = try_hdr_size;
Jason Evans84c8eef2011-03-16 10:30:13 -07002539 good_bitmap_offset = try_bitmap_offset;
Jason Evans122449b2012-04-06 00:35:09 -07002540 good_redzone0_offset = try_redzone0_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002541
2542 /* Try more aggressive settings. */
Jason Evansae4c7b42012-04-02 07:04:34 -07002543 try_run_size += PAGE;
Jason Evans122449b2012-04-06 00:35:09 -07002544 try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
2545 bin_info->reg_interval)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002546 + 1; /* Counter-act try_nregs-- in loop. */
Jason Evans47e57f92011-03-22 09:00:56 -07002547 if (try_nregs > RUN_MAXREGS) {
2548 try_nregs = RUN_MAXREGS
2549 + 1; /* Counter-act try_nregs-- in loop. */
2550 }
Jason Evans49f7e8f2011-03-15 13:59:15 -07002551 do {
2552 try_nregs--;
2553 try_hdr_size = sizeof(arena_run_t);
Jason Evans84c8eef2011-03-16 10:30:13 -07002554 /* Pad to a long boundary. */
2555 try_hdr_size = LONG_CEILING(try_hdr_size);
2556 try_bitmap_offset = try_hdr_size;
2557 /* Add space for bitmap. */
2558 try_hdr_size += bitmap_size(try_nregs);
Jason Evans122449b2012-04-06 00:35:09 -07002559 try_redzone0_offset = try_run_size - (try_nregs *
2560 bin_info->reg_interval) - pad_size;
2561 } while (try_hdr_size > try_redzone0_offset);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002562 } while (try_run_size <= arena_maxclass
Jason Evans122449b2012-04-06 00:35:09 -07002563 && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
2564 RUN_MAX_OVRHD_RELAX
2565 && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
Jason Evans47e57f92011-03-22 09:00:56 -07002566 && try_nregs < RUN_MAXREGS);
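	/*
	 * Added note, not in the original source: the middle two clauses of
	 * the termination test are fixed point arithmetic.  RUN_BFP is the
	 * number of binary fraction bits, so "(try_redzone0_offset <<
	 * RUN_BFP) > RUN_MAX_OVRHD * try_run_size" is the integer form of
	 * "overhead/run_size > RUN_MAX_OVRHD/2^RUN_BFP": keep growing the
	 * run while the space consumed by the header and leading waste
	 * exceeds the target overhead ratio.  The RUN_MAX_OVRHD_RELAX clause
	 * abandons that target for sufficiently small reg_interval values,
	 * where the per-region bitmap grows almost in proportion to the run
	 * and larger runs would barely improve the ratio.
	 */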
Jason Evans49f7e8f2011-03-15 13:59:15 -07002567
Jason Evans122449b2012-04-06 00:35:09 -07002568 assert(good_hdr_size <= good_redzone0_offset);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002569
2570 /* Copy final settings. */
2571 bin_info->run_size = good_run_size;
2572 bin_info->nregs = good_nregs;
Jason Evans84c8eef2011-03-16 10:30:13 -07002573 bin_info->bitmap_offset = good_bitmap_offset;
Jason Evans122449b2012-04-06 00:35:09 -07002574 bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
2575
2576 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
2577 * bin_info->reg_interval) + pad_size == bin_info->run_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002578
2579 return (good_run_size);
2580}
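
/*
 * Illustrative sketch, not part of jemalloc: how the fields computed by
 * bin_info_run_size_calc() describe a small run.  The run is laid out as
 *
 *	[ arena_run_t | bitmap | slack |
 *	  rz | region 0 | rz | rz | region 1 | rz | ... | pad ]
 *
 * with the bitmap at bitmap_offset and region 0 at reg0_offset.  The
 * layout_sketch_t type and region_addr_sketch() below are hypothetical
 * stand-ins; only the reg0_offset/reg_interval arithmetic mirrors the real
 * fields.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	size_t reg_size;	/* Usable bytes per region. */
	size_t redzone_size;	/* Redzone bytes on each side of a region. */
	size_t reg_interval;	/* reg_size + 2 * redzone_size. */
	size_t reg0_offset;	/* Offset of region 0 from the run base. */
	unsigned nregs;		/* Number of regions in the run. */
} layout_sketch_t;

static void *
region_addr_sketch(void *run_base, const layout_sketch_t *info, unsigned i)
{

	assert(i < info->nregs);
	/* Region i begins reg0_offset + i * reg_interval bytes into the run. */
	return ((void *)((uintptr_t)run_base + info->reg0_offset +
	    ((size_t)i * info->reg_interval)));
}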
2581
Jason Evansb1726102012-02-28 16:50:47 -08002582static void
Jason Evans49f7e8f2011-03-15 13:59:15 -07002583bin_info_init(void)
2584{
2585 arena_bin_info_t *bin_info;
Jason Evansae4c7b42012-04-02 07:04:34 -07002586 size_t prev_run_size = PAGE;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002587
Jason Evansd04047c2014-05-28 16:11:55 -07002588#define BIN_INFO_INIT_bin_yes(index, size) \
2589 bin_info = &arena_bin_info[index]; \
Jason Evansb1726102012-02-28 16:50:47 -08002590 bin_info->reg_size = size; \
2591 prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
2592 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
Jason Evansd04047c2014-05-28 16:11:55 -07002593#define BIN_INFO_INIT_bin_no(index, size)
2594#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
2595 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
Jason Evansb1726102012-02-28 16:50:47 -08002596 SIZE_CLASSES
Jason Evansd04047c2014-05-28 16:11:55 -07002597#undef BIN_INFO_INIT_bin_yes
2598#undef BIN_INFO_INIT_bin_no
2599#undef SC
Jason Evans49f7e8f2011-03-15 13:59:15 -07002600}
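
/*
 * Added illustration, not part of the original source: for a hypothetical
 * 8-byte class declared as SC(0, 3, 3, 0, yes, 3), the SC() invocation
 * above expands to roughly
 *
 *	bin_info = &arena_bin_info[0];
 *	bin_info->reg_size = (ZU(1)<<3) + (ZU(0)<<3);	(that is, 8)
 *	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
 *	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
 *
 * while classes whose "bin" argument is "no" (the large classes) expand to
 * nothing through BIN_INFO_INIT_bin_no().
 */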
2601
Jason Evansb1726102012-02-28 16:50:47 -08002602void
Jason Evansa0bf2422010-01-29 14:30:41 -08002603arena_boot(void)
Jason Evanse476f8a2010-01-16 09:53:50 -08002604{
Jason Evansa0bf2422010-01-29 14:30:41 -08002605 size_t header_size;
Jason Evans7393f442010-10-01 17:35:43 -07002606 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08002607
Jason Evanse476f8a2010-01-16 09:53:50 -08002608 /*
2609 * Compute the header size such that it is large enough to contain the
Jason Evans7393f442010-10-01 17:35:43 -07002610 * page map. The page map is biased to omit entries for the header
2611 * itself, so some iteration is necessary to compute the map bias.
2612 *
2613 * 1) Compute safe header_size and map_bias values that include enough
2614 * space for an unbiased page map.
2615 * 2) Refine map_bias based on (1) to omit the header pages in the page
2616 * map. The resulting map_bias may be one too small.
2617 * 3) Refine map_bias based on (2). The result will be >= the result
2618 * from (2), and will always be correct.
Jason Evanse476f8a2010-01-16 09:53:50 -08002619 */
Jason Evans7393f442010-10-01 17:35:43 -07002620 map_bias = 0;
2621 for (i = 0; i < 3; i++) {
Jason Evansae4c7b42012-04-02 07:04:34 -07002622 header_size = offsetof(arena_chunk_t, map) +
2623 (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
2624 map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
2625 != 0);
Jason Evans7393f442010-10-01 17:35:43 -07002626 }
2627 assert(map_bias > 0);
2628
Jason Evansae4c7b42012-04-02 07:04:34 -07002629 arena_maxclass = chunksize - (map_bias << LG_PAGE);
Jason Evansa0bf2422010-01-29 14:30:41 -08002630
Jason Evansb1726102012-02-28 16:50:47 -08002631 bin_info_init();
Jason Evanse476f8a2010-01-16 09:53:50 -08002632}
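
/*
 * Standalone sketch, not part of jemalloc, of the map_bias fixed point that
 * arena_boot() computes above.  The chunk geometry and header sizes are
 * passed in as parameters (hypothetical names) so the sketch is
 * self-contained; in the real code they come from offsetof(arena_chunk_t,
 * map), sizeof(arena_chunk_map_t), chunk_npages, and LG_PAGE.
 */
#include <assert.h>
#include <stddef.h>

static size_t
map_bias_sketch(size_t chunk_npages, unsigned lg_page, size_t fixed_hdr_size,
    size_t map_elm_size)
{
	size_t page_mask = ((size_t)1 << lg_page) - 1;
	size_t map_bias = 0;
	unsigned i;

	/*
	 * Pass 1 over-estimates the header (unbiased map), pass 2 may come
	 * out one page short, and pass 3 settles on the correct value.
	 */
	for (i = 0; i < 3; i++) {
		size_t header_size = fixed_hdr_size +
		    (map_elm_size * (chunk_npages - map_bias));
		map_bias = (header_size >> lg_page) +
		    ((header_size & page_mask) != 0);
	}
	assert(map_bias > 0);
	return (map_bias);
}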
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002633
2634void
2635arena_prefork(arena_t *arena)
2636{
2637 unsigned i;
2638
2639 malloc_mutex_prefork(&arena->lock);
2640 for (i = 0; i < NBINS; i++)
2641 malloc_mutex_prefork(&arena->bins[i].lock);
2642}
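
/*
 * Added note, not in the original source: arena_prefork() acquires
 * arena->lock before the per-bin locks, and the postfork hooks below give
 * them back in the reverse order, so no arena mutex is left held across
 * fork() in either the parent or the child.
 */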
2643
2644void
2645arena_postfork_parent(arena_t *arena)
2646{
2647 unsigned i;
2648
2649 for (i = 0; i < NBINS; i++)
2650 malloc_mutex_postfork_parent(&arena->bins[i].lock);
2651 malloc_mutex_postfork_parent(&arena->lock);
2652}
2653
2654void
2655arena_postfork_child(arena_t *arena)
2656{
2657 unsigned i;
2658
2659 for (i = 0; i < NBINS; i++)
2660 malloc_mutex_postfork_child(&arena->bins[i].lock);
2661 malloc_mutex_postfork_child(&arena->lock);
2662}
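
/*
 * Illustrative sketch, not part of this file: how per-arena hooks of this
 * shape are typically driven from pthread_atfork().  The sketch_* names and
 * the arena registry globals are hypothetical stand-ins for the real wiring
 * in jemalloc.c; arena_t and the arena_*fork*() functions are the ones
 * defined above.
 */
#include <pthread.h>

extern arena_t	**sketch_arenas;	/* Hypothetical arena registry. */
extern unsigned	sketch_narenas;

static void
sketch_prefork(void)
{
	unsigned i;

	/* Quiesce every arena's mutexes before the process forks. */
	for (i = 0; i < sketch_narenas; i++) {
		if (sketch_arenas[i] != NULL)
			arena_prefork(sketch_arenas[i]);
	}
}

static void
sketch_postfork_parent(void)
{
	unsigned i;

	for (i = 0; i < sketch_narenas; i++) {
		if (sketch_arenas[i] != NULL)
			arena_postfork_parent(sketch_arenas[i]);
	}
}

static void
sketch_postfork_child(void)
{
	unsigned i;

	for (i = 0; i < sketch_narenas; i++) {
		if (sketch_arenas[i] != NULL)
			arena_postfork_child(sketch_arenas[i]);
	}
}

static void
sketch_register_fork_hooks(void)
{

	/* Install the hooks so they run around every fork() in the process. */
	pthread_atfork(sketch_prefork, sketch_postfork_parent,
	    sketch_postfork_child);
}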