#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t arena_bin_info[NBINS];

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t small_size2bin[] = {
#define S2B_8(i) i,
#define S2B_16(i) S2B_8(i) S2B_8(i)
#define S2B_32(i) S2B_16(i) S2B_16(i)
#define S2B_64(i) S2B_32(i) S2B_32(i)
#define S2B_128(i) S2B_64(i) S2B_64(i)
#define S2B_256(i) S2B_128(i) S2B_128(i)
#define S2B_512(i) S2B_256(i) S2B_256(i)
#define S2B_1024(i) S2B_512(i) S2B_512(i)
#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
#define SIZE_CLASS(bin, delta, size) \
	S2B_##delta(bin)
	SIZE_CLASSES
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_512
#undef S2B_1024
#undef S2B_2048
#undef S2B_4096
#undef S2B_8192
#undef SIZE_CLASS
};
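
/*
 * Illustrative note (not part of the original source): each S2B_<delta>()
 * expansion above emits delta/8 copies of its bin index, so small_size2bin
 * holds one entry per 8 bytes of request size up through the largest small
 * size class.  Small size-to-bin lookup is then a single shift and table
 * read (roughly small_size2bin[(size-1) >> LG_TINY_MIN]; the exact lookup
 * macro lives in the internal headers).
 */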

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
    size_t pageind, size_t npages, bool maybe_adjac_pred,
    bool maybe_adjac_succ);
static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
    size_t pageind, size_t npages, bool maybe_adjac_pred,
    bool maybe_adjac_succ);
static void arena_run_split_helper(arena_t *arena, arena_run_t *run,
    size_t size, bool large, size_t binind, bool remove, bool zero);
static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, size_t binind, bool zero);
static void arena_run_init(arena_t *arena, arena_run_t *run, size_t size,
    bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
    bool large, size_t binind, bool zero);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
    size_t binind, bool zero);
static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
    arena_chunk_t *chunk, void *arg);
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static void bin_info_init(void);

/******************************************************************************/

static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
    u.rb_link, arena_run_comp)

static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	int ret;
	size_t a_size = a->bits & ~PAGE_MASK;
	size_t b_size = b->bits & ~PAGE_MASK;

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		uintptr_t a_mapelm, b_mapelm;

		if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
			a_mapelm = (uintptr_t)a;
		else {
			/*
			 * Treat keys as though they are lower than anything
			 * else.
			 */
			a_mapelm = 0;
		}
		b_mapelm = (uintptr_t)b;

		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
	}

	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
    u.rb_link, arena_avail_comp)

static inline int
arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
{

	assert(a != NULL);
	assert(b != NULL);

	/*
	 * Short-circuit for self comparison.  The following comparison code
	 * would come to the same result, but at the cost of executing the slow
	 * path.
	 */
	if (a == b)
		return (0);

	/*
	 * Order such that chunks with higher fragmentation are "less than"
	 * those with lower fragmentation -- purging order is from "least" to
	 * "greatest".  Fragmentation is measured as:
	 *
	 *     mean current avail run size
	 *   --------------------------------
	 *   mean defragmented avail run size
	 *
	 *            navail
	 *         -----------
	 *         nruns_avail           nruns_avail-nruns_adjac
	 * = ========================= = -----------------------
	 *            navail                  nruns_avail
	 *    -----------------------
	 *    nruns_avail-nruns_adjac
	 *
	 * The following code multiplies away the denominator prior to
	 * comparison, in order to avoid division.
	 *
	 */
	{
		size_t a_val = (a->nruns_avail - a->nruns_adjac) *
		    b->nruns_avail;
		size_t b_val = (b->nruns_avail - b->nruns_adjac) *
		    a->nruns_avail;

		if (a_val < b_val)
			return (1);
		if (a_val > b_val)
			return (-1);
	}
	/*
	 * Break ties by chunk address.  For fragmented chunks, report lower
	 * addresses as "lower", so that fragmentation reduction happens first
	 * at lower addresses.  However, use the opposite ordering for
	 * unfragmented chunks, in order to increase the chances of
	 * re-allocating dirty runs.
	 */
	{
		uintptr_t a_chunk = (uintptr_t)a;
		uintptr_t b_chunk = (uintptr_t)b;
		int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
		if (a->nruns_adjac == 0) {
			assert(b->nruns_adjac == 0);
			ret = -ret;
		}
		return (ret);
	}
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
    dirty_link, arena_chunk_dirty_comp)

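/*
 * The three helpers below detect whether the available run starting at
 * pageind borders another available run (clean/dirty run fragmentation).
 * They back the nruns_adjac accounting that arena_chunk_dirty_comp() uses to
 * prioritize purging of fragmented chunks.
 */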
static inline bool
arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
{
	bool ret;

	if (pageind-1 < map_bias)
		ret = false;
	else {
		ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
		assert(ret == false || arena_mapbits_dirty_get(chunk,
		    pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
	}
	return (ret);
}

static inline bool
arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
{
	bool ret;

	if (pageind+npages == chunk_npages)
		ret = false;
	else {
		assert(pageind+npages < chunk_npages);
		ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
		assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
		    != arena_mapbits_dirty_get(chunk, pageind+npages));
	}
	return (ret);
}

static inline bool
arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
{

	return (arena_avail_adjac_pred(chunk, pageind) ||
	    arena_avail_adjac_succ(chunk, pageind, npages));
}

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));

	/*
	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
	 * removed and reinserted even if the run to be inserted is clean.
	 */
	if (chunk->ndirty != 0)
		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
		chunk->nruns_adjac++;
	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
		chunk->nruns_adjac++;
	chunk->nruns_avail++;
	assert(chunk->nruns_avail > chunk->nruns_adjac);

	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty += npages;
		chunk->ndirty += npages;
	}
	if (chunk->ndirty != 0)
		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

	arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));

	/*
	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
	 * removed and reinserted even if the run to be removed is clean.
	 */
	if (chunk->ndirty != 0)
		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
		chunk->nruns_adjac--;
	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
		chunk->nruns_adjac--;
	chunk->nruns_avail--;
	assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
	    == 0 && chunk->nruns_adjac == 0));

	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty -= npages;
		chunk->ndirty -= npages;
	}
	if (chunk->ndirty != 0)
		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

	arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
}

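/*
 * Region allocation within a small run: the run's bitmap records which
 * regions are in use.  arena_run_reg_alloc() grabs the first unset bit
 * (bitmap_sfu()) and converts the region index to an address via reg0_offset
 * and reg_interval; arena_run_reg_dalloc() performs the inverse mapping and
 * clears the bit.
 */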
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	unsigned regind;
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree > 0);
	assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);

	regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	if (regind == run->nextind)
		run->nextind++;
	assert(regind < run->nextind);
	return (ret);
}

static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	unsigned regind = arena_run_regind(run, bin_info, ptr);
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - ((uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

static inline void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
	    LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

static inline void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
	    LG_PAGE)), PAGE);
}

static inline void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

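/*
 * Common implementation behind arena_run_split() and arena_run_init(): carve
 * need_pages out of the run starting at *run and initialize the corresponding
 * page map entries.  When "remove" is true the pages are also taken out of
 * the runs_avail tree and any trailing unused pages are re-inserted;
 * arena_run_init() passes remove==false to initialize the map bits only.
 */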
static void
arena_run_split_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool large, size_t binind, bool remove, bool zero)
{
	arena_chunk_t *chunk;
	size_t run_ind, need_pages, i;
	size_t flag_dirty;

	assert(large || remove);
	assert((large && binind == BININD_INVALID) || (large == false && binind
	    != BININD_INVALID));

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (remove) {
		size_t total_pages, rem_pages;

		total_pages = arena_mapbits_unallocated_size_get(chunk,
		    run_ind) >> LG_PAGE;
		assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
		    flag_dirty);
		assert(need_pages <= total_pages);
		rem_pages = total_pages - need_pages;

		arena_avail_remove(arena, chunk, run_ind, total_pages, true,
		    true);
		if (config_stats) {
			/*
			 * Update stats_cactive if nactive is crossing a chunk
			 * multiple.
			 */
			size_t cactive_diff = CHUNK_CEILING((arena->nactive +
			    need_pages) << LG_PAGE) -
			    CHUNK_CEILING(arena->nactive << LG_PAGE);
			if (cactive_diff != 0)
				stats_cactive_add(cactive_diff);
		}
		arena->nactive += need_pages;

		/* Keep track of trailing unused pages for later use. */
		if (rem_pages > 0) {
			if (flag_dirty != 0) {
				arena_mapbits_unallocated_set(chunk,
				    run_ind+need_pages, (rem_pages << LG_PAGE),
				    flag_dirty);
				arena_mapbits_unallocated_set(chunk,
				    run_ind+total_pages-1, (rem_pages <<
				    LG_PAGE), flag_dirty);
			} else {
				arena_mapbits_unallocated_set(chunk,
				    run_ind+need_pages, (rem_pages << LG_PAGE),
				    arena_mapbits_unzeroed_get(chunk,
				    run_ind+need_pages));
				arena_mapbits_unallocated_set(chunk,
				    run_ind+total_pages-1, (rem_pages <<
				    LG_PAGE), arena_mapbits_unzeroed_get(chunk,
				    run_ind+total_pages-1));
			}
			arena_avail_insert(arena, chunk, run_ind+need_pages,
			    rem_pages, false, true);
		}
	}

	/*
	 * Update the page map separately for large vs. small runs, since it is
	 * possible to avoid iteration for large mallocs.
	 */
	if (large) {
		if (zero) {
			if (flag_dirty == 0) {
				/*
				 * The run is clean, so some pages may be
				 * zeroed (i.e. never before touched).
				 */
				for (i = 0; i < need_pages; i++) {
					if (arena_mapbits_unzeroed_get(chunk,
					    run_ind+i) != 0) {
						arena_run_zero(chunk, run_ind+i,
						    1);
					} else if (config_debug) {
						arena_run_page_validate_zeroed(
						    chunk, run_ind+i);
					} else {
						arena_run_page_mark_zeroed(
						    chunk, run_ind+i);
					}
				}
			} else {
				/*
				 * The run is dirty, so all pages must be
				 * zeroed.
				 */
				arena_run_zero(chunk, run_ind, need_pages);
			}
		} else {
			VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
			    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
		}

		/*
		 * Set the last element first, in case the run only contains one
		 * page (i.e. both statements set the same element).
		 */
		arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
		    flag_dirty);
		arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
	} else {
		assert(zero == false);
		/*
		 * Propagate the dirty and unzeroed flags to the allocated
		 * small run, so that arena_dalloc_bin_run() has the ability to
		 * conditionally trim clean pages.
		 */
		arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
		/*
		 * The first page will always be dirtied during small run
		 * initialization, so a validation failure here would not
		 * actually cause an observable failure.
		 */
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
			arena_run_page_validate_zeroed(chunk, run_ind);
		for (i = 1; i < need_pages - 1; i++) {
			arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
			if (config_debug && flag_dirty == 0 &&
			    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) {
				arena_run_page_validate_zeroed(chunk,
				    run_ind+i);
			}
		}
		arena_mapbits_small_set(chunk, run_ind+need_pages-1,
		    need_pages-1, binind, flag_dirty);
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
		    0) {
			arena_run_page_validate_zeroed(chunk,
			    run_ind+need_pages-1);
		}
		VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}
}

static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    size_t binind, bool zero)
{

	arena_run_split_helper(arena, run, size, large, binind, true, zero);
}

static void
arena_run_init(arena_t *arena, arena_run_t *run, size_t size, bool large,
    size_t binind, bool zero)
{

	arena_run_split_helper(arena, run, size, large, binind, false, zero);
}

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;
	size_t i;

	if (arena->spare != NULL) {
		chunk = arena->spare;
		arena->spare = NULL;

		assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
		assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
		assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
		    arena_maxclass);
		assert(arena_mapbits_unallocated_size_get(chunk,
		    chunk_npages-1) == arena_maxclass);
		assert(arena_mapbits_dirty_get(chunk, map_bias) ==
		    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	} else {
		bool zero;
		size_t unzeroed;

		zero = false;
		malloc_mutex_unlock(&arena->lock);
		chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
		    false, &zero, arena->dss_prec);
		malloc_mutex_lock(&arena->lock);
		if (chunk == NULL)
			return (NULL);
		if (config_stats)
			arena->stats.mapped += chunksize;

		chunk->arena = arena;

		/*
		 * Claim that no pages are in use, since the header is merely
		 * overhead.
		 */
		chunk->ndirty = 0;

		chunk->nruns_avail = 0;
		chunk->nruns_adjac = 0;

		/*
		 * Initialize the map to contain one maximal free untouched run.
		 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
		 * chunk.
		 */
		unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
		arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
		    unzeroed);
		/*
		 * There is no need to initialize the internal page map entries
		 * unless the chunk is not zeroed.
		 */
		if (zero == false) {
			VALGRIND_MAKE_MEM_UNDEFINED(
			    (void *)arena_mapp_get(chunk, map_bias+1),
			    (size_t)((uintptr_t) arena_mapp_get(chunk,
			    chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
			    map_bias+1)));
			for (i = map_bias+1; i < chunk_npages-1; i++)
				arena_mapbits_unzeroed_set(chunk, i, unzeroed);
		} else {
			VALGRIND_MAKE_MEM_DEFINED(
			    (void *)arena_mapp_get(chunk, map_bias+1),
			    (size_t)((uintptr_t) arena_mapp_get(chunk,
			    chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
			    map_bias+1)));
			if (config_debug) {
				for (i = map_bias+1; i < chunk_npages-1; i++) {
					assert(arena_mapbits_unzeroed_get(chunk,
					    i) == unzeroed);
				}
			}
		}
		arena_mapbits_unallocated_set(chunk, chunk_npages-1,
		    arena_maxclass, unzeroed);
	}

	/* Insert the run into the runs_avail tree. */
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
	    false, false);

	return (chunk);
}

static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxclass);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxclass);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	/*
	 * Remove run from the runs_avail tree, so that the arena does not use
	 * it.
	 */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
	    false, false);

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;

		arena->spare = chunk;
		malloc_mutex_unlock(&arena->lock);
		chunk_dealloc((void *)spare, chunksize, true);
		malloc_mutex_lock(&arena->lock);
		if (config_stats)
			arena->stats.mapped -= chunksize;
	} else
		arena->spare = chunk;
}

static arena_run_t *
arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
    bool zero)
{
	arena_run_t *run;
	arena_chunk_map_t *mapelm, key;

	key.bits = size | CHUNK_MAP_KEY;
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);
		return (run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
    bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxclass);
	assert((size & PAGE_MASK) == 0);
	assert((large && binind == BININD_INVALID) || (large == false && binind
	    != BININD_INVALID));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_helper(arena, size, large, binind, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_helper(arena, size, large, binind, zero));
}

static inline void
arena_maybe_purge(arena_t *arena)
{
	size_t npurgeable, threshold;

	/* Don't purge if the option is disabled. */
	if (opt_lg_dirty_mult < 0)
		return;
	/* Don't purge if all dirty pages are already being purged. */
	if (arena->ndirty <= arena->npurgatory)
		return;
	npurgeable = arena->ndirty - arena->npurgatory;
	threshold = (arena->nactive >> opt_lg_dirty_mult);
	/*
	 * Don't purge unless the number of purgeable pages exceeds the
	 * threshold.
	 */
	if (npurgeable <= threshold)
		return;

	arena_purge(arena, false);
}

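/*
 * Purge the dirty pages of a single chunk: temporarily allocate the dirty
 * runs to be purged, call pages_purge() on them with arena->lock dropped,
 * then deallocate the runs as clean.  Returns the number of pages purged.
 */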
static inline size_t
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
	size_t npurged;
	ql_head(arena_chunk_map_t) mapelms;
	arena_chunk_map_t *mapelm;
	size_t pageind, npages;
	size_t nmadvise;

	ql_new(&mapelms);

	/*
	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
	 * run is reinserted into runs_avail, and 2) so that it cannot be
	 * completely discarded by another thread while arena->lock is dropped
	 * by this thread.  Note that the arena_run_dalloc() call will
	 * implicitly deallocate the chunk, so no explicit action is required
	 * in this function to deallocate the chunk.
	 *
	 * Note that once a chunk contains dirty pages, it cannot again contain
	 * a single run unless 1) it is a dirty run, or 2) this function purges
	 * dirty pages and causes the transition to a single clean run.  Thus
	 * (chunk == arena->spare) is possible, but it is not possible for
	 * this function to be called on the spare unless it contains a dirty
	 * run.
	 */
	if (chunk == arena->spare) {
		assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
		assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);

		arena_chunk_alloc(arena);
	}

	if (config_stats)
		arena->stats.purged += chunk->ndirty;

	/*
	 * Operate on all dirty runs if there is no clean/dirty run
	 * fragmentation.
	 */
	if (chunk->nruns_adjac == 0)
		all = true;

	/*
	 * Temporarily allocate free dirty runs within chunk.  If all is false,
	 * only operate on dirty runs that are fragments; otherwise operate on
	 * all dirty runs.
	 */
	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
		mapelm = arena_mapp_get(chunk, pageind);
		if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
			    (all || arena_avail_adjac(chunk, pageind,
			    npages))) {
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				arena_run_split(arena, run, run_size, true,
				    BININD_INVALID, false);
				/* Append to list for later processing. */
				ql_elm_new(mapelm, u.ql_link);
				ql_tail_insert(&mapelms, mapelm, u.ql_link);
			}
		} else {
			/* Skip run. */
			if (arena_mapbits_large_get(chunk, pageind) != 0) {
				npages = arena_mapbits_large_size_get(chunk,
				    pageind) >> LG_PAGE;
			} else {
				size_t binind;
				arena_bin_info_t *bin_info;
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				assert(arena_mapbits_small_runind_get(chunk,
				    pageind) == 0);
				binind = arena_bin_index(arena, run->bin);
				bin_info = &arena_bin_info[binind];
				npages = bin_info->run_size >> LG_PAGE;
			}
		}
	}
	assert(pageind == chunk_npages);
	assert(chunk->ndirty == 0 || all == false);
	assert(chunk->nruns_adjac == 0);

	malloc_mutex_unlock(&arena->lock);
	if (config_stats)
		nmadvise = 0;
	npurged = 0;
	ql_foreach(mapelm, &mapelms, u.ql_link) {
		bool unzeroed;
		size_t flag_unzeroed, i;

		pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		npages = arena_mapbits_large_size_get(chunk, pageind) >>
		    LG_PAGE;
		assert(pageind + npages <= chunk_npages);
		unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
		    LG_PAGE)), (npages << LG_PAGE));
		flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
		/*
		 * Set the unzeroed flag for all pages, now that pages_purge()
		 * has returned whether the pages were zeroed as a side effect
		 * of purging.  This chunk map modification is safe even though
		 * the arena mutex isn't currently owned by this thread,
		 * because the run is marked as allocated, thus protecting it
		 * from being modified by any other thread.  As long as these
		 * writes don't perturb the first and last elements'
		 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
		 */
		for (i = 0; i < npages; i++) {
			arena_mapbits_unzeroed_set(chunk, pageind+i,
			    flag_unzeroed);
		}
		npurged += npages;
		if (config_stats)
			nmadvise++;
	}
	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena->stats.nmadvise += nmadvise;

	/* Deallocate runs. */
	for (mapelm = ql_first(&mapelms); mapelm != NULL;
	    mapelm = ql_first(&mapelms)) {
		arena_run_t *run;

		pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
		    LG_PAGE));
		ql_remove(&mapelms, mapelm, u.ql_link);
		arena_run_dalloc(arena, run, false, true);
	}

	return (npurged);
}

static arena_chunk_t *
chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
{
	size_t *ndirty = (size_t *)arg;

	assert(chunk->ndirty != 0);
	*ndirty += chunk->ndirty;
	return (NULL);
}

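/*
 * Purge enough dirty pages to bring the arena back below its dirty page
 * threshold (or all purgeable pages when "all" is true), chunk by chunk in
 * the order imposed by arena_chunk_dirty_comp().
 */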
static void
arena_purge(arena_t *arena, bool all)
{
	arena_chunk_t *chunk;
	size_t npurgatory;
	if (config_debug) {
		size_t ndirty = 0;

		arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
		    chunks_dirty_iter_cb, (void *)&ndirty);
		assert(ndirty == arena->ndirty);
	}
	assert(arena->ndirty > arena->npurgatory || all);
	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
	    arena->npurgatory) || all);

	if (config_stats)
		arena->stats.npurge++;

	/*
	 * Compute the minimum number of pages that this thread should try to
	 * purge, and add the result to arena->npurgatory.  This will keep
	 * multiple threads from racing to reduce ndirty below the threshold.
	 */
	{
		size_t npurgeable = arena->ndirty - arena->npurgatory;

		if (all == false) {
			size_t threshold = (arena->nactive >>
			    opt_lg_dirty_mult);

			npurgatory = npurgeable - threshold;
		} else
			npurgatory = npurgeable;
	}
	arena->npurgatory += npurgatory;

	while (npurgatory > 0) {
		size_t npurgeable, npurged, nunpurged;

		/* Get next chunk with dirty pages. */
		chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
		if (chunk == NULL) {
			/*
			 * This thread was unable to purge as many pages as
			 * originally intended, due to races with other threads
			 * that either did some of the purging work, or re-used
			 * dirty pages.
			 */
			arena->npurgatory -= npurgatory;
			return;
		}
		npurgeable = chunk->ndirty;
		assert(npurgeable != 0);

		if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
			/*
			 * This thread will purge all the dirty pages in chunk,
			 * so set npurgatory to reflect this thread's intent to
			 * purge the pages.  This tends to reduce the chances
			 * of the following scenario:
			 *
			 * 1) This thread sets arena->npurgatory such that
			 *    (arena->ndirty - arena->npurgatory) is at the
			 *    threshold.
			 * 2) This thread drops arena->lock.
			 * 3) Another thread causes one or more pages to be
			 *    dirtied, and immediately determines that it must
			 *    purge dirty pages.
			 *
			 * If this scenario *does* play out, that's okay,
			 * because all of the purging work being done really
			 * needs to happen.
			 */
			arena->npurgatory += npurgeable - npurgatory;
			npurgatory = npurgeable;
		}

		/*
		 * Keep track of how many pages are purgeable, versus how many
		 * actually get purged, and adjust counters accordingly.
		 */
		arena->npurgatory -= npurgeable;
		npurgatory -= npurgeable;
		npurged = arena_chunk_purge(arena, chunk, all);
		nunpurged = npurgeable - npurged;
		arena->npurgatory += nunpurged;
		npurgatory += nunpurged;
	}
}

void
arena_purge_all(arena_t *arena)
{

	malloc_mutex_lock(&arena->lock);
	arena_purge(arena, true);
	malloc_mutex_unlock(&arena->lock);
}

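/*
 * Return a run to the arena: coalesce it with adjacent unallocated runs of
 * matching dirtiness, reinsert the result into runs_avail, and deallocate the
 * chunk if it becomes completely unused.  "dirty" marks the run as dirtied by
 * the caller; "cleaned" indicates the pages were just purged, so a previously
 * dirty run is not re-flagged as dirty.
 */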
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
	arena_chunk_t *chunk;
	size_t size, run_ind, run_pages, flag_dirty;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);
	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
		size = arena_mapbits_large_size_get(chunk, run_ind);
		assert(size == PAGE ||
		    arena_mapbits_large_size_get(chunk,
		    run_ind+(size>>LG_PAGE)-1) == 0);
	} else {
		size_t binind = arena_bin_index(arena, run->bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];
		size = bin_info->run_size;
	}
	run_pages = (size >> LG_PAGE);
	if (config_stats) {
		/*
		 * Update stats_cactive if nactive is crossing a chunk
		 * multiple.
		 */
		size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_sub(cactive_diff);
	}
	arena->nactive -= run_pages;

	/*
	 * The run is dirty if the caller claims to have dirtied it, as well as
	 * if it was already dirty before being allocated and the caller
	 * doesn't claim to have cleaned it.
	 */
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
		dirty = true;
	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;

	/* Mark pages as unallocated in the chunk map. */
	if (dirty) {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    CHUNK_MAP_DIRTY);
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    CHUNK_MAP_DIRTY);
	} else {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind));
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
	}

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages);
		size_t nrun_pages = nrun_size >> LG_PAGE;

		/*
		 * Remove successor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == nrun_size);
		assert(arena_mapbits_dirty_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
		    false, true);

		size += nrun_size;
		run_pages += nrun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Try to coalesce backward. */
	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
	    == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind-1);
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		/*
		 * Remove predecessor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
		    prun_size);
		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
		arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
		    false);

		size += prun_size;
		run_pages += prun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Insert into runs_avail, now that coalescing is complete. */
	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);

	/* Deallocate chunk if it is now completely unused. */
	if (size == arena_maxclass) {
		assert(run_ind == map_bias);
		assert(run_pages == (arena_maxclass >> LG_PAGE));
		arena_chunk_dealloc(arena, chunk);
	}

	/*
	 * It is okay to do dirty page processing here even if the chunk was
	 * deallocated above, since in that case it is the spare.  Waiting
	 * until after possible chunk deallocation to do dirty processing
	 * allows for an old spare to be fully deallocated, thus decreasing the
	 * chances of spuriously crossing the dirty page purging threshold.
	 */
	if (dirty)
		arena_maybe_purge(arena);
}

static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * leading run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);

	if (config_debug) {
		UNUSED size_t tail_npages = newsize >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
	    flag_dirty);

	arena_run_dalloc(arena, run, false, false);
}

static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = newsize >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * trailing run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);

	if (config_debug) {
		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
	    flag_dirty);

	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
	    dirty, false);
}

static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
	arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
	if (mapelm != NULL) {
		arena_chunk_t *chunk;
		size_t pageind;
		arena_run_t *run;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
		pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t))) + map_bias;
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
		    arena_mapbits_small_runind_get(chunk, pageind)) <<
		    LG_PAGE));
		return (run);
	}

	return (NULL);
}

static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

	assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);

	arena_run_tree_insert(&bin->runs, mapelm);
}

static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evansae4c7b42012-04-02 07:04:34 -07001254 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001255 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
Jason Evanse7a10582012-02-13 17:36:52 -08001256
1257 assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
1258
1259 arena_run_tree_remove(&bin->runs, mapelm);
1260}
1261
1262static arena_run_t *
1263arena_bin_nonfull_run_tryget(arena_bin_t *bin)
1264{
1265 arena_run_t *run = arena_bin_runs_first(bin);
1266 if (run != NULL) {
1267 arena_bin_runs_remove(bin, run);
1268 if (config_stats)
1269 bin->stats.reruns++;
1270 }
1271 return (run);
1272}
1273
1274static arena_run_t *
Jason Evanse476f8a2010-01-16 09:53:50 -08001275arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1276{
Jason Evanse476f8a2010-01-16 09:53:50 -08001277 arena_run_t *run;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001278 size_t binind;
1279 arena_bin_info_t *bin_info;
Jason Evanse476f8a2010-01-16 09:53:50 -08001280
1281 /* Look for a usable run. */
Jason Evanse7a10582012-02-13 17:36:52 -08001282 run = arena_bin_nonfull_run_tryget(bin);
1283 if (run != NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001284 return (run);
Jason Evanse476f8a2010-01-16 09:53:50 -08001285 /* No existing runs have any space available. */
1286
Jason Evans49f7e8f2011-03-15 13:59:15 -07001287 binind = arena_bin_index(arena, bin);
1288 bin_info = &arena_bin_info[binind];
1289
Jason Evanse476f8a2010-01-16 09:53:50 -08001290 /* Allocate a new run. */
Jason Evanse00572b2010-03-14 19:43:56 -07001291 malloc_mutex_unlock(&bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07001292 /******************************/
Jason Evans86815df2010-03-13 20:32:56 -08001293 malloc_mutex_lock(&arena->lock);
Jason Evans203484e2012-05-02 00:30:36 -07001294 run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
Jason Evanse00572b2010-03-14 19:43:56 -07001295 if (run != NULL) {
Jason Evans84c8eef2011-03-16 10:30:13 -07001296 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
1297 (uintptr_t)bin_info->bitmap_offset);
1298
Jason Evanse00572b2010-03-14 19:43:56 -07001299 /* Initialize run internals. */
1300 run->bin = bin;
Jason Evans84c8eef2011-03-16 10:30:13 -07001301 run->nextind = 0;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001302 run->nfree = bin_info->nregs;
Jason Evans84c8eef2011-03-16 10:30:13 -07001303 bitmap_init(bitmap, &bin_info->bitmap_info);
Jason Evanse69bee02010-03-15 22:25:23 -07001304 }
1305 malloc_mutex_unlock(&arena->lock);
1306 /********************************/
1307 malloc_mutex_lock(&bin->lock);
1308 if (run != NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001309 if (config_stats) {
1310 bin->stats.nruns++;
1311 bin->stats.curruns++;
Jason Evans7372b152012-02-10 20:22:09 -08001312 }
Jason Evanse00572b2010-03-14 19:43:56 -07001313 return (run);
1314 }
1315
1316 /*
1317 * arena_run_alloc() failed, but another thread may have made
Jason Evans940a2e02010-10-17 17:51:37 -07001318 * sufficient memory available while this one dropped bin->lock above,
Jason Evanse00572b2010-03-14 19:43:56 -07001319 * so search one more time.
1320 */
Jason Evanse7a10582012-02-13 17:36:52 -08001321 run = arena_bin_nonfull_run_tryget(bin);
1322 if (run != NULL)
Jason Evanse00572b2010-03-14 19:43:56 -07001323 return (run);
Jason Evanse00572b2010-03-14 19:43:56 -07001324
1325 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08001326}
1327
Jason Evans1e0a6362010-03-13 13:41:58 -08001328/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08001329static void *
1330arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1331{
Jason Evanse00572b2010-03-14 19:43:56 -07001332 void *ret;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001333 size_t binind;
1334 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07001335 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001336
Jason Evans49f7e8f2011-03-15 13:59:15 -07001337 binind = arena_bin_index(arena, bin);
1338 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07001339 bin->runcur = NULL;
1340 run = arena_bin_nonfull_run_get(arena, bin);
1341 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1342 /*
1343 * Another thread updated runcur while this one ran without the
1344 * bin lock in arena_bin_nonfull_run_get().
1345 */
Jason Evanse00572b2010-03-14 19:43:56 -07001346 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001347 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07001348 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07001349 arena_chunk_t *chunk;
1350
1351 /*
1352 * arena_run_alloc() may have allocated run, or it may
Jason Evans84c8eef2011-03-16 10:30:13 -07001353 * have pulled run from the bin's run tree. Therefore
Jason Evans940a2e02010-10-17 17:51:37 -07001354 * it is unsafe to make any assumptions about how run
1355 * has previously been used, and arena_bin_lower_run()
1356 * must be called, as if a region were just deallocated
1357 * from the run.
1358 */
1359 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001360 if (run->nfree == bin_info->nregs)
Jason Evans8de6a022010-10-17 20:57:30 -07001361 arena_dalloc_bin_run(arena, chunk, run, bin);
1362 else
1363 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07001364 }
1365 return (ret);
1366 }
1367
1368 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001369 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07001370
1371 bin->runcur = run;
1372
Jason Evanse476f8a2010-01-16 09:53:50 -08001373 assert(bin->runcur->nfree > 0);
1374
Jason Evans49f7e8f2011-03-15 13:59:15 -07001375 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08001376}
1377
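/*
 * Illustrative sketch (not part of arena.c): arena_bin_nonfull_run_get()
 * and arena_bin_malloc_hard() drop bin->lock while the run allocation
 * proceeds under arena->lock, then reacquire bin->lock and re-check the
 * bin's state, since another thread may have refilled it in the interim.
 * The toy refill below follows the same discipline; every name in it is
 * hypothetical, and plain malloc()/free() stand in for run allocation.
 */
#include <pthread.h>
#include <stdlib.h>

struct sketch_bin {
	pthread_mutex_t	lock;
	void		*cached;	/* Analogue of bin->runcur. */
};

/* Stand-in for arena_run_alloc(); would take the arena lock internally. */
static void *
sketch_slow_alloc(void)
{

	return (malloc(64));
}

/* Caller holds bin->lock on entry and on return. */
static void *
sketch_refill(struct sketch_bin *bin)
{
	void *fresh;

	pthread_mutex_unlock(&bin->lock);
	fresh = sketch_slow_alloc();
	pthread_mutex_lock(&bin->lock);

	if (bin->cached != NULL) {
		/*
		 * Another thread won the race while the lock was dropped;
		 * keep its object and release ours, much as
		 * arena_bin_malloc_hard() recycles a freshly allocated run
		 * via arena_bin_lower_run()/arena_dalloc_bin_run().
		 */
		free(fresh);
		return (bin->cached);
	}
	bin->cached = fresh;
	return (fresh);
}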
Jason Evans86815df2010-03-13 20:32:56 -08001378void
Jason Evans7372b152012-02-10 20:22:09 -08001379arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
1380 uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08001381{
1382 unsigned i, nfill;
1383 arena_bin_t *bin;
1384 arena_run_t *run;
1385 void *ptr;
1386
1387 assert(tbin->ncached == 0);
1388
Jason Evans88c222c2013-02-06 11:59:30 -08001389 if (config_prof && arena_prof_accum(arena, prof_accumbytes))
1390 prof_idump();
Jason Evanse69bee02010-03-15 22:25:23 -07001391 bin = &arena->bins[binind];
1392 malloc_mutex_lock(&bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07001393 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1394 tbin->lg_fill_div); i < nfill; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001395 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001396 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001397 else
1398 ptr = arena_bin_malloc_hard(arena, bin);
Jason Evans3fa9a2f2010-03-07 15:34:14 -08001399 if (ptr == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001400 break;
Jason Evans122449b2012-04-06 00:35:09 -07001401 if (config_fill && opt_junk) {
1402 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
1403 true);
1404 }
Jason Evans9c43c132011-03-18 10:53:15 -07001405 /* Insert such that low regions get used first. */
1406 tbin->avail[nfill - 1 - i] = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08001407 }
Jason Evans7372b152012-02-10 20:22:09 -08001408 if (config_stats) {
1409 bin->stats.allocated += i * arena_bin_info[binind].reg_size;
1410 bin->stats.nmalloc += i;
1411 bin->stats.nrequests += tbin->tstats.nrequests;
1412 bin->stats.nfills++;
1413 tbin->tstats.nrequests = 0;
1414 }
Jason Evans86815df2010-03-13 20:32:56 -08001415 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001416 tbin->ncached = i;
Jason Evanse476f8a2010-01-16 09:53:50 -08001417}
Jason Evanse476f8a2010-01-16 09:53:50 -08001418
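/*
 * Illustrative sketch (not part of arena.c): the fill loop above stores
 * the i-th object it obtains at avail[nfill - 1 - i].  Because the cache
 * is consumed from the top of the avail array downward, the regions that
 * were handed out first (the lowest-addressed ones in a run) are the
 * first returned to the application.  The toy below assumes that same
 * stack-style consumption; the names are made up for the example.
 */
#include <assert.h>
#include <stddef.h>

int
main(void)
{
	static int regions[4];		/* Stand-ins for small regions. */
	void *avail[4];
	size_t i, ncached, nfill = 4;

	/* Fill: insert such that low regions get used first. */
	for (i = 0; i < nfill; i++)
		avail[nfill - 1 - i] = &regions[i];
	ncached = nfill;

	/* Allocate: pop from the top of the stack. */
	ncached--;
	assert(avail[ncached] == (void *)&regions[0]);

	return (0);
}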
Jason Evans122449b2012-04-06 00:35:09 -07001419void
1420arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
1421{
1422
1423 if (zero) {
1424 size_t redzone_size = bin_info->redzone_size;
1425 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
1426 redzone_size);
1427 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
1428 redzone_size);
1429 } else {
1430 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
1431 bin_info->reg_interval);
1432 }
1433}
1434
1435void
1436arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1437{
1438 size_t size = bin_info->reg_size;
1439 size_t redzone_size = bin_info->redzone_size;
1440 size_t i;
1441 bool error = false;
1442
1443 for (i = 1; i <= redzone_size; i++) {
1444 unsigned byte;
1445 if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
1446 error = true;
1447 malloc_printf("<jemalloc>: Corrupt redzone "
1448 "%zu byte%s before %p (size %zu), byte=%#x\n", i,
1449 (i == 1) ? "" : "s", ptr, size, byte);
1450 }
1451 }
1452 for (i = 0; i < redzone_size; i++) {
1453 unsigned byte;
1454 if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
1455 error = true;
1456 malloc_printf("<jemalloc>: Corrupt redzone "
1457 "%zu byte%s after end of %p (size %zu), byte=%#x\n",
1458 i, (i == 1) ? "" : "s", ptr, size, byte);
1459 }
1460 }
1461 if (opt_abort && error)
1462 abort();
1463
1464 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1465 bin_info->reg_interval);
1466}
1467
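/*
 * Illustrative sketch (not part of arena.c): with redzones enabled each
 * small region occupies reg_interval = reg_size + 2*redzone_size bytes --
 * redzone_size bytes of 0xa5 immediately before and after the reg_size
 * payload.  The helper below walks both redzones of a single region the
 * same way arena_dalloc_junk_small() does; it is a standalone example,
 * not jemalloc code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
sketch_redzones_intact(const uint8_t *reg, size_t reg_size,
    size_t redzone_size)
{
	size_t i;

	for (i = 1; i <= redzone_size; i++) {
		if (reg[-(ptrdiff_t)i] != 0xa5)
			return (false);	/* Corruption before the region. */
	}
	for (i = 0; i < redzone_size; i++) {
		if (reg[reg_size + i] != 0xa5)
			return (false);	/* Corruption after the region. */
	}
	return (true);
}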
Jason Evanse476f8a2010-01-16 09:53:50 -08001468void *
1469arena_malloc_small(arena_t *arena, size_t size, bool zero)
1470{
1471 void *ret;
1472 arena_bin_t *bin;
1473 arena_run_t *run;
1474 size_t binind;
1475
Jason Evans41ade962011-03-06 22:56:36 -08001476 binind = SMALL_SIZE2BIN(size);
Jason Evansb1726102012-02-28 16:50:47 -08001477 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08001478 bin = &arena->bins[binind];
Jason Evans49f7e8f2011-03-15 13:59:15 -07001479 size = arena_bin_info[binind].reg_size;
Jason Evanse476f8a2010-01-16 09:53:50 -08001480
Jason Evans86815df2010-03-13 20:32:56 -08001481 malloc_mutex_lock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001482 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001483 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001484 else
1485 ret = arena_bin_malloc_hard(arena, bin);
1486
1487 if (ret == NULL) {
Jason Evans86815df2010-03-13 20:32:56 -08001488 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001489 return (NULL);
1490 }
1491
Jason Evans7372b152012-02-10 20:22:09 -08001492 if (config_stats) {
1493 bin->stats.allocated += size;
1494 bin->stats.nmalloc++;
1495 bin->stats.nrequests++;
1496 }
Jason Evans86815df2010-03-13 20:32:56 -08001497 malloc_mutex_unlock(&bin->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08001498 if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
1499 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08001500
1501 if (zero == false) {
Jason Evans7372b152012-02-10 20:22:09 -08001502 if (config_fill) {
Jason Evans122449b2012-04-06 00:35:09 -07001503 if (opt_junk) {
1504 arena_alloc_junk_small(ret,
1505 &arena_bin_info[binind], false);
1506 } else if (opt_zero)
Jason Evans7372b152012-02-10 20:22:09 -08001507 memset(ret, 0, size);
1508 }
Jason Evansdda90f52013-10-19 23:48:40 -07001509 VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evans122449b2012-04-06 00:35:09 -07001510 } else {
1511 if (config_fill && opt_junk) {
1512 arena_alloc_junk_small(ret, &arena_bin_info[binind],
1513 true);
1514 }
1515 VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001516 memset(ret, 0, size);
Jason Evans122449b2012-04-06 00:35:09 -07001517 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001518
1519 return (ret);
1520}
1521
1522void *
Jason Evanse476f8a2010-01-16 09:53:50 -08001523arena_malloc_large(arena_t *arena, size_t size, bool zero)
1524{
1525 void *ret;
Jason Evans88c222c2013-02-06 11:59:30 -08001526 UNUSED bool idump;
Jason Evanse476f8a2010-01-16 09:53:50 -08001527
1528 /* Large allocation. */
1529 size = PAGE_CEILING(size);
1530 malloc_mutex_lock(&arena->lock);
Jason Evans203484e2012-05-02 00:30:36 -07001531 ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08001532 if (ret == NULL) {
1533 malloc_mutex_unlock(&arena->lock);
1534 return (NULL);
1535 }
Jason Evans7372b152012-02-10 20:22:09 -08001536 if (config_stats) {
1537 arena->stats.nmalloc_large++;
1538 arena->stats.nrequests_large++;
1539 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001540 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1541 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1542 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001543 }
Jason Evans7372b152012-02-10 20:22:09 -08001544 if (config_prof)
Jason Evans88c222c2013-02-06 11:59:30 -08001545 idump = arena_prof_accum_locked(arena, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001546 malloc_mutex_unlock(&arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08001547 if (config_prof && idump)
1548 prof_idump();
Jason Evanse476f8a2010-01-16 09:53:50 -08001549
1550 if (zero == false) {
Jason Evans7372b152012-02-10 20:22:09 -08001551 if (config_fill) {
1552 if (opt_junk)
1553 memset(ret, 0xa5, size);
1554 else if (opt_zero)
1555 memset(ret, 0, size);
1556 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001557 }
1558
1559 return (ret);
1560}
1561
Jason Evanse476f8a2010-01-16 09:53:50 -08001562/* Only handles large allocations that require more than page alignment. */
1563void *
Jason Evans5ff709c2012-04-11 18:13:45 -07001564arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001565{
1566 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07001567 size_t alloc_size, leadsize, trailsize;
1568 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001569 arena_chunk_t *chunk;
1570
1571 assert((size & PAGE_MASK) == 0);
Jason Evans93443682010-10-20 17:39:18 -07001572
1573 alignment = PAGE_CEILING(alignment);
Jason Evans5ff709c2012-04-11 18:13:45 -07001574 alloc_size = size + alignment - PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001575
1576 malloc_mutex_lock(&arena->lock);
Jason Evansc368f8c2013-10-29 18:17:42 -07001577 run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07001578 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001579 malloc_mutex_unlock(&arena->lock);
1580 return (NULL);
1581 }
Jason Evans5ff709c2012-04-11 18:13:45 -07001582 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evanse476f8a2010-01-16 09:53:50 -08001583
Jason Evans5ff709c2012-04-11 18:13:45 -07001584 leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
1585 (uintptr_t)run;
1586 assert(alloc_size >= leadsize + size);
1587 trailsize = alloc_size - leadsize - size;
1588 ret = (void *)((uintptr_t)run + leadsize);
1589 if (leadsize != 0) {
1590 arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
1591 leadsize);
1592 }
1593 if (trailsize != 0) {
1594 arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
1595 false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001596 }
Jason Evansc368f8c2013-10-29 18:17:42 -07001597 arena_run_init(arena, (arena_run_t *)ret, size, true, BININD_INVALID,
1598 zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08001599
Jason Evans7372b152012-02-10 20:22:09 -08001600 if (config_stats) {
1601 arena->stats.nmalloc_large++;
1602 arena->stats.nrequests_large++;
1603 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001604 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1605 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1606 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001607 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001608 malloc_mutex_unlock(&arena->lock);
1609
Jason Evans7372b152012-02-10 20:22:09 -08001610 if (config_fill && zero == false) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001611 if (opt_junk)
1612 memset(ret, 0xa5, size);
1613 else if (opt_zero)
1614 memset(ret, 0, size);
1615 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001616 return (ret);
1617}
1618
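/*
 * Illustrative sketch (not part of arena.c): arena_palloc() over-allocates
 * by alignment - PAGE and then carves the aligned region out of the
 * middle, trimming leadsize bytes from the head and trailsize bytes from
 * the tail.  The helper reproduces that address arithmetic on plain
 * integers (alignment must be a power of two); it is an assumption-laden
 * example, not the allocator's code path.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uintptr_t
sketch_carve_aligned(uintptr_t run, size_t alloc_size, size_t size,
    size_t alignment)
{
	uintptr_t ret = (run + (uintptr_t)alignment - 1) &
	    ~((uintptr_t)alignment - 1);
	size_t leadsize = (size_t)(ret - run);
	size_t trailsize = alloc_size - leadsize - size;

	assert(alloc_size >= leadsize + size);
	(void)trailsize;	/* Head/tail trims would return these spans. */
	return (ret);
}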
Jason Evans0b270a92010-03-31 16:45:04 -07001619void
1620arena_prof_promoted(const void *ptr, size_t size)
1621{
1622 arena_chunk_t *chunk;
1623 size_t pageind, binind;
1624
Jason Evans78f73522012-04-18 13:38:40 -07001625 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07001626 assert(ptr != NULL);
1627 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evans122449b2012-04-06 00:35:09 -07001628 assert(isalloc(ptr, false) == PAGE);
1629 assert(isalloc(ptr, true) == PAGE);
Jason Evansb1726102012-02-28 16:50:47 -08001630 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07001631
1632 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07001633 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans41ade962011-03-06 22:56:36 -08001634 binind = SMALL_SIZE2BIN(size);
Jason Evansb1726102012-02-28 16:50:47 -08001635 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07001636 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07001637
Jason Evans122449b2012-04-06 00:35:09 -07001638 assert(isalloc(ptr, false) == PAGE);
1639 assert(isalloc(ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07001640}
Jason Evans6109fe02010-02-10 10:37:56 -08001641
Jason Evanse476f8a2010-01-16 09:53:50 -08001642static void
Jason Evans088e6a02010-10-18 00:04:44 -07001643arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08001644 arena_bin_t *bin)
1645{
Jason Evanse476f8a2010-01-16 09:53:50 -08001646
Jason Evans19b3d612010-03-18 20:36:40 -07001647 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08001648 if (run == bin->runcur)
1649 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001650 else {
1651 size_t binind = arena_bin_index(chunk->arena, bin);
1652 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1653
1654 if (bin_info->nregs != 1) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07001655 /*
1656 * This block's conditional is necessary because if the
1657 * run only contains one region, then it never gets
1658 * inserted into the non-full runs tree.
1659 */
Jason Evanse7a10582012-02-13 17:36:52 -08001660 arena_bin_runs_remove(bin, run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001661 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001662 }
Jason Evans088e6a02010-10-18 00:04:44 -07001663}
1664
1665static void
1666arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1667 arena_bin_t *bin)
1668{
Jason Evans49f7e8f2011-03-15 13:59:15 -07001669 size_t binind;
1670 arena_bin_info_t *bin_info;
Jason Evans088e6a02010-10-18 00:04:44 -07001671 size_t npages, run_ind, past;
1672
1673 assert(run != bin->runcur);
Jason Evans203484e2012-05-02 00:30:36 -07001674 assert(arena_run_tree_search(&bin->runs,
1675 arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
1676 == NULL);
Jason Evans86815df2010-03-13 20:32:56 -08001677
Jason Evans49f7e8f2011-03-15 13:59:15 -07001678 binind = arena_bin_index(chunk->arena, run->bin);
1679 bin_info = &arena_bin_info[binind];
1680
Jason Evanse00572b2010-03-14 19:43:56 -07001681 malloc_mutex_unlock(&bin->lock);
1682 /******************************/
Jason Evansae4c7b42012-04-02 07:04:34 -07001683 npages = bin_info->run_size >> LG_PAGE;
1684 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
Jason Evans84c8eef2011-03-16 10:30:13 -07001685 past = (size_t)(PAGE_CEILING((uintptr_t)run +
1686 (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
Jason Evans122449b2012-04-06 00:35:09 -07001687 bin_info->reg_interval - bin_info->redzone_size) -
1688 (uintptr_t)chunk) >> LG_PAGE);
Jason Evans86815df2010-03-13 20:32:56 -08001689 malloc_mutex_lock(&arena->lock);
Jason Evans19b3d612010-03-18 20:36:40 -07001690
1691 /*
1692 * If the run was originally clean, and some pages were never touched,
1693 * trim the clean pages before deallocating the dirty portion of the
1694 * run.
1695 */
Jason Evans30fe12b2012-05-10 17:09:17 -07001696 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1697 arena_mapbits_dirty_get(chunk, run_ind+npages-1));
Jason Evans203484e2012-05-02 00:30:36 -07001698 if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
1699 npages) {
Jason Evans30fe12b2012-05-10 17:09:17 -07001700 /* Trim clean pages. Convert to large run beforehand. */
1701 assert(npages > 0);
Jason Evansd8ceef62012-05-10 20:59:39 -07001702 arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
1703 arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
Jason Evansae4c7b42012-04-02 07:04:34 -07001704 arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
1705 ((past - run_ind) << LG_PAGE), false);
Jason Evans940a2e02010-10-17 17:51:37 -07001706 /* npages = past - run_ind; */
Jason Evans1e0a6362010-03-13 13:41:58 -08001707 }
Jason Evanse3d13062012-10-30 15:42:37 -07001708 arena_run_dalloc(arena, run, true, false);
Jason Evans86815df2010-03-13 20:32:56 -08001709 malloc_mutex_unlock(&arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07001710 /****************************/
1711 malloc_mutex_lock(&bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08001712 if (config_stats)
1713 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08001714}
1715
Jason Evans940a2e02010-10-17 17:51:37 -07001716static void
1717arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1718 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08001719{
Jason Evanse476f8a2010-01-16 09:53:50 -08001720
Jason Evans8de6a022010-10-17 20:57:30 -07001721 /*
Jason Evanse7a10582012-02-13 17:36:52 -08001722 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
1723 * non-full run. It is okay to NULL runcur out rather than proactively
1724 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07001725 */
Jason Evanse7a10582012-02-13 17:36:52 -08001726 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07001727 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08001728 if (bin->runcur->nfree > 0)
1729 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07001730 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08001731 if (config_stats)
1732 bin->stats.reruns++;
1733 } else
1734 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07001735}
1736
1737void
Jason Evans203484e2012-05-02 00:30:36 -07001738arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans940a2e02010-10-17 17:51:37 -07001739 arena_chunk_map_t *mapelm)
1740{
1741 size_t pageind;
1742 arena_run_t *run;
1743 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02001744 arena_bin_info_t *bin_info;
1745 size_t size, binind;
Jason Evans940a2e02010-10-17 17:51:37 -07001746
Jason Evansae4c7b42012-04-02 07:04:34 -07001747 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07001748 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
Jason Evans203484e2012-05-02 00:30:36 -07001749 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
Jason Evans940a2e02010-10-17 17:51:37 -07001750 bin = run->bin;
Jason Evans80737c32012-05-02 16:11:03 -07001751 binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
Mike Hommey8b499712012-04-24 23:22:02 +02001752 bin_info = &arena_bin_info[binind];
Jason Evans7372b152012-02-10 20:22:09 -08001753 if (config_fill || config_stats)
1754 size = bin_info->reg_size;
Jason Evans940a2e02010-10-17 17:51:37 -07001755
Jason Evans7372b152012-02-10 20:22:09 -08001756 if (config_fill && opt_junk)
Jason Evans122449b2012-04-06 00:35:09 -07001757 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07001758
1759 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001760 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07001761 arena_dissociate_bin_run(chunk, run, bin);
Jason Evans8de6a022010-10-17 20:57:30 -07001762 arena_dalloc_bin_run(arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07001763 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07001764 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08001765
Jason Evans7372b152012-02-10 20:22:09 -08001766 if (config_stats) {
1767 bin->stats.allocated -= size;
1768 bin->stats.ndalloc++;
1769 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001770}
1771
Jason Evanse476f8a2010-01-16 09:53:50 -08001772void
Jason Evans203484e2012-05-02 00:30:36 -07001773arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1774 size_t pageind, arena_chunk_map_t *mapelm)
1775{
1776 arena_run_t *run;
1777 arena_bin_t *bin;
1778
1779 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1780 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1781 bin = run->bin;
1782 malloc_mutex_lock(&bin->lock);
1783 arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
1784 malloc_mutex_unlock(&bin->lock);
1785}
1786
1787void
1788arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1789 size_t pageind)
1790{
1791 arena_chunk_map_t *mapelm;
1792
1793 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07001794 /* arena_ptr_small_binind_get() does extra sanity checking. */
1795 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
1796 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07001797 }
1798 mapelm = arena_mapp_get(chunk, pageind);
1799 arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
1800}
Jason Evanse476f8a2010-01-16 09:53:50 -08001801
1802void
Jason Evans203484e2012-05-02 00:30:36 -07001803arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
Jason Evanse476f8a2010-01-16 09:53:50 -08001804{
Jason Evans13668262010-01-31 03:57:29 -08001805
Jason Evans7372b152012-02-10 20:22:09 -08001806 if (config_fill || config_stats) {
Jason Evansae4c7b42012-04-02 07:04:34 -07001807 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001808 size_t size = arena_mapbits_large_size_get(chunk, pageind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001809
Jason Evans7372b152012-02-10 20:22:09 -08001810 if (config_fill && opt_junk)
Jason Evanse476f8a2010-01-16 09:53:50 -08001811 memset(ptr, 0x5a, size);
Jason Evans7372b152012-02-10 20:22:09 -08001812 if (config_stats) {
1813 arena->stats.ndalloc_large++;
1814 arena->stats.allocated_large -= size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001815 arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
1816 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08001817 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001818 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001819
Jason Evanse3d13062012-10-30 15:42:37 -07001820 arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001821}
1822
Jason Evans203484e2012-05-02 00:30:36 -07001823void
1824arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1825{
1826
1827 malloc_mutex_lock(&arena->lock);
1828 arena_dalloc_large_locked(arena, chunk, ptr);
1829 malloc_mutex_unlock(&arena->lock);
1830}
1831
Jason Evanse476f8a2010-01-16 09:53:50 -08001832static void
1833arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001834 size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08001835{
1836
1837 assert(size < oldsize);
1838
1839 /*
1840 * Shrink the run, and make trailing pages available for other
1841 * allocations.
1842 */
1843 malloc_mutex_lock(&arena->lock);
1844 arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
1845 true);
Jason Evans7372b152012-02-10 20:22:09 -08001846 if (config_stats) {
1847 arena->stats.ndalloc_large++;
1848 arena->stats.allocated_large -= oldsize;
Jason Evansae4c7b42012-04-02 07:04:34 -07001849 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1850 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08001851
Jason Evans7372b152012-02-10 20:22:09 -08001852 arena->stats.nmalloc_large++;
1853 arena->stats.nrequests_large++;
1854 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001855 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1856 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1857 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001858 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001859 malloc_mutex_unlock(&arena->lock);
1860}
1861
1862static bool
1863arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001864 size_t oldsize, size_t size, size_t extra, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001865{
Jason Evansae4c7b42012-04-02 07:04:34 -07001866 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1867 size_t npages = oldsize >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001868 size_t followsize;
Jason Evanse476f8a2010-01-16 09:53:50 -08001869
Jason Evans203484e2012-05-02 00:30:36 -07001870 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
Jason Evanse476f8a2010-01-16 09:53:50 -08001871
1872 /* Try to extend the run. */
Jason Evans8e3c3c62010-09-17 15:46:18 -07001873 assert(size + extra > oldsize);
Jason Evanse476f8a2010-01-16 09:53:50 -08001874 malloc_mutex_lock(&arena->lock);
Jason Evans7393f442010-10-01 17:35:43 -07001875 if (pageind + npages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07001876 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
1877 (followsize = arena_mapbits_unallocated_size_get(chunk,
1878 pageind+npages)) >= size - oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001879 /*
1880 * The next run is available and sufficiently large. Split the
1881 * following run, then merge the first part with the existing
1882 * allocation.
1883 */
Jason Evans940a2e02010-10-17 17:51:37 -07001884 size_t flag_dirty;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001885 size_t splitsize = (oldsize + followsize <= size + extra)
1886 ? followsize : size + extra - oldsize;
Jason Evanse476f8a2010-01-16 09:53:50 -08001887 arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
Jason Evans203484e2012-05-02 00:30:36 -07001888 ((pageind+npages) << LG_PAGE)), splitsize, true,
1889 BININD_INVALID, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08001890
Jason Evans088e6a02010-10-18 00:04:44 -07001891 size = oldsize + splitsize;
Jason Evansae4c7b42012-04-02 07:04:34 -07001892 npages = size >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07001893
1894 /*
1895 * Mark the extended run as dirty if either portion of the run
1896 * was dirty before allocation. This is rather pedantic,
1897 * because there's not actually any sequence of events that
1898 * could cause the resulting run to be passed to
1899 * arena_run_dalloc() with the dirty argument set to false
1900 * (which is when dirty flag consistency would really matter).
1901 */
Jason Evans203484e2012-05-02 00:30:36 -07001902 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
1903 arena_mapbits_dirty_get(chunk, pageind+npages-1);
1904 arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
1905 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08001906
Jason Evans7372b152012-02-10 20:22:09 -08001907 if (config_stats) {
1908 arena->stats.ndalloc_large++;
1909 arena->stats.allocated_large -= oldsize;
Jason Evans203484e2012-05-02 00:30:36 -07001910 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1911 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08001912
Jason Evans7372b152012-02-10 20:22:09 -08001913 arena->stats.nmalloc_large++;
1914 arena->stats.nrequests_large++;
1915 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001916 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
Jason Evans203484e2012-05-02 00:30:36 -07001917 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
Jason Evansae4c7b42012-04-02 07:04:34 -07001918 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07001919 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001920 malloc_mutex_unlock(&arena->lock);
1921 return (false);
1922 }
1923 malloc_mutex_unlock(&arena->lock);
1924
1925 return (true);
1926}
1927
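/*
 * Illustrative sketch (not part of arena.c): when the run following a
 * large allocation is free and big enough, arena_ralloc_large_grow()
 * splits off only what is needed -- the entire following run if even that
 * stays within size + extra, and exactly size + extra - oldsize
 * otherwise.  The helper below just restates that splitsize computation.
 */
#include <stddef.h>

static size_t
sketch_grow_splitsize(size_t oldsize, size_t followsize, size_t size,
    size_t extra)
{

	return ((oldsize + followsize <= size + extra) ?
	    followsize : size + extra - oldsize);
}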
1928/*
1929 * Try to resize a large allocation, in order to avoid copying. This will
1930 * always fail when growing an object if the following run is already in use.
1931 */
1932static bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07001933arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
1934 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001935{
1936 size_t psize;
1937
Jason Evans8e3c3c62010-09-17 15:46:18 -07001938 psize = PAGE_CEILING(size + extra);
Jason Evanse476f8a2010-01-16 09:53:50 -08001939 if (psize == oldsize) {
1940 /* Same size class. */
Jason Evanse476f8a2010-01-16 09:53:50 -08001941 return (false);
1942 } else {
1943 arena_chunk_t *chunk;
1944 arena_t *arena;
1945
1946 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1947 arena = chunk->arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08001948
1949 if (psize < oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001950 /* Fill before shrinking in order to avoid a race. */
Jason Evans7372b152012-02-10 20:22:09 -08001951 if (config_fill && opt_junk) {
Jason Evans6e629842013-12-15 21:49:40 -08001952 memset((void *)((uintptr_t)ptr + psize), 0x5a,
1953 oldsize - psize);
Jason Evanse476f8a2010-01-16 09:53:50 -08001954 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001955 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
1956 psize);
Jason Evanse476f8a2010-01-16 09:53:50 -08001957 return (false);
1958 } else {
1959 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001960 oldsize, PAGE_CEILING(size),
1961 psize - PAGE_CEILING(size), zero);
Jason Evans7372b152012-02-10 20:22:09 -08001962 if (config_fill && ret == false && zero == false &&
1963 opt_zero) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001964 memset((void *)((uintptr_t)ptr + oldsize), 0,
1965 size - oldsize);
1966 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001967 return (ret);
1968 }
1969 }
1970}
1971
1972void *
Jason Evans8e3c3c62010-09-17 15:46:18 -07001973arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
1974 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001975{
Jason Evanse476f8a2010-01-16 09:53:50 -08001976
Jason Evans8e3c3c62010-09-17 15:46:18 -07001977 /*
1978 * Avoid moving the allocation if the size class can be left the same.
1979 */
Jason Evanse476f8a2010-01-16 09:53:50 -08001980 if (oldsize <= arena_maxclass) {
Jason Evansb1726102012-02-28 16:50:47 -08001981 if (oldsize <= SMALL_MAXCLASS) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07001982 assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
1983 == oldsize);
Jason Evansb1726102012-02-28 16:50:47 -08001984 if ((size + extra <= SMALL_MAXCLASS &&
Jason Evans41ade962011-03-06 22:56:36 -08001985 SMALL_SIZE2BIN(size + extra) ==
1986 SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
Jason Evans6e629842013-12-15 21:49:40 -08001987 size + extra >= oldsize))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001988 return (ptr);
Jason Evanse476f8a2010-01-16 09:53:50 -08001989 } else {
1990 assert(size <= arena_maxclass);
Jason Evansb1726102012-02-28 16:50:47 -08001991 if (size + extra > SMALL_MAXCLASS) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001992 if (arena_ralloc_large(ptr, oldsize, size,
1993 extra, zero) == false)
Jason Evanse476f8a2010-01-16 09:53:50 -08001994 return (ptr);
1995 }
1996 }
1997 }
1998
Jason Evans8e3c3c62010-09-17 15:46:18 -07001999 /* Reallocation would require a move. */
2000 return (NULL);
2001}
Jason Evanse476f8a2010-01-16 09:53:50 -08002002
Jason Evans8e3c3c62010-09-17 15:46:18 -07002003void *
Jason Evans609ae592012-10-11 13:53:15 -07002004arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
2005 size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
2006 bool try_tcache_dalloc)
Jason Evans8e3c3c62010-09-17 15:46:18 -07002007{
2008 void *ret;
2009 size_t copysize;
2010
2011 /* Try to avoid moving the allocation. */
2012 ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
2013 if (ret != NULL)
2014 return (ret);
2015
Jason Evans8e3c3c62010-09-17 15:46:18 -07002016 /*
2017 * size and oldsize are different enough that we need to move the
2018 * object. In that case, fall back to allocating new space and
2019 * copying.
2020 */
Jason Evans38d92102011-03-23 00:37:29 -07002021 if (alignment != 0) {
Jason Evans5ff709c2012-04-11 18:13:45 -07002022 size_t usize = sa2u(size + extra, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002023 if (usize == 0)
2024 return (NULL);
Jason Evansd82a5e62013-12-12 22:35:52 -08002025 ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
Jason Evans38d92102011-03-23 00:37:29 -07002026 } else
Jason Evans609ae592012-10-11 13:53:15 -07002027 ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002028
2029 if (ret == NULL) {
2030 if (extra == 0)
2031 return (NULL);
2032 /* Try again, this time without extra. */
Jason Evans38d92102011-03-23 00:37:29 -07002033 if (alignment != 0) {
Jason Evans5ff709c2012-04-11 18:13:45 -07002034 size_t usize = sa2u(size, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002035 if (usize == 0)
2036 return (NULL);
Jason Evansd82a5e62013-12-12 22:35:52 -08002037 ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
Jason Evans609ae592012-10-11 13:53:15 -07002038 arena);
Jason Evans38d92102011-03-23 00:37:29 -07002039 } else
Jason Evans609ae592012-10-11 13:53:15 -07002040 ret = arena_malloc(arena, size, zero, try_tcache_alloc);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002041
2042 if (ret == NULL)
2043 return (NULL);
2044 }
2045
2046 /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
2047
2048 /*
2049 * Copy at most size bytes (not size+extra), since the caller has no
2050 * expectation that the extra bytes will be reliably preserved.
2051 */
Jason Evanse476f8a2010-01-16 09:53:50 -08002052 copysize = (size < oldsize) ? size : oldsize;
Jason Evansf54166e2012-04-23 22:41:36 -07002053 VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002054 memcpy(ret, ptr, copysize);
Jason Evansd82a5e62013-12-12 22:35:52 -08002055 iqalloct(ptr, try_tcache_dalloc);
Jason Evanse476f8a2010-01-16 09:53:50 -08002056 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08002057}
2058
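/*
 * Illustrative sketch (not part of arena.c): the move path of
 * arena_ralloc() allocates new space only after in-place resizing fails,
 * copies at most min(size, oldsize) bytes (the "extra" bytes carry no
 * guarantee), and then frees the old object.  Plain malloc()/free() stand
 * in for jemalloc's internal allocation routines here.
 */
#include <stdlib.h>
#include <string.h>

static void *
sketch_ralloc_move(void *ptr, size_t oldsize, size_t size)
{
	size_t copysize = (size < oldsize) ? size : oldsize;
	void *ret = malloc(size);

	if (ret == NULL)
		return (NULL);
	memcpy(ret, ptr, copysize);
	free(ptr);
	return (ret);
}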
Jason Evans609ae592012-10-11 13:53:15 -07002059dss_prec_t
2060arena_dss_prec_get(arena_t *arena)
2061{
2062 dss_prec_t ret;
2063
2064 malloc_mutex_lock(&arena->lock);
2065 ret = arena->dss_prec;
2066 malloc_mutex_unlock(&arena->lock);
2067 return (ret);
2068}
2069
2070void
2071arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2072{
2073
2074 malloc_mutex_lock(&arena->lock);
2075 arena->dss_prec = dss_prec;
2076 malloc_mutex_unlock(&arena->lock);
2077}
2078
2079void
2080arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
2081 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
2082 malloc_large_stats_t *lstats)
2083{
2084 unsigned i;
2085
2086 malloc_mutex_lock(&arena->lock);
2087 *dss = dss_prec_names[arena->dss_prec];
2088 *nactive += arena->nactive;
2089 *ndirty += arena->ndirty;
2090
2091 astats->mapped += arena->stats.mapped;
2092 astats->npurge += arena->stats.npurge;
2093 astats->nmadvise += arena->stats.nmadvise;
2094 astats->purged += arena->stats.purged;
2095 astats->allocated_large += arena->stats.allocated_large;
2096 astats->nmalloc_large += arena->stats.nmalloc_large;
2097 astats->ndalloc_large += arena->stats.ndalloc_large;
2098 astats->nrequests_large += arena->stats.nrequests_large;
2099
2100 for (i = 0; i < nlclasses; i++) {
2101 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
2102 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
2103 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
2104 lstats[i].curruns += arena->stats.lstats[i].curruns;
2105 }
2106 malloc_mutex_unlock(&arena->lock);
2107
2108 for (i = 0; i < NBINS; i++) {
2109 arena_bin_t *bin = &arena->bins[i];
2110
2111 malloc_mutex_lock(&bin->lock);
2112 bstats[i].allocated += bin->stats.allocated;
2113 bstats[i].nmalloc += bin->stats.nmalloc;
2114 bstats[i].ndalloc += bin->stats.ndalloc;
2115 bstats[i].nrequests += bin->stats.nrequests;
2116 if (config_tcache) {
2117 bstats[i].nfills += bin->stats.nfills;
2118 bstats[i].nflushes += bin->stats.nflushes;
2119 }
2120 bstats[i].nruns += bin->stats.nruns;
2121 bstats[i].reruns += bin->stats.reruns;
2122 bstats[i].curruns += bin->stats.curruns;
2123 malloc_mutex_unlock(&bin->lock);
2124 }
2125}
2126
Jason Evanse476f8a2010-01-16 09:53:50 -08002127bool
2128arena_new(arena_t *arena, unsigned ind)
2129{
2130 unsigned i;
2131 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002132
Jason Evans6109fe02010-02-10 10:37:56 -08002133 arena->ind = ind;
Jason Evans597632b2011-03-18 13:41:33 -07002134 arena->nthreads = 0;
Jason Evans6109fe02010-02-10 10:37:56 -08002135
Jason Evanse476f8a2010-01-16 09:53:50 -08002136 if (malloc_mutex_init(&arena->lock))
2137 return (true);
2138
Jason Evans7372b152012-02-10 20:22:09 -08002139 if (config_stats) {
2140 memset(&arena->stats, 0, sizeof(arena_stats_t));
2141 arena->stats.lstats =
2142 (malloc_large_stats_t *)base_alloc(nlclasses *
2143 sizeof(malloc_large_stats_t));
2144 if (arena->stats.lstats == NULL)
2145 return (true);
2146 memset(arena->stats.lstats, 0, nlclasses *
2147 sizeof(malloc_large_stats_t));
2148 if (config_tcache)
2149 ql_new(&arena->tcache_ql);
2150 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002151
Jason Evans7372b152012-02-10 20:22:09 -08002152 if (config_prof)
2153 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08002154
Jason Evans609ae592012-10-11 13:53:15 -07002155 arena->dss_prec = chunk_dss_prec_get();
2156
Jason Evanse476f8a2010-01-16 09:53:50 -08002157 /* Initialize chunks. */
Jason Evanse3d13062012-10-30 15:42:37 -07002158 arena_chunk_dirty_new(&arena->chunks_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08002159 arena->spare = NULL;
2160
2161 arena->nactive = 0;
2162 arena->ndirty = 0;
Jason Evans799ca0b2010-04-08 20:31:58 -07002163 arena->npurgatory = 0;
Jason Evanse476f8a2010-01-16 09:53:50 -08002164
Jason Evanse3d13062012-10-30 15:42:37 -07002165 arena_avail_tree_new(&arena->runs_avail);
Jason Evanse476f8a2010-01-16 09:53:50 -08002166
2167 /* Initialize bins. */
Jason Evansb1726102012-02-28 16:50:47 -08002168 for (i = 0; i < NBINS; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002169 bin = &arena->bins[i];
Jason Evans86815df2010-03-13 20:32:56 -08002170 if (malloc_mutex_init(&bin->lock))
2171 return (true);
Jason Evanse476f8a2010-01-16 09:53:50 -08002172 bin->runcur = NULL;
2173 arena_run_tree_new(&bin->runs);
Jason Evans7372b152012-02-10 20:22:09 -08002174 if (config_stats)
2175 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
Jason Evanse476f8a2010-01-16 09:53:50 -08002176 }
2177
Jason Evanse476f8a2010-01-16 09:53:50 -08002178 return (false);
2179}
2180
Jason Evans49f7e8f2011-03-15 13:59:15 -07002181/*
2182 * Calculate bin_info->run_size such that it meets the following constraints:
2183 *
2184 * *) bin_info->run_size >= min_run_size
2185 * *) bin_info->run_size <= arena_maxclass
2186 * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
Jason Evans47e57f92011-03-22 09:00:56 -07002187 * *) bin_info->nregs <= RUN_MAXREGS
Jason Evans49f7e8f2011-03-15 13:59:15 -07002188 *
Jason Evans84c8eef2011-03-16 10:30:13 -07002189 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
2190 * calculated here, since these settings are all interdependent.
Jason Evans49f7e8f2011-03-15 13:59:15 -07002191 */
2192static size_t
2193bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
2194{
Jason Evans122449b2012-04-06 00:35:09 -07002195 size_t pad_size;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002196 size_t try_run_size, good_run_size;
2197 uint32_t try_nregs, good_nregs;
2198 uint32_t try_hdr_size, good_hdr_size;
Jason Evans84c8eef2011-03-16 10:30:13 -07002199 uint32_t try_bitmap_offset, good_bitmap_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002200 uint32_t try_ctx0_offset, good_ctx0_offset;
Jason Evans122449b2012-04-06 00:35:09 -07002201 uint32_t try_redzone0_offset, good_redzone0_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002202
Jason Evansae4c7b42012-04-02 07:04:34 -07002203 assert(min_run_size >= PAGE);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002204 assert(min_run_size <= arena_maxclass);
2205
2206 /*
Jason Evans122449b2012-04-06 00:35:09 -07002207 * Determine redzone size based on minimum alignment and minimum
2208 * redzone size. Add padding to the end of the run if it is needed to
2209 * align the regions. The padding allows each redzone to be half the
2210 * minimum alignment; without the padding, each redzone would have to
2211 * be twice as large in order to maintain alignment.
2212 */
2213 if (config_fill && opt_redzone) {
2214 size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
2215 if (align_min <= REDZONE_MINSIZE) {
2216 bin_info->redzone_size = REDZONE_MINSIZE;
2217 pad_size = 0;
2218 } else {
2219 bin_info->redzone_size = align_min >> 1;
2220 pad_size = bin_info->redzone_size;
2221 }
2222 } else {
2223 bin_info->redzone_size = 0;
2224 pad_size = 0;
2225 }
2226 bin_info->reg_interval = bin_info->reg_size +
2227 (bin_info->redzone_size << 1);
2228
2229 /*
Jason Evans49f7e8f2011-03-15 13:59:15 -07002230 * Calculate known-valid settings before entering the run_size
2231 * expansion loop, so that the first part of the loop always copies
2232 * valid settings.
2233 *
2234 * The do..while loop iteratively reduces the number of regions until
2235 * the run header and the regions no longer overlap. A closed formula
2236 * would be quite messy, since there is an interdependency between the
2237 * header's mask length and the number of regions.
2238 */
2239 try_run_size = min_run_size;
Jason Evans122449b2012-04-06 00:35:09 -07002240 try_nregs = ((try_run_size - sizeof(arena_run_t)) /
2241 bin_info->reg_interval)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002242 + 1; /* Counter-act try_nregs-- in loop. */
Jason Evans47e57f92011-03-22 09:00:56 -07002243 if (try_nregs > RUN_MAXREGS) {
2244 try_nregs = RUN_MAXREGS
2245 + 1; /* Counter-act try_nregs-- in loop. */
2246 }
Jason Evans49f7e8f2011-03-15 13:59:15 -07002247 do {
2248 try_nregs--;
2249 try_hdr_size = sizeof(arena_run_t);
Jason Evans84c8eef2011-03-16 10:30:13 -07002250 /* Pad to a long boundary. */
2251 try_hdr_size = LONG_CEILING(try_hdr_size);
2252 try_bitmap_offset = try_hdr_size;
2253 /* Add space for bitmap. */
2254 try_hdr_size += bitmap_size(try_nregs);
Jason Evans7372b152012-02-10 20:22:09 -08002255 if (config_prof && opt_prof && prof_promote == false) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07002256 /* Pad to a quantum boundary. */
2257 try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2258 try_ctx0_offset = try_hdr_size;
2259 /* Add space for one (prof_ctx_t *) per region. */
2260 try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
2261 } else
2262 try_ctx0_offset = 0;
Jason Evans122449b2012-04-06 00:35:09 -07002263 try_redzone0_offset = try_run_size - (try_nregs *
2264 bin_info->reg_interval) - pad_size;
2265 } while (try_hdr_size > try_redzone0_offset);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002266
2267 /* run_size expansion loop. */
2268 do {
2269 /*
2270 * Copy valid settings before trying more aggressive settings.
2271 */
2272 good_run_size = try_run_size;
2273 good_nregs = try_nregs;
2274 good_hdr_size = try_hdr_size;
Jason Evans84c8eef2011-03-16 10:30:13 -07002275 good_bitmap_offset = try_bitmap_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002276 good_ctx0_offset = try_ctx0_offset;
Jason Evans122449b2012-04-06 00:35:09 -07002277 good_redzone0_offset = try_redzone0_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002278
2279 /* Try more aggressive settings. */
Jason Evansae4c7b42012-04-02 07:04:34 -07002280 try_run_size += PAGE;
Jason Evans122449b2012-04-06 00:35:09 -07002281 try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
2282 bin_info->reg_interval)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002283 + 1; /* Counter-act try_nregs-- in loop. */
Jason Evans47e57f92011-03-22 09:00:56 -07002284 if (try_nregs > RUN_MAXREGS) {
2285 try_nregs = RUN_MAXREGS
2286 + 1; /* Counter-act try_nregs-- in loop. */
2287 }
Jason Evans49f7e8f2011-03-15 13:59:15 -07002288 do {
2289 try_nregs--;
2290 try_hdr_size = sizeof(arena_run_t);
Jason Evans84c8eef2011-03-16 10:30:13 -07002291 /* Pad to a long boundary. */
2292 try_hdr_size = LONG_CEILING(try_hdr_size);
2293 try_bitmap_offset = try_hdr_size;
2294 /* Add space for bitmap. */
2295 try_hdr_size += bitmap_size(try_nregs);
Jason Evans7372b152012-02-10 20:22:09 -08002296 if (config_prof && opt_prof && prof_promote == false) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07002297 /* Pad to a quantum boundary. */
2298 try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2299 try_ctx0_offset = try_hdr_size;
2300 /*
2301 * Add space for one (prof_ctx_t *) per region.
2302 */
2303 try_hdr_size += try_nregs *
2304 sizeof(prof_ctx_t *);
2305 }
Jason Evans122449b2012-04-06 00:35:09 -07002306 try_redzone0_offset = try_run_size - (try_nregs *
2307 bin_info->reg_interval) - pad_size;
2308 } while (try_hdr_size > try_redzone0_offset);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002309 } while (try_run_size <= arena_maxclass
Jason Evans122449b2012-04-06 00:35:09 -07002311 && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
2312 RUN_MAX_OVRHD_RELAX
2313 && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
Jason Evans47e57f92011-03-22 09:00:56 -07002314 && try_nregs < RUN_MAXREGS);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002315
Jason Evans122449b2012-04-06 00:35:09 -07002316 assert(good_hdr_size <= good_redzone0_offset);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002317
2318 /* Copy final settings. */
2319 bin_info->run_size = good_run_size;
2320 bin_info->nregs = good_nregs;
Jason Evans84c8eef2011-03-16 10:30:13 -07002321 bin_info->bitmap_offset = good_bitmap_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002322 bin_info->ctx0_offset = good_ctx0_offset;
Jason Evans122449b2012-04-06 00:35:09 -07002323 bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
2324
2325 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
2326 * bin_info->reg_interval) + pad_size == bin_info->run_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002327
2328 return (good_run_size);
2329}
2330
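/*
 * Illustrative sketch (not part of arena.c): the core of
 * bin_info_run_size_calc() is a small fixed point -- the region count is
 * over-estimated and then decremented until the run header, whose size
 * depends on the region count through the allocation bitmap, no longer
 * overlaps region 0.  The header model below (8 fixed bytes plus one
 * bitmap bit per region, rounded up to whole bytes) is a simplified
 * assumption chosen only to make the loop self-contained.
 */
#include <stddef.h>

static size_t
sketch_hdr_size(size_t nregs)
{

	return (8 + (nregs + 7) / 8);
}

static size_t
sketch_nregs(size_t run_size, size_t reg_interval)
{
	size_t try_nregs = run_size / reg_interval + 1;

	do {
		try_nregs--;
	} while (sketch_hdr_size(try_nregs) >
	    run_size - try_nregs * reg_interval);
	return (try_nregs);
}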
Jason Evansb1726102012-02-28 16:50:47 -08002331static void
Jason Evans49f7e8f2011-03-15 13:59:15 -07002332bin_info_init(void)
2333{
2334 arena_bin_info_t *bin_info;
Jason Evansae4c7b42012-04-02 07:04:34 -07002335 size_t prev_run_size = PAGE;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002336
Jason Evansb1726102012-02-28 16:50:47 -08002337#define SIZE_CLASS(bin, delta, size) \
2338 bin_info = &arena_bin_info[bin]; \
2339 bin_info->reg_size = size; \
2340 prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
2341 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
2342 SIZE_CLASSES
2343#undef SIZE_CLASS
Jason Evans49f7e8f2011-03-15 13:59:15 -07002344}
2345
Jason Evansb1726102012-02-28 16:50:47 -08002346void
Jason Evansa0bf2422010-01-29 14:30:41 -08002347arena_boot(void)
Jason Evanse476f8a2010-01-16 09:53:50 -08002348{
Jason Evansa0bf2422010-01-29 14:30:41 -08002349 size_t header_size;
Jason Evans7393f442010-10-01 17:35:43 -07002350 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08002351
Jason Evanse476f8a2010-01-16 09:53:50 -08002352 /*
2353 * Compute the header size such that it is large enough to contain the
Jason Evans7393f442010-10-01 17:35:43 -07002354 * page map. The page map is biased to omit entries for the header
2355 * itself, so some iteration is necessary to compute the map bias.
2356 *
2357 * 1) Compute safe header_size and map_bias values that include enough
2358 * space for an unbiased page map.
2359 * 2) Refine map_bias based on (1) to omit the header pages in the page
2360 * map. The resulting map_bias may be one too small.
2361 * 3) Refine map_bias based on (2). The result will be >= the result
2362 * from (2), and will always be correct.
Jason Evanse476f8a2010-01-16 09:53:50 -08002363 */
Jason Evans7393f442010-10-01 17:35:43 -07002364 map_bias = 0;
2365 for (i = 0; i < 3; i++) {
Jason Evansae4c7b42012-04-02 07:04:34 -07002366 header_size = offsetof(arena_chunk_t, map) +
2367 (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
2368 map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
2369 != 0);
Jason Evans7393f442010-10-01 17:35:43 -07002370 }
2371 assert(map_bias > 0);
2372
Jason Evansae4c7b42012-04-02 07:04:34 -07002373 arena_maxclass = chunksize - (map_bias << LG_PAGE);
Jason Evansa0bf2422010-01-29 14:30:41 -08002374
Jason Evansb1726102012-02-28 16:50:47 -08002375 bin_info_init();
Jason Evanse476f8a2010-01-16 09:53:50 -08002376}
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002377
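/*
 * Illustrative sketch (not part of arena.c): arena_boot() finds map_bias
 * iteratively, because the chunk header holds one map entry per
 * non-header page while the number of header pages in turn depends on
 * map_bias; three passes reach the fixed point, as the comment above
 * explains.  The parameters below are made-up stand-ins for
 * offsetof(arena_chunk_t, map), sizeof(arena_chunk_map_t), chunk_npages,
 * and the page size.
 */
#include <stddef.h>

static size_t
sketch_map_bias(size_t hdr_fixed, size_t map_entry_size, size_t chunk_npages,
    size_t page)
{
	size_t map_bias = 0, header_size;
	unsigned i;

	for (i = 0; i < 3; i++) {
		header_size = hdr_fixed +
		    map_entry_size * (chunk_npages - map_bias);
		map_bias = header_size / page + ((header_size % page) != 0);
	}
	return (map_bias);
}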
2378void
2379arena_prefork(arena_t *arena)
2380{
2381 unsigned i;
2382
2383 malloc_mutex_prefork(&arena->lock);
2384 for (i = 0; i < NBINS; i++)
2385 malloc_mutex_prefork(&arena->bins[i].lock);
2386}
2387
2388void
2389arena_postfork_parent(arena_t *arena)
2390{
2391 unsigned i;
2392
2393 for (i = 0; i < NBINS; i++)
2394 malloc_mutex_postfork_parent(&arena->bins[i].lock);
2395 malloc_mutex_postfork_parent(&arena->lock);
2396}
2397
2398void
2399arena_postfork_child(arena_t *arena)
2400{
2401 unsigned i;
2402
2403 for (i = 0; i < NBINS; i++)
2404 malloc_mutex_postfork_child(&arena->bins[i].lock);
2405 malloc_mutex_postfork_child(&arena->lock);
2406}
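/*
 * Illustrative sketch (not part of arena.c): the prefork/postfork hooks
 * above are jemalloc's half of the usual pthread_atfork() protocol --
 * every arena and bin mutex is acquired before fork() and released in
 * both the parent and the child, so the child never inherits a mutex
 * held by a thread that does not exist after the fork.  The standalone
 * example applies the same pattern to a single hypothetical lock.
 */
#include <pthread.h>

static pthread_mutex_t	sketch_lock = PTHREAD_MUTEX_INITIALIZER;

static void
sketch_prefork(void)
{

	pthread_mutex_lock(&sketch_lock);
}

static void
sketch_postfork(void)
{

	pthread_mutex_unlock(&sketch_lock);
}

static void
sketch_fork_init(void)
{

	/* Registered once, typically during allocator bootstrap. */
	pthread_atfork(sketch_prefork, sketch_postfork, sketch_postfork);
}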