blob: 0c53b071b87f7920325ba401e22c1f5597cc10fc [file] [log] [blame]
Jason Evanse476f8a2010-01-16 09:53:50 -08001#define JEMALLOC_ARENA_C_
Jason Evans376b1522010-02-11 14:45:59 -08002#include "jemalloc/internal/jemalloc_internal.h"
Jason Evanse476f8a2010-01-16 09:53:50 -08003
4/******************************************************************************/
5/* Data. */
6
/*
 * Dirty-page purging trigger: purging runs when the number of purgeable
 * dirty pages exceeds nactive >> opt_lg_dirty_mult; a negative value
 * disables purging entirely (see arena_maybe_purge()).
 */
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
/* Per-small-bin run/region layout metadata, filled in by bin_info_init(). */
arena_bin_info_t arena_bin_info[NBINS];

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t small_size2bin[] = {
/*
 * Each S2B_<delta>(i) macro expands to <delta>/8 copies of "i,".  Expanding
 * SIZE_CLASSES with SIZE_CLASS(bin, delta, size) -> S2B_<delta>(bin) thus
 * emits one table entry per 8-byte step spanned by each size class, producing
 * a dense size -> bin-index lookup table.
 * NOTE(review): assumes lookups index this table in 8-byte quanta -- confirm
 * against the SIZE_CLASSES definition and small_size2bin users.
 */
#define S2B_8(i) i,
#define S2B_16(i) S2B_8(i) S2B_8(i)
#define S2B_32(i) S2B_16(i) S2B_16(i)
#define S2B_64(i) S2B_32(i) S2B_32(i)
#define S2B_128(i) S2B_64(i) S2B_64(i)
#define S2B_256(i) S2B_128(i) S2B_128(i)
#define S2B_512(i) S2B_256(i) S2B_256(i)
#define S2B_1024(i) S2B_512(i) S2B_512(i)
#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
#define SIZE_CLASS(bin, delta, size) \
    S2B_##delta(bin)
    SIZE_CLASSES
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_512
#undef S2B_1024
#undef S2B_2048
#undef S2B_4096
#undef S2B_8192
#undef SIZE_CLASS
};
Jason Evanse476f8a2010-01-16 09:53:50 -080039
40/******************************************************************************/
41/* Function prototypes for non-inline static functions. */
42
Jason Evanse3d13062012-10-30 15:42:37 -070043static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
44 size_t pageind, size_t npages, bool maybe_adjac_pred,
45 bool maybe_adjac_succ);
46static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
47 size_t pageind, size_t npages, bool maybe_adjac_pred,
48 bool maybe_adjac_succ);
Jason Evanse476f8a2010-01-16 09:53:50 -080049static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
Jason Evans203484e2012-05-02 00:30:36 -070050 bool large, size_t binind, bool zero);
Jason Evanse476f8a2010-01-16 09:53:50 -080051static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
52static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
Jason Evans5b0c9962012-05-10 15:47:24 -070053static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
54 bool large, size_t binind, bool zero);
Jason Evanse476f8a2010-01-16 09:53:50 -080055static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
Jason Evans203484e2012-05-02 00:30:36 -070056 size_t binind, bool zero);
Jason Evanse3d13062012-10-30 15:42:37 -070057static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
58 arena_chunk_t *chunk, void *arg);
Jason Evans6005f072010-09-30 16:55:08 -070059static void arena_purge(arena_t *arena, bool all);
Jason Evanse3d13062012-10-30 15:42:37 -070060static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
61 bool cleaned);
Jason Evanse476f8a2010-01-16 09:53:50 -080062static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
63 arena_run_t *run, size_t oldsize, size_t newsize);
64static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
65 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
Jason Evanse7a10582012-02-13 17:36:52 -080066static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
67static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
68static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
69static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
Jason Evanse476f8a2010-01-16 09:53:50 -080070static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
71static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
Jason Evans088e6a02010-10-18 00:04:44 -070072static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
73 arena_bin_t *bin);
Jason Evanse476f8a2010-01-16 09:53:50 -080074static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
75 arena_run_t *run, arena_bin_t *bin);
Jason Evans940a2e02010-10-17 17:51:37 -070076static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
77 arena_run_t *run, arena_bin_t *bin);
Jason Evanse476f8a2010-01-16 09:53:50 -080078static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
Jason Evans8e3c3c62010-09-17 15:46:18 -070079 void *ptr, size_t oldsize, size_t size);
Jason Evanse476f8a2010-01-16 09:53:50 -080080static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
Jason Evans8e3c3c62010-09-17 15:46:18 -070081 void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
82static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
83 size_t extra, bool zero);
Jason Evans49f7e8f2011-03-15 13:59:15 -070084static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
85 size_t min_run_size);
Jason Evansb1726102012-02-28 16:50:47 -080086static void bin_info_init(void);
Jason Evanse476f8a2010-01-16 09:53:50 -080087
88/******************************************************************************/
89
90static inline int
Jason Evanse476f8a2010-01-16 09:53:50 -080091arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
92{
93 uintptr_t a_mapelm = (uintptr_t)a;
94 uintptr_t b_mapelm = (uintptr_t)b;
95
96 assert(a != NULL);
97 assert(b != NULL);
98
99 return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
100}
101
Jason Evansf3ff7522010-02-28 15:00:18 -0800102/* Generate red-black tree functions. */
Jason Evans7372b152012-02-10 20:22:09 -0800103rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
104 u.rb_link, arena_run_comp)
Jason Evanse476f8a2010-01-16 09:53:50 -0800105
106static inline int
107arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
108{
109 int ret;
110 size_t a_size = a->bits & ~PAGE_MASK;
111 size_t b_size = b->bits & ~PAGE_MASK;
112
113 ret = (a_size > b_size) - (a_size < b_size);
114 if (ret == 0) {
115 uintptr_t a_mapelm, b_mapelm;
116
117 if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
118 a_mapelm = (uintptr_t)a;
119 else {
120 /*
121 * Treat keys as though they are lower than anything
122 * else.
123 */
124 a_mapelm = 0;
125 }
126 b_mapelm = (uintptr_t)b;
127
128 ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
129 }
130
131 return (ret);
132}
133
Jason Evansf3ff7522010-02-28 15:00:18 -0800134/* Generate red-black tree functions. */
Jason Evans7372b152012-02-10 20:22:09 -0800135rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
136 u.rb_link, arena_avail_comp)
Jason Evanse476f8a2010-01-16 09:53:50 -0800137
/*
 * Comparator for the chunks_dirty tree, which orders chunks for purging.
 * More-fragmented chunks sort "less than" less-fragmented ones, so purging
 * (which proceeds from least to greatest) defragments first.
 */
static inline int
arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
{

    assert(a != NULL);
    assert(b != NULL);

    /*
     * Short-circuit for self comparison.  The following comparison code
     * would come to the same result, but at the cost of executing the slow
     * path.
     */
    if (a == b)
        return (0);

    /*
     * Order such that chunks with higher fragmentation are "less than"
     * those with lower fragmentation -- purging order is from "least" to
     * "greatest".  Fragmentation is measured as:
     *
     *     mean current avail run size
     *   --------------------------------
     *   mean defragmented avail run size
     *
     *            navail
     *         -----------
     *         nruns_avail        nruns_avail-nruns_adjac
     *     = ========================= = -----------------------
     *            navail                      nruns_avail
     *     -----------------------
     *     nruns_avail-nruns_adjac
     *
     * The following code multiplies away the denominator prior to
     * comparison, in order to avoid division.
     *
     */
    {
        size_t a_val = (a->nruns_avail - a->nruns_adjac) *
            b->nruns_avail;
        size_t b_val = (b->nruns_avail - b->nruns_adjac) *
            a->nruns_avail;

        /* Smaller ratio == more fragmented == sorts lower. */
        if (a_val < b_val)
            return (1);
        if (a_val > b_val)
            return (-1);
    }
    /*
     * Break ties by chunk address.  For fragmented chunks, report lower
     * addresses as "lower", so that fragmentation reduction happens first
     * at lower addresses.  However, use the opposite ordering for
     * unfragmented chunks, in order to increase the chances of
     * re-allocating dirty runs.
     */
    {
        uintptr_t a_chunk = (uintptr_t)a;
        uintptr_t b_chunk = (uintptr_t)b;
        int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
        if (a->nruns_adjac == 0) {
            /* Equal ratios with zero adjacency imply b is also 0. */
            assert(b->nruns_adjac == 0);
            ret = -ret;
        }
        return (ret);
    }
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
    dirty_link, arena_chunk_dirty_comp)
207
208static inline bool
209arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
210{
211 bool ret;
212
213 if (pageind-1 < map_bias)
214 ret = false;
215 else {
216 ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
217 assert(ret == false || arena_mapbits_dirty_get(chunk,
218 pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
219 }
220 return (ret);
221}
222
223static inline bool
224arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
225{
226 bool ret;
227
228 if (pageind+npages == chunk_npages)
229 ret = false;
230 else {
231 assert(pageind+npages < chunk_npages);
232 ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
233 assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
234 != arena_mapbits_dirty_get(chunk, pageind+npages));
235 }
236 return (ret);
237}
238
239static inline bool
240arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
241{
242
243 return (arena_avail_adjac_pred(chunk, pageind) ||
244 arena_avail_adjac_succ(chunk, pageind, npages));
245}
246
/*
 * Insert the unallocated run at [pageind, pageind+npages) into the arena's
 * runs_avail tree, and update the chunk's fragmentation counters
 * (nruns_avail/nruns_adjac) and dirty-page accounting.  maybe_adjac_pred and
 * maybe_adjac_succ indicate whether the respective neighbor's adjacency
 * status may have changed and so must be re-examined.
 */
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));

    /*
     * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
     * removed and reinserted even if the run to be inserted is clean.
     */
    if (chunk->ndirty != 0)
        arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

    if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
        chunk->nruns_adjac++;
    if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
        chunk->nruns_adjac++;
    chunk->nruns_avail++;
    assert(chunk->nruns_avail > chunk->nruns_adjac);

    /* A dirty run contributes npages to both chunk and arena counts. */
    if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
        arena->ndirty += npages;
        chunk->ndirty += npages;
    }
    if (chunk->ndirty != 0)
        arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

    arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
        pageind));
}
279
/*
 * Remove the unallocated run at [pageind, pageind+npages) from the arena's
 * runs_avail tree; exact mirror of arena_avail_insert() with the counter
 * updates reversed.
 */
static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));

    /*
     * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
     * removed and reinserted even if the run to be removed is clean.
     */
    if (chunk->ndirty != 0)
        arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

    if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
        chunk->nruns_adjac--;
    if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
        chunk->nruns_adjac--;
    chunk->nruns_avail--;
    /* Either the invariant holds, or the chunk is completely empty. */
    assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
        == 0 && chunk->nruns_adjac == 0));

    if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
        arena->ndirty -= npages;
        chunk->ndirty -= npages;
    }
    if (chunk->ndirty != 0)
        arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

    arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
        pageind));
}
313
Jason Evanse476f8a2010-01-16 09:53:50 -0800314static inline void *
Jason Evans49f7e8f2011-03-15 13:59:15 -0700315arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
Jason Evanse476f8a2010-01-16 09:53:50 -0800316{
317 void *ret;
Jason Evans84c8eef2011-03-16 10:30:13 -0700318 unsigned regind;
319 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
320 (uintptr_t)bin_info->bitmap_offset);
Jason Evanse476f8a2010-01-16 09:53:50 -0800321
Jason Evans1e0a6362010-03-13 13:41:58 -0800322 assert(run->nfree > 0);
Jason Evans84c8eef2011-03-16 10:30:13 -0700323 assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
Jason Evanse476f8a2010-01-16 09:53:50 -0800324
Jason Evans84c8eef2011-03-16 10:30:13 -0700325 regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
326 ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
Jason Evans122449b2012-04-06 00:35:09 -0700327 (uintptr_t)(bin_info->reg_interval * regind));
Jason Evans1e0a6362010-03-13 13:41:58 -0800328 run->nfree--;
Jason Evans84c8eef2011-03-16 10:30:13 -0700329 if (regind == run->nextind)
330 run->nextind++;
331 assert(regind < run->nextind);
Jason Evans1e0a6362010-03-13 13:41:58 -0800332 return (ret);
Jason Evans6109fe02010-02-10 10:37:56 -0800333}
334
/*
 * Return the region at ptr to its small run's free set: clear the region's
 * bit in the run's allocation bitmap and increment run->nfree.  Heavily
 * asserted because freeing an interior or unallocated pointer would corrupt
 * the run silently in non-debug builds.
 */
static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    size_t mapbits = arena_mapbits_get(chunk, pageind);
    size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
    arena_bin_info_t *bin_info = &arena_bin_info[binind];
    unsigned regind = arena_run_regind(run, bin_info, ptr);
    bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
        (uintptr_t)bin_info->bitmap_offset);

    assert(run->nfree < bin_info->nregs);
    /* Freeing an interior pointer can cause assertion failure. */
    assert(((uintptr_t)ptr - ((uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset)) %
        (uintptr_t)bin_info->reg_interval == 0);
    assert((uintptr_t)ptr >= (uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset);
    /* Freeing an unallocated pointer can cause assertion failure. */
    assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

    bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
    run->nfree++;
}
360
Jason Evans21fb95b2010-10-18 17:45:40 -0700361static inline void
362arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
363{
Jason Evansd4bab212010-10-24 20:08:37 -0700364 size_t i;
Jason Evansae4c7b42012-04-02 07:04:34 -0700365 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
Jason Evansd4bab212010-10-24 20:08:37 -0700366
Jason Evansae4c7b42012-04-02 07:04:34 -0700367 for (i = 0; i < PAGE / sizeof(size_t); i++)
Jason Evans21fb95b2010-10-18 17:45:40 -0700368 assert(p[i] == 0);
369}
Jason Evans21fb95b2010-10-18 17:45:40 -0700370
/*
 * Carve a run of `size` bytes out of the available run beginning at `run`,
 * updating the chunk's page map and the arena's runs_avail tree.  For large
 * runs only the first and last map entries are written; small runs record
 * binind (and the intra-run page offset) in every page's map entry.  If
 * `zero` is set (large runs only), the returned pages are guaranteed zeroed.
 */
static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    size_t binind, bool zero)
{
    arena_chunk_t *chunk;
    size_t run_ind, total_pages, need_pages, rem_pages, i;
    size_t flag_dirty;

    /* Exactly one of {large, valid binind} must hold. */
    assert((large && binind == BININD_INVALID) || (large == false && binind
        != BININD_INVALID));

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
        LG_PAGE;
    /* First and last pages of an unallocated run agree on dirtiness. */
    assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
        flag_dirty);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);
    assert(need_pages <= total_pages);
    rem_pages = total_pages - need_pages;

    arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
    if (config_stats) {
        /*
         * Update stats_cactive if nactive is crossing a chunk
         * multiple.
         */
        size_t cactive_diff = CHUNK_CEILING((arena->nactive +
            need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
            LG_PAGE);
        if (cactive_diff != 0)
            stats_cactive_add(cactive_diff);
    }
    arena->nactive += need_pages;

    /* Keep track of trailing unused pages for later use. */
    if (rem_pages > 0) {
        if (flag_dirty != 0) {
            arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
                (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                CHUNK_MAP_DIRTY);
        } else {
            /* Clean remainder: preserve per-page unzeroed flags. */
            arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
                (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+need_pages));
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+total_pages-1));
        }
        arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
            false, true);
    }

    /*
     * Update the page map separately for large vs. small runs, since it is
     * possible to avoid iteration for large mallocs.
     */
    if (large) {
        if (zero) {
            if (flag_dirty == 0) {
                /*
                 * The run is clean, so some pages may be
                 * zeroed (i.e. never before touched).
                 */
                for (i = 0; i < need_pages; i++) {
                    if (arena_mapbits_unzeroed_get(chunk,
                        run_ind+i) != 0) {
                        VALGRIND_MAKE_MEM_UNDEFINED(
                            (void *)((uintptr_t)
                            chunk + ((run_ind+i) <<
                            LG_PAGE)), PAGE);
                        memset((void *)((uintptr_t)
                            chunk + ((run_ind+i) <<
                            LG_PAGE)), 0, PAGE);
                    } else if (config_debug) {
                        VALGRIND_MAKE_MEM_DEFINED(
                            (void *)((uintptr_t)
                            chunk + ((run_ind+i) <<
                            LG_PAGE)), PAGE);
                        arena_chunk_validate_zeroed(
                            chunk, run_ind+i);
                    }
                }
            } else {
                /*
                 * The run is dirty, so all pages must be
                 * zeroed.
                 */
                VALGRIND_MAKE_MEM_UNDEFINED((void
                    *)((uintptr_t)chunk + (run_ind <<
                    LG_PAGE)), (need_pages << LG_PAGE));
                memset((void *)((uintptr_t)chunk + (run_ind <<
                    LG_PAGE)), 0, (need_pages << LG_PAGE));
            }
        }

        /*
         * Set the last element first, in case the run only contains one
         * page (i.e. both statements set the same element).
         */
        arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
            flag_dirty);
        arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
    } else {
        assert(zero == false);
        /*
         * Propagate the dirty and unzeroed flags to the allocated
         * small run, so that arena_dalloc_bin_run() has the ability to
         * conditionally trim clean pages.
         */
        arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
        /*
         * The first page will always be dirtied during small run
         * initialization, so a validation failure here would not
         * actually cause an observable failure.
         */
        if (config_debug && flag_dirty == 0 &&
            arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
            arena_chunk_validate_zeroed(chunk, run_ind);
        for (i = 1; i < need_pages - 1; i++) {
            arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
            if (config_debug && flag_dirty == 0 &&
                arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
                arena_chunk_validate_zeroed(chunk, run_ind+i);
        }
        arena_mapbits_small_set(chunk, run_ind+need_pages-1,
            need_pages-1, binind, flag_dirty);
        if (config_debug && flag_dirty == 0 &&
            arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
            0) {
            arena_chunk_validate_zeroed(chunk,
                run_ind+need_pages-1);
        }
    }
}
512
/*
 * Obtain a chunk for the arena: reuse the cached spare if one exists,
 * otherwise allocate and initialize a fresh chunk.  In either case the
 * chunk's single maximal run is inserted into runs_avail before returning.
 * Returns NULL if chunk allocation fails.  Called with arena->lock held;
 * the lock is temporarily dropped around chunk_alloc().
 */
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
    arena_chunk_t *chunk;
    size_t i;

    if (arena->spare != NULL) {
        chunk = arena->spare;
        arena->spare = NULL;

        /* The spare must still look like one maximal free run. */
        assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
        assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
        assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
            arena_maxclass);
        assert(arena_mapbits_unallocated_size_get(chunk,
            chunk_npages-1) == arena_maxclass);
        assert(arena_mapbits_dirty_get(chunk, map_bias) ==
            arena_mapbits_dirty_get(chunk, chunk_npages-1));
    } else {
        bool zero;
        size_t unzeroed;

        zero = false;
        /* Drop the arena lock across the (possibly slow) OS allocation. */
        malloc_mutex_unlock(&arena->lock);
        chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
            false, &zero, arena->dss_prec);
        malloc_mutex_lock(&arena->lock);
        if (chunk == NULL)
            return (NULL);
        if (config_stats)
            arena->stats.mapped += chunksize;

        chunk->arena = arena;

        /*
         * Claim that no pages are in use, since the header is merely
         * overhead.
         */
        chunk->ndirty = 0;

        chunk->nruns_avail = 0;
        chunk->nruns_adjac = 0;

        /*
         * Initialize the map to contain one maximal free untouched run.
         * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
         * chunk.
         */
        unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
        arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
            unzeroed);
        /*
         * There is no need to initialize the internal page map entries
         * unless the chunk is not zeroed.
         */
        if (zero == false) {
            for (i = map_bias+1; i < chunk_npages-1; i++)
                arena_mapbits_unzeroed_set(chunk, i, unzeroed);
        } else if (config_debug) {
            for (i = map_bias+1; i < chunk_npages-1; i++) {
                assert(arena_mapbits_unzeroed_get(chunk, i) ==
                    unzeroed);
            }
        }
        arena_mapbits_unallocated_set(chunk, chunk_npages-1,
            arena_maxclass, unzeroed);
    }

    /* Insert the run into the runs_avail tree. */
    arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
        false, false);

    return (chunk);
}
587
/*
 * Retire a completely-unused chunk: remove its maximal run from runs_avail
 * and stash the chunk as the arena's spare.  If a spare already exists, the
 * old spare is returned to the chunk allocator (with arena->lock temporarily
 * dropped), so the arena always caches at most one spare chunk.
 */
static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
    /* The chunk must consist of a single maximal unallocated run. */
    assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
        arena_maxclass);
    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
        arena_maxclass);
    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
        arena_mapbits_dirty_get(chunk, chunk_npages-1));

    /*
     * Remove run from the runs_avail tree, so that the arena does not use
     * it.
     */
    arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
        false, false);

    if (arena->spare != NULL) {
        arena_chunk_t *spare = arena->spare;

        /* Swap in the new spare, then release the old one unlocked. */
        arena->spare = chunk;
        malloc_mutex_unlock(&arena->lock);
        chunk_dealloc((void *)spare, chunksize, true);
        malloc_mutex_lock(&arena->lock);
        if (config_stats)
            arena->stats.mapped -= chunksize;
    } else
        arena->spare = chunk;
}
619
620static arena_run_t *
Jason Evans5b0c9962012-05-10 15:47:24 -0700621arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
Jason Evans203484e2012-05-02 00:30:36 -0700622 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -0800623{
Jason Evanse476f8a2010-01-16 09:53:50 -0800624 arena_run_t *run;
625 arena_chunk_map_t *mapelm, key;
626
Jason Evanse476f8a2010-01-16 09:53:50 -0800627 key.bits = size | CHUNK_MAP_KEY;
Jason Evanse3d13062012-10-30 15:42:37 -0700628 mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
Jason Evanse476f8a2010-01-16 09:53:50 -0800629 if (mapelm != NULL) {
630 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
Jason Evans7393f442010-10-01 17:35:43 -0700631 size_t pageind = (((uintptr_t)mapelm -
632 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
633 + map_bias;
Jason Evanse476f8a2010-01-16 09:53:50 -0800634
Jason Evanse00572b2010-03-14 19:43:56 -0700635 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
Jason Evansae4c7b42012-04-02 07:04:34 -0700636 LG_PAGE));
Jason Evans203484e2012-05-02 00:30:36 -0700637 arena_run_split(arena, run, size, large, binind, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -0800638 return (run);
639 }
640
Jason Evans5b0c9962012-05-10 15:47:24 -0700641 return (NULL);
642}
643
644static arena_run_t *
645arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
646 bool zero)
647{
648 arena_chunk_t *chunk;
649 arena_run_t *run;
650
651 assert(size <= arena_maxclass);
652 assert((size & PAGE_MASK) == 0);
653 assert((large && binind == BININD_INVALID) || (large == false && binind
654 != BININD_INVALID));
655
656 /* Search the arena's chunks for the lowest best fit. */
657 run = arena_run_alloc_helper(arena, size, large, binind, zero);
658 if (run != NULL)
659 return (run);
660
Jason Evanse476f8a2010-01-16 09:53:50 -0800661 /*
662 * No usable runs. Create a new chunk from which to allocate the run.
663 */
664 chunk = arena_chunk_alloc(arena);
Jason Evanse00572b2010-03-14 19:43:56 -0700665 if (chunk != NULL) {
Jason Evansae4c7b42012-04-02 07:04:34 -0700666 run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
Jason Evans203484e2012-05-02 00:30:36 -0700667 arena_run_split(arena, run, size, large, binind, zero);
Jason Evanse00572b2010-03-14 19:43:56 -0700668 return (run);
669 }
670
671 /*
672 * arena_chunk_alloc() failed, but another thread may have made
673 * sufficient memory available while this one dropped arena->lock in
674 * arena_chunk_alloc(), so search one more time.
675 */
Jason Evans5b0c9962012-05-10 15:47:24 -0700676 return (arena_run_alloc_helper(arena, size, large, binind, zero));
Jason Evanse476f8a2010-01-16 09:53:50 -0800677}
678
Jason Evans05b21be2010-03-14 17:36:10 -0700679static inline void
680arena_maybe_purge(arena_t *arena)
681{
Jason Evanse3d13062012-10-30 15:42:37 -0700682 size_t npurgeable, threshold;
Jason Evans05b21be2010-03-14 17:36:10 -0700683
Jason Evanse3d13062012-10-30 15:42:37 -0700684 /* Don't purge if the option is disabled. */
685 if (opt_lg_dirty_mult < 0)
686 return;
687 /* Don't purge if all dirty pages are already being purged. */
688 if (arena->ndirty <= arena->npurgatory)
689 return;
690 npurgeable = arena->ndirty - arena->npurgatory;
691 threshold = (arena->nactive >> opt_lg_dirty_mult);
692 /*
693 * Don't purge unless the number of purgeable pages exceeds the
694 * threshold.
695 */
696 if (npurgeable <= threshold)
697 return;
698
699 arena_purge(arena, false);
Jason Evans05b21be2010-03-14 17:36:10 -0700700}
701
Jason Evanse3d13062012-10-30 15:42:37 -0700702static inline size_t
703arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
Jason Evans05b21be2010-03-14 17:36:10 -0700704{
Jason Evanse3d13062012-10-30 15:42:37 -0700705 size_t npurged;
Jason Evans05b21be2010-03-14 17:36:10 -0700706 ql_head(arena_chunk_map_t) mapelms;
707 arena_chunk_map_t *mapelm;
Jason Evanse3d13062012-10-30 15:42:37 -0700708 size_t pageind, npages;
Jason Evans05b21be2010-03-14 17:36:10 -0700709 size_t nmadvise;
Jason Evans05b21be2010-03-14 17:36:10 -0700710
711 ql_new(&mapelms);
712
713 /*
714 * If chunk is the spare, temporarily re-allocate it, 1) so that its
Jason Evanse3d13062012-10-30 15:42:37 -0700715 * run is reinserted into runs_avail, and 2) so that it cannot be
Jason Evans05b21be2010-03-14 17:36:10 -0700716 * completely discarded by another thread while arena->lock is dropped
717 * by this thread. Note that the arena_run_dalloc() call will
718 * implicitly deallocate the chunk, so no explicit action is required
719 * in this function to deallocate the chunk.
Jason Evans19b3d612010-03-18 20:36:40 -0700720 *
721 * Note that once a chunk contains dirty pages, it cannot again contain
722 * a single run unless 1) it is a dirty run, or 2) this function purges
723 * dirty pages and causes the transition to a single clean run. Thus
724 * (chunk == arena->spare) is possible, but it is not possible for
725 * this function to be called on the spare unless it contains a dirty
726 * run.
Jason Evans05b21be2010-03-14 17:36:10 -0700727 */
Jason Evans19b3d612010-03-18 20:36:40 -0700728 if (chunk == arena->spare) {
Jason Evans203484e2012-05-02 00:30:36 -0700729 assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
Jason Evans30fe12b2012-05-10 17:09:17 -0700730 assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
731
Jason Evans05b21be2010-03-14 17:36:10 -0700732 arena_chunk_alloc(arena);
Jason Evans19b3d612010-03-18 20:36:40 -0700733 }
Jason Evans05b21be2010-03-14 17:36:10 -0700734
Jason Evanse3d13062012-10-30 15:42:37 -0700735 if (config_stats)
736 arena->stats.purged += chunk->ndirty;
737
738 /*
739 * Operate on all dirty runs if there is no clean/dirty run
740 * fragmentation.
741 */
742 if (chunk->nruns_adjac == 0)
743 all = true;
744
745 /*
746 * Temporarily allocate free dirty runs within chunk. If all is false,
747 * only operate on dirty runs that are fragments; otherwise operate on
748 * all dirty runs.
749 */
750 for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
Jason Evans203484e2012-05-02 00:30:36 -0700751 mapelm = arena_mapp_get(chunk, pageind);
752 if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
Jason Evanse3d13062012-10-30 15:42:37 -0700753 size_t run_size =
754 arena_mapbits_unallocated_size_get(chunk, pageind);
Jason Evans05b21be2010-03-14 17:36:10 -0700755
Jason Evanse3d13062012-10-30 15:42:37 -0700756 npages = run_size >> LG_PAGE;
Jason Evanse69bee02010-03-15 22:25:23 -0700757 assert(pageind + npages <= chunk_npages);
Jason Evans30fe12b2012-05-10 17:09:17 -0700758 assert(arena_mapbits_dirty_get(chunk, pageind) ==
759 arena_mapbits_dirty_get(chunk, pageind+npages-1));
Jason Evansc03a63d2010-03-22 11:45:01 -0700760
Jason Evanse3d13062012-10-30 15:42:37 -0700761 if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
762 (all || arena_avail_adjac(chunk, pageind,
763 npages))) {
764 arena_run_t *run = (arena_run_t *)((uintptr_t)
765 chunk + (uintptr_t)(pageind << LG_PAGE));
Jason Evansc03a63d2010-03-22 11:45:01 -0700766
Jason Evanse3d13062012-10-30 15:42:37 -0700767 arena_run_split(arena, run, run_size, true,
768 BININD_INVALID, false);
Jason Evans19b3d612010-03-18 20:36:40 -0700769 /* Append to list for later processing. */
770 ql_elm_new(mapelm, u.ql_link);
771 ql_tail_insert(&mapelms, mapelm, u.ql_link);
Jason Evans05b21be2010-03-14 17:36:10 -0700772 }
Jason Evans05b21be2010-03-14 17:36:10 -0700773 } else {
Jason Evanse3d13062012-10-30 15:42:37 -0700774 /* Skip run. */
775 if (arena_mapbits_large_get(chunk, pageind) != 0) {
776 npages = arena_mapbits_large_size_get(chunk,
Jason Evans203484e2012-05-02 00:30:36 -0700777 pageind) >> LG_PAGE;
Jason Evanse3d13062012-10-30 15:42:37 -0700778 } else {
Mike Hommey8b499712012-04-24 23:22:02 +0200779 size_t binind;
780 arena_bin_info_t *bin_info;
Jason Evans05b21be2010-03-14 17:36:10 -0700781 arena_run_t *run = (arena_run_t *)((uintptr_t)
Jason Evansae4c7b42012-04-02 07:04:34 -0700782 chunk + (uintptr_t)(pageind << LG_PAGE));
Jason Evanse69bee02010-03-15 22:25:23 -0700783
Jason Evans203484e2012-05-02 00:30:36 -0700784 assert(arena_mapbits_small_runind_get(chunk,
785 pageind) == 0);
Mike Hommey8b499712012-04-24 23:22:02 +0200786 binind = arena_bin_index(arena, run->bin);
787 bin_info = &arena_bin_info[binind];
Jason Evanse3d13062012-10-30 15:42:37 -0700788 npages = bin_info->run_size >> LG_PAGE;
Jason Evans05b21be2010-03-14 17:36:10 -0700789 }
790 }
791 }
Jason Evanse69bee02010-03-15 22:25:23 -0700792 assert(pageind == chunk_npages);
Jason Evanse3d13062012-10-30 15:42:37 -0700793 assert(chunk->ndirty == 0 || all == false);
794 assert(chunk->nruns_adjac == 0);
Jason Evans05b21be2010-03-14 17:36:10 -0700795
796 malloc_mutex_unlock(&arena->lock);
Jason Evans7372b152012-02-10 20:22:09 -0800797 if (config_stats)
798 nmadvise = 0;
Jason Evanse3d13062012-10-30 15:42:37 -0700799 npurged = 0;
Jason Evans05b21be2010-03-14 17:36:10 -0700800 ql_foreach(mapelm, &mapelms, u.ql_link) {
Jason Evans7de92762012-10-08 17:56:11 -0700801 bool unzeroed;
802 size_t flag_unzeroed, i;
Jason Evans05b21be2010-03-14 17:36:10 -0700803
Jason Evanse3d13062012-10-30 15:42:37 -0700804 pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
805 sizeof(arena_chunk_map_t)) + map_bias;
806 npages = arena_mapbits_large_size_get(chunk, pageind) >>
807 LG_PAGE;
Jason Evanse69bee02010-03-15 22:25:23 -0700808 assert(pageind + npages <= chunk_npages);
Jason Evans7de92762012-10-08 17:56:11 -0700809 unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
810 LG_PAGE)), (npages << LG_PAGE));
811 flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
812 /*
813 * Set the unzeroed flag for all pages, now that pages_purge()
814 * has returned whether the pages were zeroed as a side effect
815 * of purging. This chunk map modification is safe even though
816 * the arena mutex isn't currently owned by this thread,
817 * because the run is marked as allocated, thus protecting it
818 * from being modified by any other thread. As long as these
819 * writes don't perturb the first and last elements'
820 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
821 */
822 for (i = 0; i < npages; i++) {
823 arena_mapbits_unzeroed_set(chunk, pageind+i,
824 flag_unzeroed);
825 }
Jason Evanse3d13062012-10-30 15:42:37 -0700826 npurged += npages;
Jason Evans7372b152012-02-10 20:22:09 -0800827 if (config_stats)
828 nmadvise++;
Jason Evans05b21be2010-03-14 17:36:10 -0700829 }
Jason Evans05b21be2010-03-14 17:36:10 -0700830 malloc_mutex_lock(&arena->lock);
Jason Evans7372b152012-02-10 20:22:09 -0800831 if (config_stats)
832 arena->stats.nmadvise += nmadvise;
Jason Evans05b21be2010-03-14 17:36:10 -0700833
834 /* Deallocate runs. */
835 for (mapelm = ql_first(&mapelms); mapelm != NULL;
836 mapelm = ql_first(&mapelms)) {
Jason Evanse3d13062012-10-30 15:42:37 -0700837 arena_run_t *run;
Jason Evans05b21be2010-03-14 17:36:10 -0700838
Jason Evanse3d13062012-10-30 15:42:37 -0700839 pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
840 sizeof(arena_chunk_map_t)) + map_bias;
841 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
842 LG_PAGE));
Jason Evans05b21be2010-03-14 17:36:10 -0700843 ql_remove(&mapelms, mapelm, u.ql_link);
Jason Evanse3d13062012-10-30 15:42:37 -0700844 arena_run_dalloc(arena, run, false, true);
Jason Evans05b21be2010-03-14 17:36:10 -0700845 }
Jason Evanse3d13062012-10-30 15:42:37 -0700846
847 return (npurged);
848}
849
850static arena_chunk_t *
851chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
852{
853 size_t *ndirty = (size_t *)arg;
854
855 assert(chunk->ndirty != 0);
856 *ndirty += chunk->ndirty;
857 return (NULL);
Jason Evans05b21be2010-03-14 17:36:10 -0700858}
859
Jason Evanse476f8a2010-01-16 09:53:50 -0800860static void
Jason Evans6005f072010-09-30 16:55:08 -0700861arena_purge(arena_t *arena, bool all)
Jason Evanse476f8a2010-01-16 09:53:50 -0800862{
863 arena_chunk_t *chunk;
Jason Evans05b21be2010-03-14 17:36:10 -0700864 size_t npurgatory;
Jason Evans7372b152012-02-10 20:22:09 -0800865 if (config_debug) {
866 size_t ndirty = 0;
Jason Evanse476f8a2010-01-16 09:53:50 -0800867
Jason Evanse3d13062012-10-30 15:42:37 -0700868 arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
869 chunks_dirty_iter_cb, (void *)&ndirty);
Jason Evans7372b152012-02-10 20:22:09 -0800870 assert(ndirty == arena->ndirty);
Jason Evans2caa4712010-03-04 21:35:07 -0800871 }
Jason Evansaf8ad3e2011-03-23 20:39:02 -0700872 assert(arena->ndirty > arena->npurgatory || all);
Jason Evansaf8ad3e2011-03-23 20:39:02 -0700873 assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
Jason Evansf9a8edb2011-06-12 16:46:03 -0700874 arena->npurgatory) || all);
Jason Evanse476f8a2010-01-16 09:53:50 -0800875
Jason Evans7372b152012-02-10 20:22:09 -0800876 if (config_stats)
877 arena->stats.npurge++;
Jason Evanse476f8a2010-01-16 09:53:50 -0800878
879 /*
Jason Evans05b21be2010-03-14 17:36:10 -0700880 * Compute the minimum number of pages that this thread should try to
Jason Evans799ca0b2010-04-08 20:31:58 -0700881 * purge, and add the result to arena->npurgatory. This will keep
882 * multiple threads from racing to reduce ndirty below the threshold.
Jason Evanse476f8a2010-01-16 09:53:50 -0800883 */
Jason Evanse3d13062012-10-30 15:42:37 -0700884 {
885 size_t npurgeable = arena->ndirty - arena->npurgatory;
886
887 if (all == false) {
888 size_t threshold = (arena->nactive >>
889 opt_lg_dirty_mult);
890
891 npurgatory = npurgeable - threshold;
892 } else
893 npurgatory = npurgeable;
Jason Evansaf8ad3e2011-03-23 20:39:02 -0700894 }
Jason Evans799ca0b2010-04-08 20:31:58 -0700895 arena->npurgatory += npurgatory;
896
Jason Evans05b21be2010-03-14 17:36:10 -0700897 while (npurgatory > 0) {
Jason Evanse3d13062012-10-30 15:42:37 -0700898 size_t npurgeable, npurged, nunpurged;
899
Jason Evans05b21be2010-03-14 17:36:10 -0700900 /* Get next chunk with dirty pages. */
Jason Evanse3d13062012-10-30 15:42:37 -0700901 chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
Jason Evans05b21be2010-03-14 17:36:10 -0700902 if (chunk == NULL) {
903 /*
904 * This thread was unable to purge as many pages as
905 * originally intended, due to races with other threads
Jason Evans799ca0b2010-04-08 20:31:58 -0700906 * that either did some of the purging work, or re-used
907 * dirty pages.
Jason Evans05b21be2010-03-14 17:36:10 -0700908 */
Jason Evans799ca0b2010-04-08 20:31:58 -0700909 arena->npurgatory -= npurgatory;
910 return;
Jason Evans05b21be2010-03-14 17:36:10 -0700911 }
Jason Evanse3d13062012-10-30 15:42:37 -0700912 npurgeable = chunk->ndirty;
913 assert(npurgeable != 0);
Jason Evanse476f8a2010-01-16 09:53:50 -0800914
Jason Evanse3d13062012-10-30 15:42:37 -0700915 if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
Jason Evans799ca0b2010-04-08 20:31:58 -0700916 /*
Jason Evanse3d13062012-10-30 15:42:37 -0700917 * This thread will purge all the dirty pages in chunk,
918 * so set npurgatory to reflect this thread's intent to
919 * purge the pages. This tends to reduce the chances
920 * of the following scenario:
Jason Evans799ca0b2010-04-08 20:31:58 -0700921 *
922 * 1) This thread sets arena->npurgatory such that
923 * (arena->ndirty - arena->npurgatory) is at the
924 * threshold.
925 * 2) This thread drops arena->lock.
926 * 3) Another thread causes one or more pages to be
927 * dirtied, and immediately determines that it must
928 * purge dirty pages.
929 *
930 * If this scenario *does* play out, that's okay,
931 * because all of the purging work being done really
932 * needs to happen.
933 */
Jason Evanse3d13062012-10-30 15:42:37 -0700934 arena->npurgatory += npurgeable - npurgatory;
935 npurgatory = npurgeable;
Jason Evans799ca0b2010-04-08 20:31:58 -0700936 }
937
Jason Evanse3d13062012-10-30 15:42:37 -0700938 /*
939 * Keep track of how many pages are purgeable, versus how many
940 * actually get purged, and adjust counters accordingly.
941 */
942 arena->npurgatory -= npurgeable;
943 npurgatory -= npurgeable;
944 npurged = arena_chunk_purge(arena, chunk, all);
945 nunpurged = npurgeable - npurged;
946 arena->npurgatory += nunpurged;
947 npurgatory += nunpurged;
Jason Evanse476f8a2010-01-16 09:53:50 -0800948 }
949}
950
Jason Evans6005f072010-09-30 16:55:08 -0700951void
952arena_purge_all(arena_t *arena)
953{
954
955 malloc_mutex_lock(&arena->lock);
956 arena_purge(arena, true);
957 malloc_mutex_unlock(&arena->lock);
958}
959
Jason Evanse476f8a2010-01-16 09:53:50 -0800960static void
Jason Evanse3d13062012-10-30 15:42:37 -0700961arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
Jason Evanse476f8a2010-01-16 09:53:50 -0800962{
963 arena_chunk_t *chunk;
Jason Evans19b3d612010-03-18 20:36:40 -0700964 size_t size, run_ind, run_pages, flag_dirty;
Jason Evanse476f8a2010-01-16 09:53:50 -0800965
966 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evansae4c7b42012-04-02 07:04:34 -0700967 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
Jason Evans7393f442010-10-01 17:35:43 -0700968 assert(run_ind >= map_bias);
Jason Evanse476f8a2010-01-16 09:53:50 -0800969 assert(run_ind < chunk_npages);
Jason Evans203484e2012-05-02 00:30:36 -0700970 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
971 size = arena_mapbits_large_size_get(chunk, run_ind);
Jason Evansae4c7b42012-04-02 07:04:34 -0700972 assert(size == PAGE ||
Jason Evans203484e2012-05-02 00:30:36 -0700973 arena_mapbits_large_size_get(chunk,
974 run_ind+(size>>LG_PAGE)-1) == 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -0700975 } else {
976 size_t binind = arena_bin_index(arena, run->bin);
977 arena_bin_info_t *bin_info = &arena_bin_info[binind];
978 size = bin_info->run_size;
979 }
Jason Evansae4c7b42012-04-02 07:04:34 -0700980 run_pages = (size >> LG_PAGE);
Jason Evans7372b152012-02-10 20:22:09 -0800981 if (config_stats) {
982 /*
983 * Update stats_cactive if nactive is crossing a chunk
984 * multiple.
985 */
Jason Evansae4c7b42012-04-02 07:04:34 -0700986 size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
987 CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
Jason Evans7372b152012-02-10 20:22:09 -0800988 if (cactive_diff != 0)
989 stats_cactive_sub(cactive_diff);
990 }
Jason Evanse476f8a2010-01-16 09:53:50 -0800991 arena->nactive -= run_pages;
992
Jason Evans19b3d612010-03-18 20:36:40 -0700993 /*
994 * The run is dirty if the caller claims to have dirtied it, as well as
Jason Evanse3d13062012-10-30 15:42:37 -0700995 * if it was already dirty before being allocated and the caller
996 * doesn't claim to have cleaned it.
Jason Evans19b3d612010-03-18 20:36:40 -0700997 */
Jason Evans30fe12b2012-05-10 17:09:17 -0700998 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
999 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evanse3d13062012-10-30 15:42:37 -07001000 if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
Jason Evans19b3d612010-03-18 20:36:40 -07001001 dirty = true;
1002 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
Jason Evans19b3d612010-03-18 20:36:40 -07001003
Jason Evanse476f8a2010-01-16 09:53:50 -08001004 /* Mark pages as unallocated in the chunk map. */
1005 if (dirty) {
Jason Evans203484e2012-05-02 00:30:36 -07001006 arena_mapbits_unallocated_set(chunk, run_ind, size,
1007 CHUNK_MAP_DIRTY);
1008 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1009 CHUNK_MAP_DIRTY);
Jason Evanse476f8a2010-01-16 09:53:50 -08001010 } else {
Jason Evans203484e2012-05-02 00:30:36 -07001011 arena_mapbits_unallocated_set(chunk, run_ind, size,
1012 arena_mapbits_unzeroed_get(chunk, run_ind));
1013 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1014 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
Jason Evanse476f8a2010-01-16 09:53:50 -08001015 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001016
1017 /* Try to coalesce forward. */
1018 if (run_ind + run_pages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07001019 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
1020 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
1021 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1022 run_ind+run_pages);
Jason Evansae4c7b42012-04-02 07:04:34 -07001023 size_t nrun_pages = nrun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001024
1025 /*
1026 * Remove successor from runs_avail; the coalesced run is
1027 * inserted later.
1028 */
Jason Evans203484e2012-05-02 00:30:36 -07001029 assert(arena_mapbits_unallocated_size_get(chunk,
1030 run_ind+run_pages+nrun_pages-1) == nrun_size);
1031 assert(arena_mapbits_dirty_get(chunk,
1032 run_ind+run_pages+nrun_pages-1) == flag_dirty);
Jason Evanse3d13062012-10-30 15:42:37 -07001033 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
1034 false, true);
Jason Evanse476f8a2010-01-16 09:53:50 -08001035
1036 size += nrun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07001037 run_pages += nrun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001038
Jason Evans203484e2012-05-02 00:30:36 -07001039 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1040 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1041 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001042 }
1043
1044 /* Try to coalesce backward. */
Jason Evans203484e2012-05-02 00:30:36 -07001045 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
1046 == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
1047 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
1048 run_ind-1);
Jason Evansae4c7b42012-04-02 07:04:34 -07001049 size_t prun_pages = prun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001050
Jason Evans12ca9142010-10-17 19:56:09 -07001051 run_ind -= prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001052
1053 /*
1054 * Remove predecessor from runs_avail; the coalesced run is
1055 * inserted later.
1056 */
Jason Evans203484e2012-05-02 00:30:36 -07001057 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1058 prun_size);
1059 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
Jason Evanse3d13062012-10-30 15:42:37 -07001060 arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
1061 false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001062
1063 size += prun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07001064 run_pages += prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001065
Jason Evans203484e2012-05-02 00:30:36 -07001066 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1067 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1068 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001069 }
1070
1071 /* Insert into runs_avail, now that coalescing is complete. */
Jason Evans203484e2012-05-02 00:30:36 -07001072 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1073 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
1074 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1075 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evanse3d13062012-10-30 15:42:37 -07001076 arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
Jason Evans8d4203c2010-04-13 20:53:21 -07001077
Jason Evans203484e2012-05-02 00:30:36 -07001078 /* Deallocate chunk if it is now completely unused. */
1079 if (size == arena_maxclass) {
1080 assert(run_ind == map_bias);
1081 assert(run_pages == (arena_maxclass >> LG_PAGE));
Jason Evanse476f8a2010-01-16 09:53:50 -08001082 arena_chunk_dealloc(arena, chunk);
Jason Evans203484e2012-05-02 00:30:36 -07001083 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001084
Jason Evans4fb7f512010-01-27 18:27:09 -08001085 /*
Jason Evans8d4203c2010-04-13 20:53:21 -07001086 * It is okay to do dirty page processing here even if the chunk was
Jason Evans4fb7f512010-01-27 18:27:09 -08001087 * deallocated above, since in that case it is the spare. Waiting
1088 * until after possible chunk deallocation to do dirty processing
1089 * allows for an old spare to be fully deallocated, thus decreasing the
1090 * chances of spuriously crossing the dirty page purging threshold.
1091 */
Jason Evans8d4203c2010-04-13 20:53:21 -07001092 if (dirty)
Jason Evans05b21be2010-03-14 17:36:10 -07001093 arena_maybe_purge(arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08001094}
1095
/*
 * Shrink run from oldsize to newsize by splitting off and freeing the
 * leading (oldsize - newsize) bytes.  The surviving run begins newsize
 * bytes before the old end.  Both sizes are page multiples.
 */
static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * leading run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);

	if (config_debug) {
		UNUSED size_t tail_npages = newsize >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	/* Mark the trailing (kept) run as a separate large allocation. */
	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
	    flag_dirty);

	/* Free the split-off head; it was not dirtied or cleaned here. */
	arena_run_dalloc(arena, run, false, false);
}
1127
/*
 * Shrink run from oldsize to newsize by splitting off and freeing the
 * trailing (oldsize - newsize) bytes.  dirty tells arena_run_dalloc()
 * whether the caller dirtied the freed tail.  Both sizes are page
 * multiples.
 */
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = newsize >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * trailing run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);

	if (config_debug) {
		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	/* Mark the split-off tail as a separate large allocation. */
	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
	    flag_dirty);

	/* Free the tail run, which starts newsize bytes into the old run. */
	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
	    dirty, false);
}
1160
1161static arena_run_t *
Jason Evanse7a10582012-02-13 17:36:52 -08001162arena_bin_runs_first(arena_bin_t *bin)
1163{
1164 arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
1165 if (mapelm != NULL) {
1166 arena_chunk_t *chunk;
1167 size_t pageind;
Mike Hommey8b499712012-04-24 23:22:02 +02001168 arena_run_t *run;
Jason Evanse7a10582012-02-13 17:36:52 -08001169
1170 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
1171 pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
1172 sizeof(arena_chunk_map_t))) + map_bias;
Jason Evans203484e2012-05-02 00:30:36 -07001173 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1174 arena_mapbits_small_runind_get(chunk, pageind)) <<
Jason Evansae4c7b42012-04-02 07:04:34 -07001175 LG_PAGE));
Jason Evanse7a10582012-02-13 17:36:52 -08001176 return (run);
1177 }
1178
1179 return (NULL);
1180}
1181
1182static void
1183arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
1184{
1185 arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
Jason Evansae4c7b42012-04-02 07:04:34 -07001186 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001187 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
Jason Evanse7a10582012-02-13 17:36:52 -08001188
1189 assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
1190
1191 arena_run_tree_insert(&bin->runs, mapelm);
1192}
1193
1194static void
1195arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
1196{
1197 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evansae4c7b42012-04-02 07:04:34 -07001198 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001199 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
Jason Evanse7a10582012-02-13 17:36:52 -08001200
1201 assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
1202
1203 arena_run_tree_remove(&bin->runs, mapelm);
1204}
1205
1206static arena_run_t *
1207arena_bin_nonfull_run_tryget(arena_bin_t *bin)
1208{
1209 arena_run_t *run = arena_bin_runs_first(bin);
1210 if (run != NULL) {
1211 arena_bin_runs_remove(bin, run);
1212 if (config_stats)
1213 bin->stats.reruns++;
1214 }
1215 return (run);
1216}
1217
/*
 * Obtain a run with free regions for bin, either by pulling one from the
 * bin's run tree or by allocating a new run from the arena.  Called with
 * bin->lock held; the lock is dropped around the arena-level allocation
 * (which takes arena->lock) and re-acquired before returning.  Returns NULL
 * if no run could be obtained.
 */
static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
	arena_run_t *run;
	size_t binind;
	arena_bin_info_t *bin_info;

	/* Look for a usable run. */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);
	/* No existing runs have any space available. */

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];

	/* Allocate a new run.  bin->lock is dropped across arena->lock. */
	malloc_mutex_unlock(&bin->lock);
	/******************************/
	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
	if (run != NULL) {
		bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
		    (uintptr_t)bin_info->bitmap_offset);

		/* Initialize run internals. */
		VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
		    bin_info->redzone_size);
		run->bin = bin;
		run->nextind = 0;
		run->nfree = bin_info->nregs;
		bitmap_init(bitmap, &bin_info->bitmap_info);
	}
	malloc_mutex_unlock(&arena->lock);
	/********************************/
	malloc_mutex_lock(&bin->lock);
	if (run != NULL) {
		if (config_stats) {
			bin->stats.nruns++;
			bin->stats.curruns++;
		}
		return (run);
	}

	/*
	 * arena_run_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);

	return (NULL);
}
1273
Jason Evans1e0a6362010-03-13 13:41:58 -08001274/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08001275static void *
1276arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1277{
Jason Evanse00572b2010-03-14 19:43:56 -07001278 void *ret;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001279 size_t binind;
1280 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07001281 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001282
Jason Evans49f7e8f2011-03-15 13:59:15 -07001283 binind = arena_bin_index(arena, bin);
1284 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07001285 bin->runcur = NULL;
1286 run = arena_bin_nonfull_run_get(arena, bin);
1287 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1288 /*
1289 * Another thread updated runcur while this one ran without the
1290 * bin lock in arena_bin_nonfull_run_get().
1291 */
Jason Evanse00572b2010-03-14 19:43:56 -07001292 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001293 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07001294 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07001295 arena_chunk_t *chunk;
1296
1297 /*
1298 * arena_run_alloc() may have allocated run, or it may
Jason Evans84c8eef2011-03-16 10:30:13 -07001299 * have pulled run from the bin's run tree. Therefore
Jason Evans940a2e02010-10-17 17:51:37 -07001300 * it is unsafe to make any assumptions about how run
1301 * has previously been used, and arena_bin_lower_run()
1302 * must be called, as if a region were just deallocated
1303 * from the run.
1304 */
1305 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001306 if (run->nfree == bin_info->nregs)
Jason Evans8de6a022010-10-17 20:57:30 -07001307 arena_dalloc_bin_run(arena, chunk, run, bin);
1308 else
1309 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07001310 }
1311 return (ret);
1312 }
1313
1314 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001315 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07001316
1317 bin->runcur = run;
1318
Jason Evanse476f8a2010-01-16 09:53:50 -08001319 assert(bin->runcur->nfree > 0);
1320
Jason Evans49f7e8f2011-03-15 13:59:15 -07001321 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08001322}
1323
Jason Evans86815df2010-03-13 20:32:56 -08001324void
1325arena_prof_accum(arena_t *arena, uint64_t accumbytes)
1326{
1327
Jason Evans78f73522012-04-18 13:38:40 -07001328 cassert(config_prof);
1329
1330 if (config_prof && prof_interval != 0) {
Jason Evans86815df2010-03-13 20:32:56 -08001331 arena->prof_accumbytes += accumbytes;
1332 if (arena->prof_accumbytes >= prof_interval) {
1333 prof_idump();
1334 arena->prof_accumbytes -= prof_interval;
1335 }
1336 }
1337}
Jason Evans86815df2010-03-13 20:32:56 -08001338
Jason Evanse476f8a2010-01-16 09:53:50 -08001339void
Jason Evans7372b152012-02-10 20:22:09 -08001340arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
1341 uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08001342{
1343 unsigned i, nfill;
1344 arena_bin_t *bin;
1345 arena_run_t *run;
1346 void *ptr;
1347
1348 assert(tbin->ncached == 0);
1349
Jason Evans7372b152012-02-10 20:22:09 -08001350 if (config_prof) {
1351 malloc_mutex_lock(&arena->lock);
1352 arena_prof_accum(arena, prof_accumbytes);
1353 malloc_mutex_unlock(&arena->lock);
1354 }
Jason Evanse69bee02010-03-15 22:25:23 -07001355 bin = &arena->bins[binind];
1356 malloc_mutex_lock(&bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07001357 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1358 tbin->lg_fill_div); i < nfill; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001359 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001360 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001361 else
1362 ptr = arena_bin_malloc_hard(arena, bin);
Jason Evans3fa9a2f2010-03-07 15:34:14 -08001363 if (ptr == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08001364 break;
Jason Evans122449b2012-04-06 00:35:09 -07001365 if (config_fill && opt_junk) {
1366 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
1367 true);
1368 }
Jason Evans9c43c132011-03-18 10:53:15 -07001369 /* Insert such that low regions get used first. */
1370 tbin->avail[nfill - 1 - i] = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08001371 }
Jason Evans7372b152012-02-10 20:22:09 -08001372 if (config_stats) {
1373 bin->stats.allocated += i * arena_bin_info[binind].reg_size;
1374 bin->stats.nmalloc += i;
1375 bin->stats.nrequests += tbin->tstats.nrequests;
1376 bin->stats.nfills++;
1377 tbin->tstats.nrequests = 0;
1378 }
Jason Evans86815df2010-03-13 20:32:56 -08001379 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001380 tbin->ncached = i;
Jason Evanse476f8a2010-01-16 09:53:50 -08001381}
Jason Evanse476f8a2010-01-16 09:53:50 -08001382
Jason Evans122449b2012-04-06 00:35:09 -07001383void
1384arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
1385{
1386
1387 if (zero) {
1388 size_t redzone_size = bin_info->redzone_size;
1389 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
1390 redzone_size);
1391 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
1392 redzone_size);
1393 } else {
1394 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
1395 bin_info->reg_interval);
1396 }
1397}
1398
1399void
1400arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1401{
1402 size_t size = bin_info->reg_size;
1403 size_t redzone_size = bin_info->redzone_size;
1404 size_t i;
1405 bool error = false;
1406
1407 for (i = 1; i <= redzone_size; i++) {
1408 unsigned byte;
1409 if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
1410 error = true;
1411 malloc_printf("<jemalloc>: Corrupt redzone "
1412 "%zu byte%s before %p (size %zu), byte=%#x\n", i,
1413 (i == 1) ? "" : "s", ptr, size, byte);
1414 }
1415 }
1416 for (i = 0; i < redzone_size; i++) {
1417 unsigned byte;
1418 if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
1419 error = true;
1420 malloc_printf("<jemalloc>: Corrupt redzone "
1421 "%zu byte%s after end of %p (size %zu), byte=%#x\n",
1422 i, (i == 1) ? "" : "s", ptr, size, byte);
1423 }
1424 }
1425 if (opt_abort && error)
1426 abort();
1427
1428 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1429 bin_info->reg_interval);
1430}
1431
Jason Evanse476f8a2010-01-16 09:53:50 -08001432void *
1433arena_malloc_small(arena_t *arena, size_t size, bool zero)
1434{
1435 void *ret;
1436 arena_bin_t *bin;
1437 arena_run_t *run;
1438 size_t binind;
1439
Jason Evans41ade962011-03-06 22:56:36 -08001440 binind = SMALL_SIZE2BIN(size);
Jason Evansb1726102012-02-28 16:50:47 -08001441 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08001442 bin = &arena->bins[binind];
Jason Evans49f7e8f2011-03-15 13:59:15 -07001443 size = arena_bin_info[binind].reg_size;
Jason Evanse476f8a2010-01-16 09:53:50 -08001444
Jason Evans86815df2010-03-13 20:32:56 -08001445 malloc_mutex_lock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001446 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07001447 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08001448 else
1449 ret = arena_bin_malloc_hard(arena, bin);
1450
1451 if (ret == NULL) {
Jason Evans86815df2010-03-13 20:32:56 -08001452 malloc_mutex_unlock(&bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08001453 return (NULL);
1454 }
1455
Jason Evans7372b152012-02-10 20:22:09 -08001456 if (config_stats) {
1457 bin->stats.allocated += size;
1458 bin->stats.nmalloc++;
1459 bin->stats.nrequests++;
1460 }
Jason Evans86815df2010-03-13 20:32:56 -08001461 malloc_mutex_unlock(&bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08001462 if (config_prof && isthreaded == false) {
Jason Evans86815df2010-03-13 20:32:56 -08001463 malloc_mutex_lock(&arena->lock);
Jason Evansd34f9e72010-02-11 13:19:21 -08001464 arena_prof_accum(arena, size);
Jason Evans86815df2010-03-13 20:32:56 -08001465 malloc_mutex_unlock(&arena->lock);
1466 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001467
1468 if (zero == false) {
Jason Evans7372b152012-02-10 20:22:09 -08001469 if (config_fill) {
Jason Evans122449b2012-04-06 00:35:09 -07001470 if (opt_junk) {
1471 arena_alloc_junk_small(ret,
1472 &arena_bin_info[binind], false);
1473 } else if (opt_zero)
Jason Evans7372b152012-02-10 20:22:09 -08001474 memset(ret, 0, size);
1475 }
Jason Evans122449b2012-04-06 00:35:09 -07001476 } else {
1477 if (config_fill && opt_junk) {
1478 arena_alloc_junk_small(ret, &arena_bin_info[binind],
1479 true);
1480 }
1481 VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001482 memset(ret, 0, size);
Jason Evans122449b2012-04-06 00:35:09 -07001483 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001484
1485 return (ret);
1486}
1487
1488void *
Jason Evanse476f8a2010-01-16 09:53:50 -08001489arena_malloc_large(arena_t *arena, size_t size, bool zero)
1490{
1491 void *ret;
1492
1493 /* Large allocation. */
1494 size = PAGE_CEILING(size);
1495 malloc_mutex_lock(&arena->lock);
Jason Evans203484e2012-05-02 00:30:36 -07001496 ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
Jason Evanse476f8a2010-01-16 09:53:50 -08001497 if (ret == NULL) {
1498 malloc_mutex_unlock(&arena->lock);
1499 return (NULL);
1500 }
Jason Evans7372b152012-02-10 20:22:09 -08001501 if (config_stats) {
1502 arena->stats.nmalloc_large++;
1503 arena->stats.nrequests_large++;
1504 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001505 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1506 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1507 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001508 }
Jason Evans7372b152012-02-10 20:22:09 -08001509 if (config_prof)
1510 arena_prof_accum(arena, size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001511 malloc_mutex_unlock(&arena->lock);
1512
1513 if (zero == false) {
Jason Evans7372b152012-02-10 20:22:09 -08001514 if (config_fill) {
1515 if (opt_junk)
1516 memset(ret, 0xa5, size);
1517 else if (opt_zero)
1518 memset(ret, 0, size);
1519 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001520 }
1521
1522 return (ret);
1523}
1524
Jason Evanse476f8a2010-01-16 09:53:50 -08001525/* Only handles large allocations that require more than page alignment. */
1526void *
Jason Evans5ff709c2012-04-11 18:13:45 -07001527arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001528{
1529 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07001530 size_t alloc_size, leadsize, trailsize;
1531 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08001532 arena_chunk_t *chunk;
1533
1534 assert((size & PAGE_MASK) == 0);
Jason Evans93443682010-10-20 17:39:18 -07001535
1536 alignment = PAGE_CEILING(alignment);
Jason Evans5ff709c2012-04-11 18:13:45 -07001537 alloc_size = size + alignment - PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001538
1539 malloc_mutex_lock(&arena->lock);
Jason Evans203484e2012-05-02 00:30:36 -07001540 run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
Jason Evans5ff709c2012-04-11 18:13:45 -07001541 if (run == NULL) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001542 malloc_mutex_unlock(&arena->lock);
1543 return (NULL);
1544 }
Jason Evans5ff709c2012-04-11 18:13:45 -07001545 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evanse476f8a2010-01-16 09:53:50 -08001546
Jason Evans5ff709c2012-04-11 18:13:45 -07001547 leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
1548 (uintptr_t)run;
1549 assert(alloc_size >= leadsize + size);
1550 trailsize = alloc_size - leadsize - size;
1551 ret = (void *)((uintptr_t)run + leadsize);
1552 if (leadsize != 0) {
1553 arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
1554 leadsize);
1555 }
1556 if (trailsize != 0) {
1557 arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
1558 false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001559 }
1560
Jason Evans7372b152012-02-10 20:22:09 -08001561 if (config_stats) {
1562 arena->stats.nmalloc_large++;
1563 arena->stats.nrequests_large++;
1564 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001565 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1566 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1567 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001568 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001569 malloc_mutex_unlock(&arena->lock);
1570
Jason Evans7372b152012-02-10 20:22:09 -08001571 if (config_fill && zero == false) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001572 if (opt_junk)
1573 memset(ret, 0xa5, size);
1574 else if (opt_zero)
1575 memset(ret, 0, size);
1576 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001577 return (ret);
1578}
1579
/*
 * Record that a single-page large allocation is being used as a promoted
 * small allocation of the given size (profiling builds only).  The page map
 * entry is updated so that isalloc(ptr, true) subsequently reports the small
 * size class rather than PAGE.
 */
void
arena_prof_promoted(const void *ptr, size_t size)
{
	arena_chunk_t *chunk;
	size_t pageind, binind;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);
	/* Must currently be a one-page large allocation. */
	assert(isalloc(ptr, false) == PAGE);
	assert(isalloc(ptr, true) == PAGE);
	assert(size <= SMALL_MAXCLASS);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	binind = SMALL_SIZE2BIN(size);
	assert(binind < NBINS);
	/* Stash the small size class in the large mapping's binind field. */
	arena_mapbits_large_binind_set(chunk, pageind, binind);

	/* isalloc() now reports PAGE (no demotion) vs. size (demotion). */
	assert(isalloc(ptr, false) == PAGE);
	assert(isalloc(ptr, true) == size);
}
Jason Evans6109fe02010-02-10 10:37:56 -08001602
Jason Evanse476f8a2010-01-16 09:53:50 -08001603static void
Jason Evans088e6a02010-10-18 00:04:44 -07001604arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08001605 arena_bin_t *bin)
1606{
Jason Evanse476f8a2010-01-16 09:53:50 -08001607
Jason Evans19b3d612010-03-18 20:36:40 -07001608 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08001609 if (run == bin->runcur)
1610 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07001611 else {
1612 size_t binind = arena_bin_index(chunk->arena, bin);
1613 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1614
1615 if (bin_info->nregs != 1) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07001616 /*
1617 * This block's conditional is necessary because if the
1618 * run only contains one region, then it never gets
1619 * inserted into the non-full runs tree.
1620 */
Jason Evanse7a10582012-02-13 17:36:52 -08001621 arena_bin_runs_remove(bin, run);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001622 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001623 }
Jason Evans088e6a02010-10-18 00:04:44 -07001624}
1625
/*
 * Deallocate a completely-free run back to the arena.  Called with bin->lock
 * held; that lock is dropped while arena->lock is held (the bin lock must
 * not be held across arena-level operations), then re-acquired before
 * returning so the caller's locking expectations hold.
 */
static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{
	size_t binind;
	arena_bin_info_t *bin_info;
	size_t npages, run_ind, past;

	assert(run != bin->runcur);
	assert(arena_run_tree_search(&bin->runs,
	    arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
	    == NULL);

	binind = arena_bin_index(chunk->arena, run->bin);
	bin_info = &arena_bin_info[binind];

	malloc_mutex_unlock(&bin->lock);
	/******************************/
	npages = bin_info->run_size >> LG_PAGE;
	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	/*
	 * "past" is the chunk-relative index of the first page beyond the
	 * highest region ever allocated from this run (run->nextind),
	 * excluding that region's trailing redzone.
	 */
	past = (size_t)(PAGE_CEILING((uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
	    bin_info->reg_interval - bin_info->redzone_size) -
	    (uintptr_t)chunk) >> LG_PAGE);
	malloc_mutex_lock(&arena->lock);

	/*
	 * If the run was originally clean, and some pages were never touched,
	 * trim the clean pages before deallocating the dirty portion of the
	 * run.
	 */
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+npages-1));
	if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
	    npages) {
		/* Trim clean pages.  Convert to large run beforehand. */
		assert(npages > 0);
		arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
		arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
		arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
		    ((past - run_ind) << LG_PAGE), false);
		/* npages = past - run_ind; */
	}
	arena_run_dalloc(arena, run, true, false);
	malloc_mutex_unlock(&arena->lock);
	/****************************/
	malloc_mutex_lock(&bin->lock);
	if (config_stats)
		bin->stats.curruns--;
}
1676
Jason Evans940a2e02010-10-17 17:51:37 -07001677static void
1678arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1679 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08001680{
Jason Evanse476f8a2010-01-16 09:53:50 -08001681
Jason Evans8de6a022010-10-17 20:57:30 -07001682 /*
Jason Evanse7a10582012-02-13 17:36:52 -08001683 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
1684 * non-full run. It is okay to NULL runcur out rather than proactively
1685 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07001686 */
Jason Evanse7a10582012-02-13 17:36:52 -08001687 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07001688 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08001689 if (bin->runcur->nfree > 0)
1690 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07001691 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08001692 if (config_stats)
1693 bin->stats.reruns++;
1694 } else
1695 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07001696}
1697
1698void
Jason Evans203484e2012-05-02 00:30:36 -07001699arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans940a2e02010-10-17 17:51:37 -07001700 arena_chunk_map_t *mapelm)
1701{
1702 size_t pageind;
1703 arena_run_t *run;
1704 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02001705 arena_bin_info_t *bin_info;
1706 size_t size, binind;
Jason Evans940a2e02010-10-17 17:51:37 -07001707
Jason Evansae4c7b42012-04-02 07:04:34 -07001708 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07001709 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
Jason Evans203484e2012-05-02 00:30:36 -07001710 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
Jason Evans940a2e02010-10-17 17:51:37 -07001711 bin = run->bin;
Jason Evans80737c32012-05-02 16:11:03 -07001712 binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
Mike Hommey8b499712012-04-24 23:22:02 +02001713 bin_info = &arena_bin_info[binind];
Jason Evans7372b152012-02-10 20:22:09 -08001714 if (config_fill || config_stats)
1715 size = bin_info->reg_size;
Jason Evans940a2e02010-10-17 17:51:37 -07001716
Jason Evans7372b152012-02-10 20:22:09 -08001717 if (config_fill && opt_junk)
Jason Evans122449b2012-04-06 00:35:09 -07001718 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07001719
1720 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07001721 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07001722 arena_dissociate_bin_run(chunk, run, bin);
Jason Evans8de6a022010-10-17 20:57:30 -07001723 arena_dalloc_bin_run(arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07001724 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07001725 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08001726
Jason Evans7372b152012-02-10 20:22:09 -08001727 if (config_stats) {
1728 bin->stats.allocated -= size;
1729 bin->stats.ndalloc++;
1730 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001731}
1732
Jason Evanse476f8a2010-01-16 09:53:50 -08001733void
Jason Evans203484e2012-05-02 00:30:36 -07001734arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1735 size_t pageind, arena_chunk_map_t *mapelm)
1736{
1737 arena_run_t *run;
1738 arena_bin_t *bin;
1739
1740 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1741 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1742 bin = run->bin;
1743 malloc_mutex_lock(&bin->lock);
1744 arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
1745 malloc_mutex_unlock(&bin->lock);
1746}
1747
1748void
1749arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1750 size_t pageind)
1751{
1752 arena_chunk_map_t *mapelm;
1753
1754 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07001755 /* arena_ptr_small_binind_get() does extra sanity checking. */
1756 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
1757 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07001758 }
1759 mapelm = arena_mapp_get(chunk, pageind);
1760 arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
1761}
Jason Evanse476f8a2010-01-16 09:53:50 -08001762
1763void
Jason Evans203484e2012-05-02 00:30:36 -07001764arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
Jason Evanse476f8a2010-01-16 09:53:50 -08001765{
Jason Evans13668262010-01-31 03:57:29 -08001766
Jason Evans7372b152012-02-10 20:22:09 -08001767 if (config_fill || config_stats) {
Jason Evansae4c7b42012-04-02 07:04:34 -07001768 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07001769 size_t size = arena_mapbits_large_size_get(chunk, pageind);
Jason Evanse476f8a2010-01-16 09:53:50 -08001770
Jason Evans7372b152012-02-10 20:22:09 -08001771 if (config_fill && config_stats && opt_junk)
Jason Evanse476f8a2010-01-16 09:53:50 -08001772 memset(ptr, 0x5a, size);
Jason Evans7372b152012-02-10 20:22:09 -08001773 if (config_stats) {
1774 arena->stats.ndalloc_large++;
1775 arena->stats.allocated_large -= size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001776 arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
1777 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08001778 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001779 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001780
Jason Evanse3d13062012-10-30 15:42:37 -07001781 arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08001782}
1783
Jason Evans203484e2012-05-02 00:30:36 -07001784void
1785arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1786{
1787
1788 malloc_mutex_lock(&arena->lock);
1789 arena_dalloc_large_locked(arena, chunk, ptr);
1790 malloc_mutex_unlock(&arena->lock);
1791}
1792
Jason Evanse476f8a2010-01-16 09:53:50 -08001793static void
1794arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001795 size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08001796{
1797
1798 assert(size < oldsize);
1799
1800 /*
1801 * Shrink the run, and make trailing pages available for other
1802 * allocations.
1803 */
1804 malloc_mutex_lock(&arena->lock);
1805 arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
1806 true);
Jason Evans7372b152012-02-10 20:22:09 -08001807 if (config_stats) {
1808 arena->stats.ndalloc_large++;
1809 arena->stats.allocated_large -= oldsize;
Jason Evansae4c7b42012-04-02 07:04:34 -07001810 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1811 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08001812
Jason Evans7372b152012-02-10 20:22:09 -08001813 arena->stats.nmalloc_large++;
1814 arena->stats.nrequests_large++;
1815 arena->stats.allocated_large += size;
Jason Evansae4c7b42012-04-02 07:04:34 -07001816 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1817 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1818 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001819 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001820 malloc_mutex_unlock(&arena->lock);
1821}
1822
/*
 * Try to grow a large allocation in place by absorbing part of the
 * immediately following run, if it is unallocated and large enough.
 * Returns false on success, true if in-place growth was not possible.
 */
static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size, size_t extra, bool zero)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t npages = oldsize >> LG_PAGE;
	size_t followsize;

	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));

	/* Try to extend the run. */
	assert(size + extra > oldsize);
	malloc_mutex_lock(&arena->lock);
	if (pageind + npages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
	    (followsize = arena_mapbits_unallocated_size_get(chunk,
	    pageind+npages)) >= size - oldsize) {
		/*
		 * The next run is available and sufficiently large.  Split the
		 * following run, then merge the first part with the existing
		 * allocation.
		 */
		size_t flag_dirty;
		/* Take up to extra beyond size, but no more than follows. */
		size_t splitsize = (oldsize + followsize <= size + extra)
		    ? followsize : size + extra - oldsize;
		arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
		    ((pageind+npages) << LG_PAGE)), splitsize, true,
		    BININD_INVALID, zero);

		size = oldsize + splitsize;
		npages = size >> LG_PAGE;

		/*
		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that
		 * could cause the resulting run to be passed to
		 * arena_run_dalloc() with the dirty argument set to false
		 * (which is when dirty flag consistency would really matter).
		 */
		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
		arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);

		if (config_stats) {
			/* Old size freed, new (merged) size allocated. */
			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= oldsize;
			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

			arena->stats.nmalloc_large++;
			arena->stats.nrequests_large++;
			arena->stats.allocated_large += size;
			arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
		}
		malloc_mutex_unlock(&arena->lock);
		return (false);
	}
	malloc_mutex_unlock(&arena->lock);

	return (true);
}
1888
1889/*
1890 * Try to resize a large allocation, in order to avoid copying. This will
1891 * always fail if growing an object, and the following run is already in use.
1892 */
1893static bool
Jason Evans8e3c3c62010-09-17 15:46:18 -07001894arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
1895 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001896{
1897 size_t psize;
1898
Jason Evans8e3c3c62010-09-17 15:46:18 -07001899 psize = PAGE_CEILING(size + extra);
Jason Evanse476f8a2010-01-16 09:53:50 -08001900 if (psize == oldsize) {
1901 /* Same size class. */
Jason Evans7372b152012-02-10 20:22:09 -08001902 if (config_fill && opt_junk && size < oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001903 memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
1904 size);
1905 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001906 return (false);
1907 } else {
1908 arena_chunk_t *chunk;
1909 arena_t *arena;
1910
1911 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1912 arena = chunk->arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08001913
1914 if (psize < oldsize) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001915 /* Fill before shrinking in order avoid a race. */
Jason Evans7372b152012-02-10 20:22:09 -08001916 if (config_fill && opt_junk) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001917 memset((void *)((uintptr_t)ptr + size), 0x5a,
1918 oldsize - size);
1919 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001920 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
1921 psize);
Jason Evanse476f8a2010-01-16 09:53:50 -08001922 return (false);
1923 } else {
1924 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001925 oldsize, PAGE_CEILING(size),
1926 psize - PAGE_CEILING(size), zero);
Jason Evans7372b152012-02-10 20:22:09 -08001927 if (config_fill && ret == false && zero == false &&
1928 opt_zero) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001929 memset((void *)((uintptr_t)ptr + oldsize), 0,
1930 size - oldsize);
1931 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001932 return (ret);
1933 }
1934 }
1935}
1936
1937void *
Jason Evans8e3c3c62010-09-17 15:46:18 -07001938arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
1939 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001940{
Jason Evanse476f8a2010-01-16 09:53:50 -08001941
Jason Evans8e3c3c62010-09-17 15:46:18 -07001942 /*
1943 * Avoid moving the allocation if the size class can be left the same.
1944 */
Jason Evanse476f8a2010-01-16 09:53:50 -08001945 if (oldsize <= arena_maxclass) {
Jason Evansb1726102012-02-28 16:50:47 -08001946 if (oldsize <= SMALL_MAXCLASS) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07001947 assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
1948 == oldsize);
Jason Evansb1726102012-02-28 16:50:47 -08001949 if ((size + extra <= SMALL_MAXCLASS &&
Jason Evans41ade962011-03-06 22:56:36 -08001950 SMALL_SIZE2BIN(size + extra) ==
1951 SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
Jason Evans8e3c3c62010-09-17 15:46:18 -07001952 size + extra >= oldsize)) {
Jason Evans7372b152012-02-10 20:22:09 -08001953 if (config_fill && opt_junk && size < oldsize) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001954 memset((void *)((uintptr_t)ptr + size),
1955 0x5a, oldsize - size);
1956 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001957 return (ptr);
1958 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001959 } else {
1960 assert(size <= arena_maxclass);
Jason Evansb1726102012-02-28 16:50:47 -08001961 if (size + extra > SMALL_MAXCLASS) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001962 if (arena_ralloc_large(ptr, oldsize, size,
1963 extra, zero) == false)
Jason Evanse476f8a2010-01-16 09:53:50 -08001964 return (ptr);
1965 }
1966 }
1967 }
1968
Jason Evans8e3c3c62010-09-17 15:46:18 -07001969 /* Reallocation would require a move. */
1970 return (NULL);
1971}
Jason Evanse476f8a2010-01-16 09:53:50 -08001972
Jason Evans8e3c3c62010-09-17 15:46:18 -07001973void *
Jason Evans609ae592012-10-11 13:53:15 -07001974arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
1975 size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
1976 bool try_tcache_dalloc)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001977{
1978 void *ret;
1979 size_t copysize;
1980
1981 /* Try to avoid moving the allocation. */
1982 ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
1983 if (ret != NULL)
1984 return (ret);
1985
Jason Evans8e3c3c62010-09-17 15:46:18 -07001986 /*
1987 * size and oldsize are different enough that we need to move the
1988 * object. In that case, fall back to allocating new space and
1989 * copying.
1990 */
Jason Evans38d92102011-03-23 00:37:29 -07001991 if (alignment != 0) {
Jason Evans5ff709c2012-04-11 18:13:45 -07001992 size_t usize = sa2u(size + extra, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07001993 if (usize == 0)
1994 return (NULL);
Jason Evans609ae592012-10-11 13:53:15 -07001995 ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
Jason Evans38d92102011-03-23 00:37:29 -07001996 } else
Jason Evans609ae592012-10-11 13:53:15 -07001997 ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001998
1999 if (ret == NULL) {
2000 if (extra == 0)
2001 return (NULL);
2002 /* Try again, this time without extra. */
Jason Evans38d92102011-03-23 00:37:29 -07002003 if (alignment != 0) {
Jason Evans5ff709c2012-04-11 18:13:45 -07002004 size_t usize = sa2u(size, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07002005 if (usize == 0)
2006 return (NULL);
Jason Evans609ae592012-10-11 13:53:15 -07002007 ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
2008 arena);
Jason Evans38d92102011-03-23 00:37:29 -07002009 } else
Jason Evans609ae592012-10-11 13:53:15 -07002010 ret = arena_malloc(arena, size, zero, try_tcache_alloc);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002011
2012 if (ret == NULL)
2013 return (NULL);
2014 }
2015
2016 /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
2017
2018 /*
2019 * Copy at most size bytes (not size+extra), since the caller has no
2020 * expectation that the extra bytes will be reliably preserved.
2021 */
Jason Evanse476f8a2010-01-16 09:53:50 -08002022 copysize = (size < oldsize) ? size : oldsize;
Jason Evansf54166e2012-04-23 22:41:36 -07002023 VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
Jason Evanse476f8a2010-01-16 09:53:50 -08002024 memcpy(ret, ptr, copysize);
Jason Evans609ae592012-10-11 13:53:15 -07002025 iqallocx(ptr, try_tcache_dalloc);
Jason Evanse476f8a2010-01-16 09:53:50 -08002026 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08002027}
2028
Jason Evans609ae592012-10-11 13:53:15 -07002029dss_prec_t
2030arena_dss_prec_get(arena_t *arena)
2031{
2032 dss_prec_t ret;
2033
2034 malloc_mutex_lock(&arena->lock);
2035 ret = arena->dss_prec;
2036 malloc_mutex_unlock(&arena->lock);
2037 return (ret);
2038}
2039
2040void
2041arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2042{
2043
2044 malloc_mutex_lock(&arena->lock);
2045 arena->dss_prec = dss_prec;
2046 malloc_mutex_unlock(&arena->lock);
2047}
2048
2049void
2050arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
2051 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
2052 malloc_large_stats_t *lstats)
2053{
2054 unsigned i;
2055
2056 malloc_mutex_lock(&arena->lock);
2057 *dss = dss_prec_names[arena->dss_prec];
2058 *nactive += arena->nactive;
2059 *ndirty += arena->ndirty;
2060
2061 astats->mapped += arena->stats.mapped;
2062 astats->npurge += arena->stats.npurge;
2063 astats->nmadvise += arena->stats.nmadvise;
2064 astats->purged += arena->stats.purged;
2065 astats->allocated_large += arena->stats.allocated_large;
2066 astats->nmalloc_large += arena->stats.nmalloc_large;
2067 astats->ndalloc_large += arena->stats.ndalloc_large;
2068 astats->nrequests_large += arena->stats.nrequests_large;
2069
2070 for (i = 0; i < nlclasses; i++) {
2071 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
2072 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
2073 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
2074 lstats[i].curruns += arena->stats.lstats[i].curruns;
2075 }
2076 malloc_mutex_unlock(&arena->lock);
2077
2078 for (i = 0; i < NBINS; i++) {
2079 arena_bin_t *bin = &arena->bins[i];
2080
2081 malloc_mutex_lock(&bin->lock);
2082 bstats[i].allocated += bin->stats.allocated;
2083 bstats[i].nmalloc += bin->stats.nmalloc;
2084 bstats[i].ndalloc += bin->stats.ndalloc;
2085 bstats[i].nrequests += bin->stats.nrequests;
2086 if (config_tcache) {
2087 bstats[i].nfills += bin->stats.nfills;
2088 bstats[i].nflushes += bin->stats.nflushes;
2089 }
2090 bstats[i].nruns += bin->stats.nruns;
2091 bstats[i].reruns += bin->stats.reruns;
2092 bstats[i].curruns += bin->stats.curruns;
2093 malloc_mutex_unlock(&bin->lock);
2094 }
2095}
2096
Jason Evanse476f8a2010-01-16 09:53:50 -08002097bool
2098arena_new(arena_t *arena, unsigned ind)
2099{
2100 unsigned i;
2101 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002102
Jason Evans6109fe02010-02-10 10:37:56 -08002103 arena->ind = ind;
Jason Evans597632b2011-03-18 13:41:33 -07002104 arena->nthreads = 0;
Jason Evans6109fe02010-02-10 10:37:56 -08002105
Jason Evanse476f8a2010-01-16 09:53:50 -08002106 if (malloc_mutex_init(&arena->lock))
2107 return (true);
2108
Jason Evans7372b152012-02-10 20:22:09 -08002109 if (config_stats) {
2110 memset(&arena->stats, 0, sizeof(arena_stats_t));
2111 arena->stats.lstats =
2112 (malloc_large_stats_t *)base_alloc(nlclasses *
2113 sizeof(malloc_large_stats_t));
2114 if (arena->stats.lstats == NULL)
2115 return (true);
2116 memset(arena->stats.lstats, 0, nlclasses *
2117 sizeof(malloc_large_stats_t));
2118 if (config_tcache)
2119 ql_new(&arena->tcache_ql);
2120 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002121
Jason Evans7372b152012-02-10 20:22:09 -08002122 if (config_prof)
2123 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08002124
Jason Evans609ae592012-10-11 13:53:15 -07002125 arena->dss_prec = chunk_dss_prec_get();
2126
Jason Evanse476f8a2010-01-16 09:53:50 -08002127 /* Initialize chunks. */
Jason Evanse3d13062012-10-30 15:42:37 -07002128 arena_chunk_dirty_new(&arena->chunks_dirty);
Jason Evanse476f8a2010-01-16 09:53:50 -08002129 arena->spare = NULL;
2130
2131 arena->nactive = 0;
2132 arena->ndirty = 0;
Jason Evans799ca0b2010-04-08 20:31:58 -07002133 arena->npurgatory = 0;
Jason Evanse476f8a2010-01-16 09:53:50 -08002134
Jason Evanse3d13062012-10-30 15:42:37 -07002135 arena_avail_tree_new(&arena->runs_avail);
Jason Evanse476f8a2010-01-16 09:53:50 -08002136
2137 /* Initialize bins. */
Jason Evansb1726102012-02-28 16:50:47 -08002138 for (i = 0; i < NBINS; i++) {
Jason Evanse476f8a2010-01-16 09:53:50 -08002139 bin = &arena->bins[i];
Jason Evans86815df2010-03-13 20:32:56 -08002140 if (malloc_mutex_init(&bin->lock))
2141 return (true);
Jason Evanse476f8a2010-01-16 09:53:50 -08002142 bin->runcur = NULL;
2143 arena_run_tree_new(&bin->runs);
Jason Evans7372b152012-02-10 20:22:09 -08002144 if (config_stats)
2145 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
Jason Evanse476f8a2010-01-16 09:53:50 -08002146 }
2147
Jason Evanse476f8a2010-01-16 09:53:50 -08002148 return (false);
2149}
2150
Jason Evans49f7e8f2011-03-15 13:59:15 -07002151/*
2152 * Calculate bin_info->run_size such that it meets the following constraints:
2153 *
2154 * *) bin_info->run_size >= min_run_size
2155 * *) bin_info->run_size <= arena_maxclass
2156 * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
Jason Evans47e57f92011-03-22 09:00:56 -07002157 * *) bin_info->nregs <= RUN_MAXREGS
Jason Evans49f7e8f2011-03-15 13:59:15 -07002158 *
Jason Evans84c8eef2011-03-16 10:30:13 -07002159 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
2160 * calculated here, since these settings are all interdependent.
Jason Evans49f7e8f2011-03-15 13:59:15 -07002161 */
2162static size_t
2163bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
2164{
Jason Evans122449b2012-04-06 00:35:09 -07002165 size_t pad_size;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002166 size_t try_run_size, good_run_size;
2167 uint32_t try_nregs, good_nregs;
2168 uint32_t try_hdr_size, good_hdr_size;
Jason Evans84c8eef2011-03-16 10:30:13 -07002169 uint32_t try_bitmap_offset, good_bitmap_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002170 uint32_t try_ctx0_offset, good_ctx0_offset;
Jason Evans122449b2012-04-06 00:35:09 -07002171 uint32_t try_redzone0_offset, good_redzone0_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002172
Jason Evansae4c7b42012-04-02 07:04:34 -07002173 assert(min_run_size >= PAGE);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002174 assert(min_run_size <= arena_maxclass);
2175
2176 /*
Jason Evans122449b2012-04-06 00:35:09 -07002177 * Determine redzone size based on minimum alignment and minimum
2178 * redzone size. Add padding to the end of the run if it is needed to
2179 * align the regions. The padding allows each redzone to be half the
2180 * minimum alignment; without the padding, each redzone would have to
2181 * be twice as large in order to maintain alignment.
2182 */
2183 if (config_fill && opt_redzone) {
2184 size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
2185 if (align_min <= REDZONE_MINSIZE) {
2186 bin_info->redzone_size = REDZONE_MINSIZE;
2187 pad_size = 0;
2188 } else {
2189 bin_info->redzone_size = align_min >> 1;
2190 pad_size = bin_info->redzone_size;
2191 }
2192 } else {
2193 bin_info->redzone_size = 0;
2194 pad_size = 0;
2195 }
2196 bin_info->reg_interval = bin_info->reg_size +
2197 (bin_info->redzone_size << 1);
2198
2199 /*
Jason Evans49f7e8f2011-03-15 13:59:15 -07002200 * Calculate known-valid settings before entering the run_size
2201 * expansion loop, so that the first part of the loop always copies
2202 * valid settings.
2203 *
2204 * The do..while loop iteratively reduces the number of regions until
2205 * the run header and the regions no longer overlap. A closed formula
2206 * would be quite messy, since there is an interdependency between the
2207 * header's mask length and the number of regions.
2208 */
2209 try_run_size = min_run_size;
Jason Evans122449b2012-04-06 00:35:09 -07002210 try_nregs = ((try_run_size - sizeof(arena_run_t)) /
2211 bin_info->reg_interval)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002212 + 1; /* Counter-act try_nregs-- in loop. */
Jason Evans47e57f92011-03-22 09:00:56 -07002213 if (try_nregs > RUN_MAXREGS) {
2214 try_nregs = RUN_MAXREGS
2215 + 1; /* Counter-act try_nregs-- in loop. */
2216 }
Jason Evans49f7e8f2011-03-15 13:59:15 -07002217 do {
2218 try_nregs--;
2219 try_hdr_size = sizeof(arena_run_t);
Jason Evans84c8eef2011-03-16 10:30:13 -07002220 /* Pad to a long boundary. */
2221 try_hdr_size = LONG_CEILING(try_hdr_size);
2222 try_bitmap_offset = try_hdr_size;
2223 /* Add space for bitmap. */
2224 try_hdr_size += bitmap_size(try_nregs);
Jason Evans7372b152012-02-10 20:22:09 -08002225 if (config_prof && opt_prof && prof_promote == false) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07002226 /* Pad to a quantum boundary. */
2227 try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2228 try_ctx0_offset = try_hdr_size;
2229 /* Add space for one (prof_ctx_t *) per region. */
2230 try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
2231 } else
2232 try_ctx0_offset = 0;
Jason Evans122449b2012-04-06 00:35:09 -07002233 try_redzone0_offset = try_run_size - (try_nregs *
2234 bin_info->reg_interval) - pad_size;
2235 } while (try_hdr_size > try_redzone0_offset);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002236
2237 /* run_size expansion loop. */
2238 do {
2239 /*
2240 * Copy valid settings before trying more aggressive settings.
2241 */
2242 good_run_size = try_run_size;
2243 good_nregs = try_nregs;
2244 good_hdr_size = try_hdr_size;
Jason Evans84c8eef2011-03-16 10:30:13 -07002245 good_bitmap_offset = try_bitmap_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002246 good_ctx0_offset = try_ctx0_offset;
Jason Evans122449b2012-04-06 00:35:09 -07002247 good_redzone0_offset = try_redzone0_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002248
2249 /* Try more aggressive settings. */
Jason Evansae4c7b42012-04-02 07:04:34 -07002250 try_run_size += PAGE;
Jason Evans122449b2012-04-06 00:35:09 -07002251 try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
2252 bin_info->reg_interval)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002253 + 1; /* Counter-act try_nregs-- in loop. */
Jason Evans47e57f92011-03-22 09:00:56 -07002254 if (try_nregs > RUN_MAXREGS) {
2255 try_nregs = RUN_MAXREGS
2256 + 1; /* Counter-act try_nregs-- in loop. */
2257 }
Jason Evans49f7e8f2011-03-15 13:59:15 -07002258 do {
2259 try_nregs--;
2260 try_hdr_size = sizeof(arena_run_t);
Jason Evans84c8eef2011-03-16 10:30:13 -07002261 /* Pad to a long boundary. */
2262 try_hdr_size = LONG_CEILING(try_hdr_size);
2263 try_bitmap_offset = try_hdr_size;
2264 /* Add space for bitmap. */
2265 try_hdr_size += bitmap_size(try_nregs);
Jason Evans7372b152012-02-10 20:22:09 -08002266 if (config_prof && opt_prof && prof_promote == false) {
Jason Evans49f7e8f2011-03-15 13:59:15 -07002267 /* Pad to a quantum boundary. */
2268 try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2269 try_ctx0_offset = try_hdr_size;
2270 /*
2271 * Add space for one (prof_ctx_t *) per region.
2272 */
2273 try_hdr_size += try_nregs *
2274 sizeof(prof_ctx_t *);
2275 }
Jason Evans122449b2012-04-06 00:35:09 -07002276 try_redzone0_offset = try_run_size - (try_nregs *
2277 bin_info->reg_interval) - pad_size;
2278 } while (try_hdr_size > try_redzone0_offset);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002279 } while (try_run_size <= arena_maxclass
2280 && try_run_size <= arena_maxclass
Jason Evans122449b2012-04-06 00:35:09 -07002281 && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
2282 RUN_MAX_OVRHD_RELAX
2283 && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
Jason Evans47e57f92011-03-22 09:00:56 -07002284 && try_nregs < RUN_MAXREGS);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002285
Jason Evans122449b2012-04-06 00:35:09 -07002286 assert(good_hdr_size <= good_redzone0_offset);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002287
2288 /* Copy final settings. */
2289 bin_info->run_size = good_run_size;
2290 bin_info->nregs = good_nregs;
Jason Evans84c8eef2011-03-16 10:30:13 -07002291 bin_info->bitmap_offset = good_bitmap_offset;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002292 bin_info->ctx0_offset = good_ctx0_offset;
Jason Evans122449b2012-04-06 00:35:09 -07002293 bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
2294
2295 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
2296 * bin_info->reg_interval) + pad_size == bin_info->run_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002297
2298 return (good_run_size);
2299}
2300
/*
 * Populate arena_bin_info[] for every small size class.  Each class's
 * reg_size is taken from the SIZE_CLASSES X-macro; run_size/nregs/offsets
 * are derived by bin_info_run_size_calc(), and each run size feeds into the
 * next class's minimum so run sizes are non-decreasing across classes.
 */
static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;
	size_t prev_run_size = PAGE;

/* Expand once per size class; see SIZE_CLASSES for the (bin, delta, size) tuples. */
#define SIZE_CLASS(bin, delta, size)					\
	bin_info = &arena_bin_info[bin];				\
	bin_info->reg_size = size;					\
	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
	SIZE_CLASSES
#undef SIZE_CLASS
}
2315
Jason Evansb1726102012-02-28 16:50:47 -08002316void
Jason Evansa0bf2422010-01-29 14:30:41 -08002317arena_boot(void)
Jason Evanse476f8a2010-01-16 09:53:50 -08002318{
Jason Evansa0bf2422010-01-29 14:30:41 -08002319 size_t header_size;
Jason Evans7393f442010-10-01 17:35:43 -07002320 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08002321
Jason Evanse476f8a2010-01-16 09:53:50 -08002322 /*
2323 * Compute the header size such that it is large enough to contain the
Jason Evans7393f442010-10-01 17:35:43 -07002324 * page map. The page map is biased to omit entries for the header
2325 * itself, so some iteration is necessary to compute the map bias.
2326 *
2327 * 1) Compute safe header_size and map_bias values that include enough
2328 * space for an unbiased page map.
2329 * 2) Refine map_bias based on (1) to omit the header pages in the page
2330 * map. The resulting map_bias may be one too small.
2331 * 3) Refine map_bias based on (2). The result will be >= the result
2332 * from (2), and will always be correct.
Jason Evanse476f8a2010-01-16 09:53:50 -08002333 */
Jason Evans7393f442010-10-01 17:35:43 -07002334 map_bias = 0;
2335 for (i = 0; i < 3; i++) {
Jason Evansae4c7b42012-04-02 07:04:34 -07002336 header_size = offsetof(arena_chunk_t, map) +
2337 (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
2338 map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
2339 != 0);
Jason Evans7393f442010-10-01 17:35:43 -07002340 }
2341 assert(map_bias > 0);
2342
Jason Evansae4c7b42012-04-02 07:04:34 -07002343 arena_maxclass = chunksize - (map_bias << LG_PAGE);
Jason Evansa0bf2422010-01-29 14:30:41 -08002344
Jason Evansb1726102012-02-28 16:50:47 -08002345 bin_info_init();
Jason Evanse476f8a2010-01-16 09:53:50 -08002346}
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002347
2348void
2349arena_prefork(arena_t *arena)
2350{
2351 unsigned i;
2352
2353 malloc_mutex_prefork(&arena->lock);
2354 for (i = 0; i < NBINS; i++)
2355 malloc_mutex_prefork(&arena->bins[i].lock);
2356}
2357
2358void
2359arena_postfork_parent(arena_t *arena)
2360{
2361 unsigned i;
2362
2363 for (i = 0; i < NBINS; i++)
2364 malloc_mutex_postfork_parent(&arena->bins[i].lock);
2365 malloc_mutex_postfork_parent(&arena->lock);
2366}
2367
2368void
2369arena_postfork_child(arena_t *arena)
2370{
2371 unsigned i;
2372
2373 for (i = 0; i < NBINS; i++)
2374 malloc_mutex_postfork_child(&arena->bins[i].lock);
2375 malloc_mutex_postfork_child(&arena->lock);
2376}