#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t curchunks;
static size_t highchunks;

rtree_t chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;

static void *chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

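/*
 * Default chunk management hooks, used for an arena unless the application
 * installs its own (e.g. via the arena.<i>.chunk_hooks mallctl).  Each default
 * hook is implemented in terms of the mmap/dss primitives below.
 */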
const chunk_hooks_t chunk_hooks_default = {
	chunk_alloc_default,
	chunk_dalloc_default,
	chunk_commit_default,
	chunk_decommit_default,
	chunk_purge_default,
	chunk_split_default,
	chunk_merge_default
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed);

/******************************************************************************/

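/* Read the arena's chunk hooks; the caller must hold arena->chunks_mtx. */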
static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{

	return (arena->chunk_hooks);
}

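/* Return a consistent snapshot of the arena's chunk hooks. */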
chunk_hooks_t
chunk_hooks_get(arena_t *arena)
{
	chunk_hooks_t chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks = chunk_hooks_get_locked(arena);
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (chunk_hooks);
}

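/* Install new chunk hooks for the arena and return the previous hooks. */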
chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
	chunk_hooks_t old_chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	old_chunk_hooks = arena->chunk_hooks;
	/*
	 * Copy each field atomically so that it is impossible for readers to
	 * see partially updated pointers.  There are places where readers only
	 * need one hook function pointer (therefore no need to copy the
	 * entirety of arena->chunk_hooks), and stale reads do not affect
	 * correctness, so they perform unlocked reads.
	 */
#define ATOMIC_COPY_HOOK(n) do {					\
	union {								\
		chunk_##n##_t **n;					\
		void **v;						\
	} u;								\
	u.n = &arena->chunk_hooks.n;					\
	atomic_write_p(u.v, chunk_hooks->n);				\
} while (0)
	ATOMIC_COPY_HOOK(alloc);
	ATOMIC_COPY_HOOK(dalloc);
	ATOMIC_COPY_HOOK(commit);
	ATOMIC_COPY_HOOK(decommit);
	ATOMIC_COPY_HOOK(purge);
	ATOMIC_COPY_HOOK(split);
	ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (old_chunk_hooks);
}

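/*
 * If *chunk_hooks is still the CHUNK_HOOKS_INITIALIZER placeholder, replace it
 * with the arena's current hooks.
 */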
static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool locked)
{
	static const chunk_hooks_t uninitialized_hooks =
	    CHUNK_HOOKS_INITIALIZER;

	if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
	    0) {
		*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
		    chunk_hooks_get(arena);
	}
}

static void
chunk_hooks_assure_initialized_locked(arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
}

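/*
 * Map the chunk to its extent node in chunks_rtree, and if profiling is
 * enabled, update the chunk count that drives gdump triggering.  Returns true
 * if the rtree insertion fails.
 */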
bool
chunk_register(const void *chunk, const extent_node_t *node)
{

	assert(extent_node_addr_get(node) == chunk);

	if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
		return (true);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nadd = (size == 0) ? 1 : size / chunksize;
		size_t cur = atomic_add_z(&curchunks, nadd);
		size_t high = atomic_read_z(&highchunks);
		while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highchunks update race.
			 */
			high = atomic_read_z(&highchunks);
		}
		if (cur > high && prof_gdump_get_unlocked())
			prof_gdump();
	}

	return (false);
}

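/* Remove the chunk's rtree mapping and undo the gdump chunk accounting. */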
void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
	bool err;

	err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
	assert(!err);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nsub = (size == 0) ? 1 : size / chunksize;
		assert(atomic_read_z(&curchunks) >= nsub);
		atomic_sub_z(&curchunks, nsub);
	}
}

/*
 * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
 * fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size)
{
	extent_node_t key;

	assert(size == CHUNK_CEILING(size));

	extent_node_init(&key, arena, NULL, size, false, false);
	return (extent_tree_szad_nsearch(chunks_szad, &key));
}

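/*
 * Satisfy an allocation request from the given extent trees (cached or
 * retained chunks), splitting off leading/trailing space as necessary and
 * committing the result if it is not already committed.  Returns NULL if no
 * suitable extent is found or if a hook operation fails.
 */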
static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
    bool dalloc_node)
{
	void *ret;
	extent_node_t *node;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed, committed;

	assert(new_addr == NULL || alignment == chunksize);
	/*
	 * Cached chunks use the node linkage embedded in their headers, in
	 * which case dalloc_node is true, and new_addr is non-NULL because
	 * we're operating on a specific chunk.
	 */
	assert(dalloc_node || new_addr != NULL);

	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	if (new_addr != NULL) {
		extent_node_t key;
		extent_node_init(&key, arena, new_addr, alloc_size, false,
		    false);
		node = extent_tree_ad_search(chunks_ad, &key);
	} else {
		node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
		    alloc_size);
	}
	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
	    size)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
	    alignment) - (uintptr_t)extent_node_addr_get(node);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_node_size_get(node) >= leadsize + size);
	trailsize = extent_node_size_get(node) - leadsize - size;
	ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
	zeroed = extent_node_zeroed_get(node);
	if (zeroed)
		*zero = true;
	committed = extent_node_committed_get(node);
	if (committed)
		*commit = true;
	/* Split the lead. */
	if (leadsize != 0 &&
	    chunk_hooks->split(extent_node_addr_get(node),
	    extent_node_size_get(node), leadsize, size, false, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	arena_chunk_cache_maybe_remove(arena, node, cache);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		extent_node_size_set(node, leadsize);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Split the trail. */
		if (chunk_hooks->split(ret, size + trailsize, size,
		    trailsize, false, arena->ind)) {
			if (dalloc_node && node != NULL)
				arena_node_dalloc(arena, node);
			malloc_mutex_unlock(&arena->chunks_mtx);
			chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
			    cache, ret, size + trailsize, zeroed, committed);
			return (NULL);
		}
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			node = arena_node_alloc(arena);
			if (node == NULL) {
				malloc_mutex_unlock(&arena->chunks_mtx);
				chunk_record(arena, chunk_hooks, chunks_szad,
				    chunks_ad, cache, ret, size + trailsize,
				    zeroed, committed);
				return (NULL);
			}
		}
		extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
		    trailsize, zeroed, committed);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
		    ret, size, zeroed, committed);
		return (NULL);
	}
	malloc_mutex_unlock(&arena->chunks_mtx);

	assert(dalloc_node || node != NULL);
	if (dalloc_node && node != NULL)
		arena_node_dalloc(arena, node);
	if (*zero) {
		if (!zeroed)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
	/* mmap. */
	if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);

	/* All strategies for allocation failed. */
	return (NULL);
}

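/* Allocate a chunk on behalf of the base (internal metadata) allocator. */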
void *
chunk_alloc_base(size_t size)
{
	void *ret;
	bool zero, commit;

	/*
	 * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
	 * because it's critical that chunk_alloc_base() return untouched
	 * demand-zeroed virtual memory.
	 */
	zero = true;
	commit = true;
	ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

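/*
 * Try to satisfy an allocation from the arena's cache of previously freed,
 * still-committed chunks.  Returns NULL on failure.
 */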
void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
	void *ret;
	bool commit;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	commit = true;
	ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
	    &commit, dalloc_node);
	if (ret == NULL)
		return (NULL);
	assert(commit);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	return (ret);
}

static arena_t *
chunk_arena_get(unsigned arena_ind)
{
	arena_t *arena;

	arena = arena_get(arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);
	return (arena);
}

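/*
 * Default chunk allocation hook: allocate via dss and/or mmap according to the
 * arena's dss precedence.
 */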
static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	void *ret;
	arena_t *arena;

	arena = chunk_arena_get(arena_ind);
	ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, commit,
	    arena->dss_prec);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

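/*
 * Try to satisfy an allocation from the arena's retained chunks, i.e. address
 * ranges that the dalloc hook previously declined to release back to the
 * system.
 */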
static void *
chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained,
	    &arena->chunks_ad_retained, false, new_addr, size, alignment, zero,
	    commit, true));
}

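/*
 * Allocate a chunk, preferring retained virtual memory over a fresh allocation
 * through the alloc hook.
 */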
void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;

	chunk_hooks_assure_initialized(arena, chunk_hooks);

	ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
	    alignment, zero, commit);
	if (ret == NULL) {
		ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
		    commit, arena->ind);
		if (ret == NULL)
			return (NULL);
	}

	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	return (ret);
}

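/*
 * Record a deallocated chunk in the given extent trees, coalescing it with
 * adjacent extents (via the merge hook) where possible.
 */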
static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed)
{
	bool unzeroed;
	extent_node_t *node, *prev;
	extent_node_t key;

	assert(!cache || !zeroed);
	unzeroed = cache || !zeroed;
	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
	    false, false);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && extent_node_addr_get(node) ==
	    extent_node_addr_get(&key) && extent_node_committed_get(node) ==
	    committed && !chunk_hooks->merge(chunk, size,
	    extent_node_addr_get(node), extent_node_size_get(node), false,
	    arena->ind)) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, chunk);
		extent_node_size_set(node, size + extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
		    !unzeroed);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		node = arena_node_alloc(arena);
		if (node == NULL) {
			/*
			 * Node allocation failed, which is an exceedingly
			 * unlikely failure.  Leak chunk after making sure its
			 * pages have already been purged, so that this is only
			 * a virtual memory leak.
			 */
			if (cache) {
				chunk_purge_wrapper(arena, chunk_hooks, chunk,
				    size, 0, size);
			}
			goto label_return;
		}
		extent_node_init(node, arena, chunk, size, !unzeroed,
		    committed);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
	    extent_node_size_get(prev)) == chunk &&
	    extent_node_committed_get(prev) == committed &&
	    !chunk_hooks->merge(extent_node_addr_get(prev),
	    extent_node_size_get(prev), chunk, size, false, arena->ind)) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);
		arena_chunk_cache_maybe_remove(arena, prev, cache);
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, extent_node_addr_get(prev));
		extent_node_size_set(node, extent_node_size_get(prev) +
		    extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
		    extent_node_zeroed_get(node));
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);

		arena_node_dalloc(arena, prev);
	}

label_return:
	malloc_mutex_unlock(&arena->chunks_mtx);
}

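/*
 * Return a chunk to the arena's cache so that it can be reused without a round
 * trip through the chunk hooks, then give the arena a chance to purge.
 */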
void
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool committed)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, chunk, size, false, committed);
	arena_maybe_purge(arena);
}

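/*
 * Default chunk deallocation hook: refuse to deallocate dss memory (it cannot
 * be unmapped); otherwise defer to chunk_dalloc_mmap().
 */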
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{

	if (!have_dss || !chunk_in_dss(chunk))
		return (chunk_dalloc_mmap(chunk, size));
	return (true);
}

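/*
 * Deallocate a chunk via the dalloc hook; if the hook refuses, decommit or
 * purge the chunk and retain it for later reuse.
 */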
void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool zeroed, bool committed)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	/* Try to deallocate. */
	if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
		return;
	/* Try to decommit; purge if that fails. */
	if (committed) {
		committed = chunk_hooks->decommit(chunk, size, 0, size,
		    arena->ind);
	}
	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
	    arena->ind);
	chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}

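/*
 * Default commit and decommit hooks, implemented in terms of pages_commit()
 * and pages_decommit() on the requested subrange of the chunk.
 */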
static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

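/*
 * Default purge hook: discard the physical pages backing a page-aligned
 * subrange of the chunk.
 */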
static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

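/* Purge part of a chunk via the arena's purge hook. */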
bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, size_t offset, size_t length)
{

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

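/*
 * Default split/merge hooks.  Splitting and merging are purely logical for
 * mmap-based chunks, so they only fail when the platform cannot coalesce
 * mappings, or when a merge would mix dss and non-dss memory.
 */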
static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
		return (true);

	return (false);
}

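/* Allocate rtree node memory from the base allocator. */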
static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{

	return ((rtree_node_elm_t *)base_alloc(nelms *
	    sizeof(rtree_node_elm_t)));
}

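/*
 * Initialize chunk-related globals (chunksize and friends), the dss, and the
 * chunk rtree.  Returns true on error.
 */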
bool
chunk_boot(void)
{
#ifdef _WIN32
	SYSTEM_INFO info;
	GetSystemInfo(&info);

	/*
	 * Verify that the actual page size is equal to, or an integral
	 * multiple of, the configured page size.
	 */
	if (info.dwPageSize & ((1U << LG_PAGE) - 1))
		return (true);

	/*
	 * Configure chunksize (if not set) to match the allocation granularity
	 * (usually 64K), so that pages_map() always takes the fast path.
	 */
	if (!opt_lg_chunk) {
		opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
		    - 1;
	}
#else
	if (!opt_lg_chunk)
		opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (have_dss && chunk_dss_boot())
		return (true);
	if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
	    opt_lg_chunk), chunks_rtree_node_alloc, NULL))
		return (true);

	return (false);
}

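/*
 * Fork handling: only the dss allocator maintains chunk state that must be
 * protected across fork().
 */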
void
chunk_prefork(void)
{

	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
}