#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering). These are used when allocating chunks, in an attempt to re-use
 * address space. Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t chunks_szad_mmap;
static extent_tree_t chunks_ad_mmap;
static extent_tree_t chunks_szad_dss;
static extent_tree_t chunks_ad_dss;

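/*
 * Radix tree of currently allocated chunk addresses; maintained only when
 * config_ivsalloc, so that ivsalloc() can determine whether a pointer was
 * allocated by this implementation.
 */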
rtree_t *chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void chunk_dalloc_core(void *chunk, size_t size);

/******************************************************************************/

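/*
 * Attempt to satisfy an allocation request by recycling address space that
 * was previously recorded in the given extent trees. Any leading space needed
 * for alignment and any trailing space left over are reinserted into the
 * trees as smaller extents.
 */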
static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
    void *new_addr, size_t size, size_t alignment, bool base, bool *zero)
{
	void *ret;
	extent_node_t *node;
	extent_node_t key;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed;

	if (base) {
		/*
		 * This function may need to call base_node_{,de}alloc(), but
		 * the current chunk allocation request is on behalf of the
		 * base allocator. Avoid deadlock (and if that weren't an
		 * issue, potential for infinite recursion) by returning NULL.
		 */
		return (NULL);
	}

	alloc_size = size + alignment - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	key.addr = new_addr;
	key.size = alloc_size;
	malloc_mutex_lock(&chunks_mtx);
	node = extent_tree_szad_nsearch(chunks_szad, &key);
	if (node == NULL || (new_addr && node->addr != new_addr)) {
		malloc_mutex_unlock(&chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
	    (uintptr_t)node->addr;
	assert(node->size >= leadsize + size);
	trailsize = node->size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
	zeroed = node->zeroed;
	if (zeroed)
		*zero = true;
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		node->size = leadsize;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			/*
			 * An additional node is required, but
			 * base_node_alloc() can cause a new base chunk to be
			 * allocated. Drop chunks_mtx in order to avoid
			 * deadlock, and if node allocation fails, deallocate
			 * the result before returning an error.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			node = base_node_alloc();
			if (node == NULL) {
				chunk_dalloc_core(ret, size);
				return (NULL);
			}
			malloc_mutex_lock(&chunks_mtx);
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
		node->zeroed = zeroed;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

	if (node != NULL)
		base_node_dalloc(node);
	if (*zero) {
		if (!zeroed)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, while still benefiting
 * from them when they happen to be returned.
 */
static void *
chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
    bool *zero, dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
		    new_addr, size, alignment, base, zero)) != NULL)
			return (ret);
		if ((ret = chunk_alloc_dss(new_addr, size, alignment, zero))
		    != NULL)
			return (ret);
	}
	/* mmap. */
	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, new_addr,
	    size, alignment, base, zero)) != NULL)
		return (ret);
	/* Requesting an address is not implemented for chunk_alloc_mmap(). */
	if (new_addr == NULL &&
	    (ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
		    new_addr, size, alignment, base, zero)) != NULL)
			return (ret);
		if ((ret = chunk_alloc_dss(new_addr, size, alignment, zero))
		    != NULL)
			return (ret);
	}

	/* All strategies for allocation failed. */
	return (NULL);
}

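/*
 * Register a newly allocated chunk: record it in chunks_rtree (when
 * config_ivsalloc), update the chunk statistics, and trigger a profile dump
 * if the chunk high water mark was exceeded. Returns true on error.
 */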
static bool
chunk_register(void *chunk, size_t size, bool base)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);

	if (config_ivsalloc && !base) {
		if (rtree_set(chunks_rtree, (uintptr_t)chunk, 1))
			return (true);
	}
	if (config_stats || config_prof) {
		bool gdump;
		malloc_mutex_lock(&chunks_mtx);
		if (config_stats)
			stats_chunks.nchunks += (size / chunksize);
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks =
			    stats_chunks.curchunks;
			if (config_prof)
				gdump = true;
		} else if (config_prof)
			gdump = false;
		malloc_mutex_unlock(&chunks_mtx);
		if (config_prof && opt_prof && opt_prof_gdump && gdump)
			prof_gdump();
	}
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
	return (false);
}

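/* Allocate a chunk on behalf of the base allocator. */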
void *
chunk_alloc_base(size_t size)
{
	void *ret;
	bool zero;

	zero = false;
	ret = chunk_alloc_core(NULL, size, chunksize, true, &zero,
	    chunk_dss_prec_get());
	if (ret == NULL)
		return (NULL);
	if (chunk_register(ret, size, true)) {
		chunk_dalloc_core(ret, size);
		return (NULL);
	}
	return (ret);
}

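/*
 * Allocate a chunk on behalf of an arena via its (possibly user-provided)
 * chunk_alloc hook, and register the result; if registration fails, the chunk
 * is handed back to the corresponding chunk_dalloc hook.
 */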
void *
chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
    unsigned arena_ind, void *new_addr, size_t size, size_t alignment,
    bool *zero)
{
	void *ret;

	ret = chunk_alloc(new_addr, size, alignment, zero, arena_ind);
	if (ret != NULL && chunk_register(ret, size, false)) {
		chunk_dalloc(ret, size, arena_ind);
		ret = NULL;
	}

	return (ret);
}
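
/*
 * Illustrative sketch (not part of the original source): the shape of the
 * user-overridable hooks that chunk_alloc_arena() invokes above. The
 * chunk_alloc_t and chunk_dalloc_t typedefs come from the jemalloc headers,
 * and the signatures mirror the calls made above; the function names and the
 * helpers my_map_aligned()/my_unmap() are hypothetical, and the jemalloc
 * documentation remains the authoritative statement of the hook contract
 * (e.g. the exact semantics of *zero and new_addr).
 *
 *	static void *
 *	my_chunk_alloc(void *new_addr, size_t size, size_t alignment,
 *	    bool *zero, unsigned arena_ind)
 *	{
 *		void *ret = my_map_aligned(new_addr, size, alignment);
 *
 *		if (ret == NULL)
 *			return (NULL);
 *		if (*zero)
 *			memset(ret, 0, size);
 *		return (ret);
 *	}
 *
 *	static bool
 *	my_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
 *	{
 *		my_unmap(chunk, size);
 *		return (false);	(mirrors chunk_dalloc_default() below)
 *	}
 */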

/* Default arena chunk allocation routine in the absence of user override. */
void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    unsigned arena_ind)
{
	arena_t *arena;

	arena = arena_get(tsd_fetch(), arena_ind, false, true);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);

	return (chunk_alloc_core(new_addr, size, alignment, false, zero,
	    arena->dss_prec));
}

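/*
 * Purge the chunk's pages and record the chunk in the given extent trees so
 * that it can later be recycled, coalescing it with adjacent recorded regions
 * where possible.
 */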
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    size_t size)
{
	bool unzeroed;
	extent_node_t *xnode, *node, *prev, *xprev, key;

	unzeroed = pages_purge(chunk, size);
	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	/*
	 * Allocate a node before acquiring chunks_mtx even though it might not
	 * be needed, because base_node_alloc() may cause a new base chunk to
	 * be allocated, which could cause deadlock if chunks_mtx were already
	 * held.
	 */
	xnode = base_node_alloc();
	/* Use xprev to implement conditional deferred deallocation of prev. */
	xprev = NULL;

	malloc_mutex_lock(&chunks_mtx);
	key.addr = (void *)((uintptr_t)chunk + size);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && node->addr == key.addr) {
		/*
		 * Coalesce chunk with the following address range. This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		node->addr = chunk;
		node->size += size;
		node->zeroed = (node->zeroed && !unzeroed);
		extent_tree_szad_insert(chunks_szad, node);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		if (xnode == NULL) {
			/*
			 * base_node_alloc() failed, which is an exceedingly
			 * unlikely failure. Leak chunk; its pages have
			 * already been purged, so this is only a virtual
			 * memory leak.
			 */
			goto label_return;
		}
		node = xnode;
		xnode = NULL; /* Prevent deallocation below. */
		node->addr = chunk;
		node->size = size;
		node->zeroed = !unzeroed;
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range. This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);

		extent_tree_szad_remove(chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		node->zeroed = (node->zeroed && prev->zeroed);
		extent_tree_szad_insert(chunks_szad, node);

		xprev = prev;
	}

label_return:
	malloc_mutex_unlock(&chunks_mtx);
	/*
	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
	 * avoid potential deadlock.
	 */
	if (xnode != NULL)
		base_node_dalloc(xnode);
	if (xprev != NULL)
		base_node_dalloc(xprev);
}

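/*
 * Return a chunk to the recycling trees: dss-backed chunks are always
 * recorded, whereas mmapped chunks are recorded only when chunk_dalloc_mmap()
 * reports that it did not unmap them.
 */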
void
chunk_unmap(void *chunk, size_t size)
{
	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (have_dss && chunk_in_dss(chunk))
		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
	else if (chunk_dalloc_mmap(chunk, size))
		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}

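/*
 * Common chunk deallocation path: deregister the chunk from chunks_rtree and
 * the chunk statistics, then hand it to chunk_unmap().
 */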
static void
chunk_dalloc_core(void *chunk, size_t size)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_ivsalloc)
		rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
	if (config_stats || config_prof) {
		malloc_mutex_lock(&chunks_mtx);
		assert(stats_chunks.curchunks >= (size / chunksize));
		stats_chunks.curchunks -= (size / chunksize);
		malloc_mutex_unlock(&chunks_mtx);
	}

	chunk_unmap(chunk, size);
}

/* Default arena chunk deallocation routine in the absence of user override. */
bool
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
{

	chunk_dalloc_core(chunk, size);
	return (false);
}

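/* Initialize chunk subsystem state during bootstrapping. */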
bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (malloc_mutex_init(&chunks_mtx))
		return (true);
	if (config_stats || config_prof)
		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
	if (have_dss && chunk_dss_boot())
		return (true);
	extent_tree_szad_new(&chunks_szad_mmap);
	extent_tree_ad_new(&chunks_ad_mmap);
	extent_tree_szad_new(&chunks_szad_dss);
	extent_tree_ad_new(&chunks_ad_dss);
	if (config_ivsalloc) {
		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    opt_lg_chunk, base_alloc, NULL);
		if (chunks_rtree == NULL)
			return (true);
	}

	return (false);
}

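/*
 * Fork handling: the prefork function acquires chunk-related locks prior to
 * fork(), and the postfork functions release them in the parent and child
 * afterward.
 */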
void
chunk_prefork(void)
{

	malloc_mutex_prefork(&chunks_mtx);
	if (config_ivsalloc)
		rtree_prefork(chunks_rtree);
	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
	if (config_ivsalloc)
		rtree_postfork_parent(chunks_rtree);
	malloc_mutex_postfork_parent(&chunks_mtx);
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
	if (config_ivsalloc)
		rtree_postfork_child(chunks_rtree);
	malloc_mutex_postfork_child(&chunks_mtx);
}