#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t	chunks_mtx;
chunk_stats_t	stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t	chunks_szad_mmap;
static extent_tree_t	chunks_ad_mmap;
static extent_tree_t	chunks_szad_dss;
static extent_tree_t	chunks_ad_dss;

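/*
 * Rtree that records which chunk addresses were allocated here (maintained
 * only when config_ivsalloc), so that ivsalloc() can tell whether an arbitrary
 * pointer lies within a jemalloc-managed chunk.  See the rtree_set() calls in
 * chunk_alloc() and chunk_dealloc().
 */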
rtree_t		*chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;
size_t		map_bias;
size_t		arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*chunk_recycle(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
    bool *zero);
static void	chunk_record(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, void *chunk, size_t size);

/******************************************************************************/

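/*
 * Try to satisfy an allocation request from previously freed address space
 * recorded in the given extent trees.  On success the chosen extent is removed
 * and split; any unused leading or trailing space is reinserted as smaller
 * extents.
 */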
static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
    size_t alignment, bool base, bool *zero)
{
	void *ret;
	extent_node_t *node;
	extent_node_t key;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed;

	if (base) {
		/*
		 * This function may need to call base_node_{,de}alloc(), but
		 * the current chunk allocation request is on behalf of the
		 * base allocator.  Avoid deadlock (and if that weren't an
		 * issue, potential for infinite recursion) by returning NULL.
		 */
		return (NULL);
	}

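	/*
	 * Search for enough extra space that an alignment-aligned region of
	 * "size" bytes is guaranteed to fit: recycled extents are always
	 * chunksize-aligned, so at most (alignment - chunksize) leading bytes
	 * can be wasted.
	 */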
	alloc_size = size + alignment - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	key.addr = NULL;
	key.size = alloc_size;
	malloc_mutex_lock(&chunks_mtx);
	node = extent_tree_szad_nsearch(chunks_szad, &key);
	if (node == NULL) {
		malloc_mutex_unlock(&chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
	    (uintptr_t)node->addr;
	assert(node->size >= leadsize + size);
	trailsize = node->size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
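	/*
	 * Remember whether the extent was already zeroed: the flag is
	 * propagated to any trailing remainder below, and tells the caller
	 * that zeroed memory was returned.
	 */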
	zeroed = node->zeroed;
	if (zeroed)
		*zero = true;
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		node->size = leadsize;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			/*
			 * An additional node is required, but
			 * base_node_alloc() can cause a new base chunk to be
			 * allocated.  Drop chunks_mtx in order to avoid
			 * deadlock, and if node allocation fails, deallocate
			 * the result before returning an error.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			node = base_node_alloc();
			if (node == NULL) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
			malloc_mutex_lock(&chunks_mtx);
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
		node->zeroed = zeroed;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

	if (node != NULL)
		base_node_dealloc(node);
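	/*
	 * If the caller needs zeroed memory and the extent was not already
	 * zeroed, zero it now.  In debug builds, verify that extents flagged
	 * as zeroed really contain only zero bytes.
	 */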
	if (*zero) {
		if (zeroed == false)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 */
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

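	/*
	 * Try each allocation source in turn; dss_prec determines whether the
	 * dss is tried before or after mmap.
	 */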
	/* "primary" dss. */
	if (config_dss && dss_prec == dss_prec_primary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
		    alignment, base, zero)) != NULL)
			goto label_return;
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			goto label_return;
	}
	/* mmap. */
	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
	    alignment, base, zero)) != NULL)
		goto label_return;
	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
		goto label_return;
	/* "secondary" dss. */
	if (config_dss && dss_prec == dss_prec_secondary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
		    alignment, base, zero)) != NULL)
			goto label_return;
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			goto label_return;
	}

	/* All strategies for allocation failed. */
	ret = NULL;
label_return:
	if (ret != NULL) {
		if (config_ivsalloc && base == false) {
			if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
		}
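		/*
		 * Account for the new chunks and, when profiling is enabled,
		 * arrange a gdump if the chunk high-water mark was exceeded.
		 */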
		if (config_stats || config_prof) {
			bool gdump;
			malloc_mutex_lock(&chunks_mtx);
			if (config_stats)
				stats_chunks.nchunks += (size / chunksize);
			stats_chunks.curchunks += (size / chunksize);
			if (stats_chunks.curchunks > stats_chunks.highchunks) {
				stats_chunks.highchunks =
				    stats_chunks.curchunks;
				if (config_prof)
					gdump = true;
			} else if (config_prof)
				gdump = false;
			malloc_mutex_unlock(&chunks_mtx);
			if (config_prof && opt_prof && opt_prof_gdump && gdump)
				prof_gdump();
		}
		if (config_valgrind)
			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	}
	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}

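/*
 * Record a freed chunk in the given recycle trees so that its address space
 * can be reused.  The pages are purged first, and the chunk is coalesced with
 * adjacent free extents whenever possible.
 */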
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    size_t size)
{
	bool unzeroed;
	extent_node_t *xnode, *node, *prev, *xprev, key;

	unzeroed = pages_purge(chunk, size);
	VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	/*
	 * Allocate a node before acquiring chunks_mtx even though it might not
	 * be needed, because base_node_alloc() may cause a new base chunk to
	 * be allocated, which could cause deadlock if chunks_mtx were already
	 * held.
	 */
	xnode = base_node_alloc();
	/* Use xprev to implement conditional deferred deallocation of prev. */
	xprev = NULL;

	malloc_mutex_lock(&chunks_mtx);
	key.addr = (void *)((uintptr_t)chunk + size);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && node->addr == key.addr) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		node->addr = chunk;
		node->size += size;
		node->zeroed = (node->zeroed && (unzeroed == false));
		extent_tree_szad_insert(chunks_szad, node);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		if (xnode == NULL) {
			/*
			 * base_node_alloc() failed, which is an exceedingly
			 * unlikely failure.  Leak chunk; its pages have
			 * already been purged, so this is only a virtual
			 * memory leak.
			 */
			goto label_return;
		}
		node = xnode;
		xnode = NULL; /* Prevent deallocation below. */
		node->addr = chunk;
		node->size = size;
		node->zeroed = (unzeroed == false);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);

		extent_tree_szad_remove(chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		node->zeroed = (node->zeroed && prev->zeroed);
		extent_tree_szad_insert(chunks_szad, node);

		xprev = prev;
	}

label_return:
	malloc_mutex_unlock(&chunks_mtx);
	/*
	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
	 * avoid potential deadlock.
	 */
	if (xnode != NULL)
		base_node_dealloc(xnode);
	if (xprev != NULL)
		base_node_dealloc(xprev);
}

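/*
 * Return a chunk's address space to the recycle trees that match its origin:
 * dss chunks always go back to the dss trees, while mmap'ed chunks are
 * recorded only when chunk_dealloc_mmap() returns true, i.e. when it left the
 * mapping in place rather than unmapping it.
 */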
void
chunk_unmap(void *chunk, size_t size)
{
	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_dss && chunk_in_dss(chunk))
		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
	else if (chunk_dealloc_mmap(chunk, size))
		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}

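/*
 * Drop a chunk from the rtree and the chunk statistics, then optionally hand
 * its address space back via chunk_unmap().
 */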
void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_ivsalloc)
		rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
	if (config_stats || config_prof) {
		malloc_mutex_lock(&chunks_mtx);
		assert(stats_chunks.curchunks >= (size / chunksize));
		stats_chunks.curchunks -= (size / chunksize);
		malloc_mutex_unlock(&chunks_mtx);
	}

	if (unmap)
		chunk_unmap(chunk, size);
}

bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (config_stats || config_prof) {
		if (malloc_mutex_init(&chunks_mtx))
			return (true);
		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
	}
	if (config_dss && chunk_dss_boot())
		return (true);
	extent_tree_szad_new(&chunks_szad_mmap);
	extent_tree_ad_new(&chunks_ad_mmap);
	extent_tree_szad_new(&chunks_szad_dss);
	extent_tree_ad_new(&chunks_ad_dss);
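	/*
	 * The rtree is keyed on the bits of a pointer that identify a chunk:
	 * a pointer has (1 << (LG_SIZEOF_PTR+3)) bits in total, and the low
	 * opt_lg_chunk bits are merely the offset within a chunk.
	 */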
	if (config_ivsalloc) {
		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    opt_lg_chunk, base_alloc, NULL);
		if (chunks_rtree == NULL)
			return (true);
	}

	return (false);
}

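/*
 * Fork handlers: lock chunks_mtx and forward to the rtree and dss prefork
 * handlers before fork(), then undo that in the parent and child afterwards so
 * that chunk state stays consistent across fork().
 */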
void
chunk_prefork(void)
{

	malloc_mutex_prefork(&chunks_mtx);
	if (config_ivsalloc)
		rtree_prefork(chunks_rtree);
	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
	if (config_ivsalloc)
		rtree_postfork_parent(chunks_rtree);
	malloc_mutex_postfork_parent(&chunks_mtx);
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
	if (config_ivsalloc)
		rtree_postfork_child(chunks_rtree);
	malloc_mutex_postfork_child(&chunks_mtx);
}