#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

size_t	opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t	chunks_mtx;
chunk_stats_t	stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t	chunks_szad;
static extent_tree_t	chunks_ad;

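/*
 * Radix tree of currently allocated chunks, maintained only when
 * config_ivsalloc is enabled, so that pointer lookups can determine whether an
 * address was returned by this allocator.
 */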
rtree_t		*chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;
size_t		map_bias;
size_t		arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*chunk_recycle(size_t size, size_t alignment, bool *zero);
static void	chunk_record(void *chunk, size_t size);

/******************************************************************************/

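/*
 * Satisfy an allocation request from previously recorded (deallocated but
 * still mapped) chunks: search chunks_szad for an extent large enough to hold
 * an aligned range of size bytes, carve that range out, and reinsert any
 * leading/trailing space as smaller extents.
 */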
static void *
chunk_recycle(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	extent_node_t *node;
	extent_node_t key;
	size_t alloc_size, leadsize, trailsize;

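	/*
	 * Recorded extents are chunk-aligned and chunk-size multiples, so
	 * searching for (size + alignment - chunksize) bytes guarantees that
	 * an aligned range of size bytes can be carved out of any match.
	 */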
	alloc_size = size + alignment - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	key.addr = NULL;
	key.size = alloc_size;
	malloc_mutex_lock(&chunks_mtx);
	node = extent_tree_szad_nsearch(&chunks_szad, &key);
	if (node == NULL) {
		malloc_mutex_unlock(&chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
	    (uintptr_t)node->addr;
	assert(alloc_size >= leadsize + size);
	trailsize = alloc_size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
	/* Remove node from the tree. */
	extent_tree_szad_remove(&chunks_szad, node);
	extent_tree_ad_remove(&chunks_ad, node);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		node->size = leadsize;
		extent_tree_szad_insert(&chunks_szad, node);
		extent_tree_ad_insert(&chunks_ad, node);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			/*
			 * An additional node is required, but
			 * base_node_alloc() can cause a new base chunk to be
			 * allocated.  Drop chunks_mtx in order to avoid
			 * deadlock, and if node allocation fails, deallocate
			 * the result before returning an error.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			node = base_node_alloc();
			if (node == NULL) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
			malloc_mutex_lock(&chunks_mtx);
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
		extent_tree_szad_insert(&chunks_szad, node);
		extent_tree_ad_insert(&chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

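	/*
	 * node is still non-NULL here only if it was not reused to describe
	 * leading or trailing space, so return it to the base allocator.
	 */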
	if (node != NULL)
		base_node_dealloc(node);
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
	/* Pages are zeroed as a side effect of pages_purge(). */
	*zero = true;
#else
	if (*zero) {
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		memset(ret, 0, size);
	}
#endif
	return (ret);
}

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 */
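/*
 * Illustrative sketch only (not code from this file): a caller that does not
 * require zeroed memory passes zero=false and checks whether it was toggled,
 * e.g.:
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc(chunksize, chunksize, false, &zero);
 *	if (chunk != NULL && zero) {
 *		// Pages are known to be zero-filled; initialization that
 *		// would otherwise clear them can be skipped.
 *	}
 */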
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert((alignment & chunksize_mask) == 0);

	ret = chunk_recycle(size, alignment, zero);
	if (ret != NULL)
		goto label_return;

	ret = chunk_alloc_mmap(size, alignment, zero);
	if (ret != NULL)
		goto label_return;

	if (config_dss) {
		ret = chunk_alloc_dss(size, alignment, zero);
		if (ret != NULL)
			goto label_return;
	}

	/* All strategies for allocation failed. */
	ret = NULL;
label_return:
	if (config_ivsalloc && base == false && ret != NULL) {
		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
			chunk_dealloc(ret, size, true);
			return (NULL);
		}
	}
	if ((config_stats || config_prof) && ret != NULL) {
		bool gdump;
		malloc_mutex_lock(&chunks_mtx);
		if (config_stats)
			stats_chunks.nchunks += (size / chunksize);
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks = stats_chunks.curchunks;
			if (config_prof)
				gdump = true;
		} else if (config_prof)
			gdump = false;
		malloc_mutex_unlock(&chunks_mtx);
		if (config_prof && opt_prof && opt_prof_gdump && gdump)
			prof_gdump();
	}
	if (config_debug && *zero && ret != NULL) {
		size_t i;
		size_t *p = (size_t *)(uintptr_t)ret;

		for (i = 0; i < size / sizeof(size_t); i++)
			assert(p[i] == 0);
	}
	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}

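/*
 * Record a deallocated (but still mapped) chunk in chunks_szad/chunks_ad so
 * that chunk_recycle() can reuse it, coalescing with adjacent recorded
 * extents where possible.  The pages are purged first so that the recorded
 * range does not retain physical memory.
 */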
static void
chunk_record(void *chunk, size_t size)
{
	extent_node_t *xnode, *node, *prev, key;

	pages_purge(chunk, size);

	xnode = NULL;
	malloc_mutex_lock(&chunks_mtx);
	while (true) {
		key.addr = (void *)((uintptr_t)chunk + size);
		node = extent_tree_ad_nsearch(&chunks_ad, &key);
		/* Try to coalesce forward. */
		if (node != NULL && node->addr == key.addr) {
			/*
			 * Coalesce chunk with the following address range.
			 * This does not change the position within chunks_ad,
			 * so only remove/insert from/into chunks_szad.
			 */
			extent_tree_szad_remove(&chunks_szad, node);
			node->addr = chunk;
			node->size += size;
			extent_tree_szad_insert(&chunks_szad, node);
			break;
		} else if (xnode == NULL) {
			/*
			 * It is possible that base_node_alloc() will cause a
			 * new base chunk to be allocated, so take care not to
			 * deadlock on chunks_mtx, and recover if another thread
			 * deallocates an adjacent chunk while this one is busy
			 * allocating xnode.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			xnode = base_node_alloc();
			if (xnode == NULL)
				return;
			malloc_mutex_lock(&chunks_mtx);
		} else {
			/* Coalescing forward failed, so insert a new node. */
			node = xnode;
			xnode = NULL;
			node->addr = chunk;
			node->size = size;
			extent_tree_ad_insert(&chunks_ad, node);
			extent_tree_szad_insert(&chunks_szad, node);
			break;
		}
	}
	/* Discard xnode if it ended up unused due to a race. */
	if (xnode != NULL)
		base_node_dealloc(xnode);

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(&chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(&chunks_szad, prev);
		extent_tree_ad_remove(&chunks_ad, prev);

		extent_tree_szad_remove(&chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		extent_tree_szad_insert(&chunks_szad, node);

		base_node_dealloc(prev);
	}
	malloc_mutex_unlock(&chunks_mtx);
}

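/*
 * Deallocate a chunk: drop it from the chunk bookkeeping (rtree, stats) and,
 * if unmap is true, either return it to the operating system or record it for
 * later recycling when it cannot be unmapped.
 */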
void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_ivsalloc)
		rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
	if (config_stats || config_prof) {
		malloc_mutex_lock(&chunks_mtx);
		stats_chunks.curchunks -= (size / chunksize);
		malloc_mutex_unlock(&chunks_mtx);
	}

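	/*
	 * Memory in the DSS cannot be unmapped, and chunk_dealloc_mmap()
	 * returns true when it leaves the mapping in place; in either case
	 * the chunk is recorded so chunk_recycle() can reuse it.
	 */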
	if (unmap) {
		if ((config_dss && chunk_in_dss(chunk)) ||
		    chunk_dealloc_mmap(chunk, size))
			chunk_record(chunk, size);
	}
}

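/*
 * Initialize chunk-related state at startup: derive chunksize and related
 * values from opt_lg_chunk, and set up the mutex, stats, DSS, extent trees,
 * and rtree as dictated by the compile-time config flags.  Returns true on
 * error.
 */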
bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (config_stats || config_prof) {
		if (malloc_mutex_init(&chunks_mtx))
			return (true);
		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
	}
	if (config_dss && chunk_dss_boot())
		return (true);
	extent_tree_szad_new(&chunks_szad);
	extent_tree_ad_new(&chunks_ad);
	if (config_ivsalloc) {
		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    opt_lg_chunk);
		if (chunks_rtree == NULL)
			return (true);
	}

	return (false);
}