#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t	chunks_mtx;
chunk_stats_t	stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t	chunks_szad_mmap;
static extent_tree_t	chunks_ad_mmap;
static extent_tree_t	chunks_szad_dss;
static extent_tree_t	chunks_ad_dss;

rtree_t		*chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	chunk_dalloc_core(void *chunk, size_t size);

/******************************************************************************/

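/*
 * Try to satisfy a chunk request by splitting a previously recorded extent
 * from the given szad/ad tree pair.  If new_addr is non-NULL, only an extent
 * beginning at exactly that address is acceptable.  Any leading and trailing
 * space left over after carving out the aligned request is reinserted into
 * the trees for later reuse.
 */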
static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
    void *new_addr, size_t size, size_t alignment, bool base, bool *zero)
{
	void *ret;
	extent_node_t *node;
	extent_node_t key;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed;

	if (base) {
		/*
		 * This function may need to call base_node_{,de}alloc(), but
		 * the current chunk allocation request is on behalf of the
		 * base allocator.  Avoid deadlock (and if that weren't an
		 * issue, potential for infinite recursion) by returning NULL.
		 */
		return (NULL);
	}

	alloc_size = size + alignment - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	key.addr = new_addr;
	key.size = alloc_size;
	malloc_mutex_lock(&chunks_mtx);
	node = extent_tree_szad_nsearch(chunks_szad, &key);
	if (node == NULL || (new_addr && node->addr != new_addr)) {
		malloc_mutex_unlock(&chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
	    (uintptr_t)node->addr;
	assert(node->size >= leadsize + size);
	trailsize = node->size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
	zeroed = node->zeroed;
	if (zeroed)
		*zero = true;
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		node->size = leadsize;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			/*
			 * An additional node is required, but
			 * base_node_alloc() can cause a new base chunk to be
			 * allocated.  Drop chunks_mtx in order to avoid
			 * deadlock, and if node allocation fails, deallocate
			 * the result before returning an error.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			node = base_node_alloc();
			if (node == NULL) {
				chunk_dalloc_core(ret, size);
				return (NULL);
			}
			malloc_mutex_lock(&chunks_mtx);
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
		node->zeroed = zeroed;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

	if (node != NULL)
		base_node_dalloc(node);
	if (*zero) {
		if (!zeroed)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
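/*
 * The allocation strategies below are tried in order: recycled then freshly
 * allocated dss when dss precedence is primary, recycled then freshly mapped
 * mmap chunks, and finally dss when its precedence is secondary.
 */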
static void *
chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
    bool *zero, dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
		    new_addr, size, alignment, base, zero)) != NULL)
			return (ret);
		if ((ret = chunk_alloc_dss(new_addr, size, alignment, zero))
		    != NULL)
			return (ret);
	}
	/* mmap. */
	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, new_addr,
	    size, alignment, base, zero)) != NULL)
		return (ret);
	/* Requesting an address not implemented for chunk_alloc_mmap(). */
	if (new_addr == NULL &&
	    (ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
		    new_addr, size, alignment, base, zero)) != NULL)
			return (ret);
		if ((ret = chunk_alloc_dss(new_addr, size, alignment, zero))
		    != NULL)
			return (ret);
	}

	/* All strategies for allocation failed. */
	return (NULL);
}

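/*
 * Record a newly allocated chunk: insert it into the chunks rtree (so that
 * ivsalloc can recognize valid pointers), update chunk statistics, and, when
 * heap profiling with gdump is enabled, trigger a profile dump whenever the
 * chunk high water mark increases.  Returns true on error.
 */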
static bool
chunk_register(void *chunk, size_t size, bool base)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);

	if (config_ivsalloc && !base) {
		if (rtree_set(chunks_rtree, (uintptr_t)chunk, 1))
			return (true);
	}
	if (config_stats || config_prof) {
		bool gdump;
		malloc_mutex_lock(&chunks_mtx);
		if (config_stats)
			stats_chunks.nchunks += (size / chunksize);
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks =
			    stats_chunks.curchunks;
			if (config_prof)
				gdump = true;
		} else if (config_prof)
			gdump = false;
		malloc_mutex_unlock(&chunks_mtx);
		if (config_prof && opt_prof && opt_prof_gdump && gdump)
			prof_gdump();
	}
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
	return (false);
}

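/*
 * Allocate a chunk on behalf of the base allocator.  Recycling is skipped
 * (base == true) to avoid deadlock, and the chunk is registered before being
 * returned.
 */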
void *
chunk_alloc_base(size_t size)
{
	void *ret;
	bool zero;

	zero = false;
	ret = chunk_alloc_core(NULL, size, chunksize, true, &zero,
	    chunk_dss_prec_get());
	if (ret == NULL)
		return (NULL);
	if (chunk_register(ret, size, true)) {
		chunk_dalloc_core(ret, size);
		return (NULL);
	}
	return (ret);
}

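/*
 * Allocate a chunk via an arena's (possibly user-supplied) chunk_alloc hook
 * and register it.  If registration fails, the chunk is returned through the
 * matching chunk_dalloc hook and NULL is returned.
 */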
void *
chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
    unsigned arena_ind, void *new_addr, size_t size, size_t alignment,
    bool *zero)
{
	void *ret;

	ret = chunk_alloc(new_addr, size, alignment, zero, arena_ind);
	if (ret != NULL && chunk_register(ret, size, false)) {
		chunk_dalloc(ret, size, arena_ind);
		ret = NULL;
	}

	return (ret);
}

/* Default arena chunk allocation routine in the absence of user override. */
void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    unsigned arena_ind)
{
	arena_t *arena;

	arena = arena_get(tsd_fetch(), arena_ind, false, true);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);

	return (chunk_alloc_core(new_addr, size, alignment, false, zero,
	    arena->dss_prec));
}

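/*
 * Record a chunk in the given szad/ad tree pair so that chunk_recycle() can
 * later reuse its address range.  The pages are purged first, and the new
 * extent is coalesced with adjacent recorded extents whenever possible.
 */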
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    size_t size)
{
	bool unzeroed;
	extent_node_t *xnode, *node, *prev, *xprev, key;

	unzeroed = pages_purge(chunk, size);
	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	/*
	 * Allocate a node before acquiring chunks_mtx even though it might not
	 * be needed, because base_node_alloc() may cause a new base chunk to
	 * be allocated, which could cause deadlock if chunks_mtx were already
	 * held.
	 */
	xnode = base_node_alloc();
	/* Use xprev to implement conditional deferred deallocation of prev. */
	xprev = NULL;

	malloc_mutex_lock(&chunks_mtx);
	key.addr = (void *)((uintptr_t)chunk + size);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && node->addr == key.addr) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		node->addr = chunk;
		node->size += size;
		node->zeroed = (node->zeroed && !unzeroed);
		extent_tree_szad_insert(chunks_szad, node);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		if (xnode == NULL) {
			/*
			 * base_node_alloc() failed, which is an exceedingly
			 * unlikely failure.  Leak chunk; its pages have
			 * already been purged, so this is only a virtual
			 * memory leak.
			 */
			goto label_return;
		}
		node = xnode;
		xnode = NULL; /* Prevent deallocation below. */
		node->addr = chunk;
		node->size = size;
		node->zeroed = !unzeroed;
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);

		extent_tree_szad_remove(chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		node->zeroed = (node->zeroed && prev->zeroed);
		extent_tree_szad_insert(chunks_szad, node);

		xprev = prev;
	}

label_return:
	malloc_mutex_unlock(&chunks_mtx);
	/*
	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
	 * avoid potential deadlock.
	 */
	if (xnode != NULL)
		base_node_dalloc(xnode);
	if (xprev != NULL)
		base_node_dalloc(xprev);
}

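/*
 * Return a chunk to the recycling trees or to the system: chunks that lie
 * within the dss are always recorded, whereas mmap'ed chunks are recorded
 * only if chunk_dalloc_mmap() declines to unmap them.
 */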
void
chunk_unmap(void *chunk, size_t size)
{
	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (have_dss && chunk_in_dss(chunk))
		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
	else if (chunk_dalloc_mmap(chunk, size))
		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}

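/*
 * Common chunk deallocation path: deregister the chunk from the rtree, update
 * chunk statistics, and hand the chunk to chunk_unmap().
 */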
static void
chunk_dalloc_core(void *chunk, size_t size)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_ivsalloc)
		rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
	if (config_stats || config_prof) {
		malloc_mutex_lock(&chunks_mtx);
		assert(stats_chunks.curchunks >= (size / chunksize));
		stats_chunks.curchunks -= (size / chunksize);
		malloc_mutex_unlock(&chunks_mtx);
	}

	chunk_unmap(chunk, size);
}

/* Default arena chunk deallocation routine in the absence of user override. */
bool
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
{

	chunk_dalloc_core(chunk, size);
	return (false);
}

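/*
 * One-time initialization: derive chunksize-related constants from
 * opt_lg_chunk, and initialize the chunks mutex, statistics, dss support, the
 * recycling trees, and (if ivsalloc is enabled) the chunks rtree.
 */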
bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (malloc_mutex_init(&chunks_mtx))
		return (true);
	if (config_stats || config_prof)
		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
	if (have_dss && chunk_dss_boot())
		return (true);
	extent_tree_szad_new(&chunks_szad_mmap);
	extent_tree_ad_new(&chunks_ad_mmap);
	extent_tree_szad_new(&chunks_szad_dss);
	extent_tree_ad_new(&chunks_ad_dss);
	if (config_ivsalloc) {
		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    opt_lg_chunk, base_alloc, NULL);
		if (chunks_rtree == NULL)
			return (true);
	}

	return (false);
}

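/*
 * Fork handling: chunk_prefork() acquires the chunk-related mutexes before
 * fork(), and the postfork functions release them afterward in the parent and
 * child, so that the child does not inherit a mutex locked by a thread that
 * no longer exists.
 */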
void
chunk_prefork(void)
{

	malloc_mutex_prefork(&chunks_mtx);
	if (config_ivsalloc)
		rtree_prefork(chunks_rtree);
	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
	if (config_ivsalloc)
		rtree_postfork_parent(chunks_rtree);
	malloc_mutex_postfork_parent(&chunks_mtx);
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
	if (config_ivsalloc)
		rtree_postfork_child(chunks_rtree);
	malloc_mutex_postfork_child(&chunks_mtx);
}