#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t	chunks_mtx;
chunk_stats_t	stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t	chunks_szad_mmap;
static extent_tree_t	chunks_ad_mmap;
static extent_tree_t	chunks_szad_dss;
static extent_tree_t	chunks_ad_dss;

rtree_t		*chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;
size_t		map_bias;
size_t		arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*chunk_recycle(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
    bool *zero);
static void	chunk_record(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, void *chunk, size_t size);

/******************************************************************************/

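/*
 * Attempt to satisfy a chunk allocation request from the given pair of
 * recycling trees.  The best-fit cached extent is split as needed, with any
 * leading/trailing space reinserted as smaller extents.  Returns NULL if no
 * cached extent is large enough, or if the request is on behalf of the base
 * allocator.
 */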
static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
    size_t alignment, bool base, bool *zero)
{
	void *ret;
	extent_node_t *node;
	extent_node_t key;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed;

	if (base) {
		/*
		 * This function may need to call base_node_{,de}alloc(), but
		 * the current chunk allocation request is on behalf of the
		 * base allocator.  Avoid deadlock (and if that weren't an
		 * issue, potential for infinite recursion) by returning NULL.
		 */
		return (NULL);
	}

	alloc_size = size + alignment - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	key.addr = NULL;
	key.size = alloc_size;
	malloc_mutex_lock(&chunks_mtx);
	node = extent_tree_szad_nsearch(chunks_szad, &key);
	if (node == NULL) {
		malloc_mutex_unlock(&chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
	    (uintptr_t)node->addr;
	assert(node->size >= leadsize + size);
	trailsize = node->size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
	zeroed = node->zeroed;
	if (zeroed)
		*zero = true;
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		node->size = leadsize;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			/*
			 * An additional node is required, but
			 * base_node_alloc() can cause a new base chunk to be
			 * allocated.  Drop chunks_mtx in order to avoid
			 * deadlock, and if node allocation fails, deallocate
			 * the result before returning an error.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			node = base_node_alloc();
			if (node == NULL) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
			malloc_mutex_lock(&chunks_mtx);
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
		node->zeroed = zeroed;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

	if (node != NULL)
		base_node_dealloc(node);
	if (*zero) {
		if (zeroed == false)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, while still
 * taking advantage of them when they happen to be returned.
 */
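/*
 * Minimal usage sketch (hypothetical caller) of the *zero protocol described
 * above: the caller does not demand zeroed memory, but skips its own
 * memset() when the chunk is reported as already zeroed.
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc(chunksize, chunksize, false, &zero,
 *	    dss_prec_secondary);
 *	if (chunk != NULL && zero == false)
 *		memset(chunk, 0, chunksize);
 */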
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* "primary" dss. */
	if (config_dss && dss_prec == dss_prec_primary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
		    alignment, base, zero)) != NULL)
			goto label_return;
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			goto label_return;
	}
	/* mmap. */
	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
	    alignment, base, zero)) != NULL)
		goto label_return;
	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
		goto label_return;
	/* "secondary" dss. */
	if (config_dss && dss_prec == dss_prec_secondary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
		    alignment, base, zero)) != NULL)
			goto label_return;
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			goto label_return;
	}

	/* All strategies for allocation failed. */
	ret = NULL;
label_return:
	if (ret != NULL) {
		if (config_ivsalloc && base == false) {
			if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
		}
		if (config_stats || config_prof) {
			bool gdump;
			malloc_mutex_lock(&chunks_mtx);
			if (config_stats)
				stats_chunks.nchunks += (size / chunksize);
			stats_chunks.curchunks += (size / chunksize);
			if (stats_chunks.curchunks > stats_chunks.highchunks) {
				stats_chunks.highchunks =
				    stats_chunks.curchunks;
				if (config_prof)
					gdump = true;
			} else if (config_prof)
				gdump = false;
			malloc_mutex_unlock(&chunks_mtx);
			if (config_prof && opt_prof && opt_prof_gdump && gdump)
				prof_gdump();
		}
		if (config_valgrind)
			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	}
	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}

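/*
 * Record an address range in the given pair of extent trees so that
 * chunk_recycle() can re-use it later.  The range's pages are purged, and the
 * new extent is coalesced with adjacent cached extents where possible.
 */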
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    size_t size)
{
	bool unzeroed;
	extent_node_t *xnode, *node, *prev, *xprev, key;

	unzeroed = pages_purge(chunk, size);
	VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	/*
	 * Allocate a node before acquiring chunks_mtx even though it might not
	 * be needed, because base_node_alloc() may cause a new base chunk to
	 * be allocated, which could cause deadlock if chunks_mtx were already
	 * held.
	 */
	xnode = base_node_alloc();
	/* Use xprev to implement conditional deferred deallocation of prev. */
	xprev = NULL;

	malloc_mutex_lock(&chunks_mtx);
	key.addr = (void *)((uintptr_t)chunk + size);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && node->addr == key.addr) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		node->addr = chunk;
		node->size += size;
		node->zeroed = (node->zeroed && (unzeroed == false));
		extent_tree_szad_insert(chunks_szad, node);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		if (xnode == NULL) {
			/*
			 * base_node_alloc() failed, which is an exceedingly
			 * unlikely failure.  Leak chunk; its pages have
			 * already been purged, so this is only a virtual
			 * memory leak.
			 */
			goto label_return;
		}
		node = xnode;
		xnode = NULL; /* Prevent deallocation below. */
		node->addr = chunk;
		node->size = size;
		node->zeroed = (unzeroed == false);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);

		extent_tree_szad_remove(chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		node->zeroed = (node->zeroed && prev->zeroed);
		extent_tree_szad_insert(chunks_szad, node);

		xprev = prev;
	}

label_return:
	malloc_mutex_unlock(&chunks_mtx);
	/*
	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
	 * avoid potential deadlock.
	 */
	if (xnode != NULL)
		base_node_dealloc(xnode);
	if (xprev != NULL)
		base_node_dealloc(xprev);
}

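/*
 * Return a chunk's address range to the recycling trees: dss-backed chunks
 * are always recorded in the dss trees, whereas mmap'ed chunks are recorded
 * in the mmap trees only when chunk_dealloc_mmap() reports that it did not
 * actually unmap them.
 */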
void
chunk_unmap(void *chunk, size_t size)
{
	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_dss && chunk_in_dss(chunk))
		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
	else if (chunk_dealloc_mmap(chunk, size))
		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}

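/*
 * Top-level chunk deallocation: clear the chunk's ivsalloc rtree entry,
 * update chunk statistics, and optionally hand the address range back via
 * chunk_unmap().
 */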
void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_ivsalloc)
		rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
	if (config_stats || config_prof) {
		malloc_mutex_lock(&chunks_mtx);
		assert(stats_chunks.curchunks >= (size / chunksize));
		stats_chunks.curchunks -= (size / chunksize);
		malloc_mutex_unlock(&chunks_mtx);
	}

	if (unmap)
		chunk_unmap(chunk, size);
}

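/*
 * Initialize chunk-related state during bootstrap: chunk size parameters,
 * chunk statistics, dss support, the four recycling trees, and (when
 * ivsalloc is enabled) the chunk rtree.  Returns true on error.
 */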
bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (config_stats || config_prof) {
		if (malloc_mutex_init(&chunks_mtx))
			return (true);
		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
	}
	if (config_dss && chunk_dss_boot())
		return (true);
	extent_tree_szad_new(&chunks_szad_mmap);
	extent_tree_ad_new(&chunks_ad_mmap);
	extent_tree_szad_new(&chunks_szad_dss);
	extent_tree_ad_new(&chunks_ad_dss);
	if (config_ivsalloc) {
		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    opt_lg_chunk, base_alloc, NULL);
		if (chunks_rtree == NULL)
			return (true);
	}

	return (false);
}

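/*
 * Fork-safety hooks: acquire chunks_mtx and prepare the rtree and dss state
 * before fork, then release/reinitialize that state in the parent and child
 * in the reverse order afterward.
 */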
void
chunk_prefork(void)
{

	malloc_mutex_prefork(&chunks_mtx);
	if (config_ivsalloc)
		rtree_prefork(chunks_rtree);
	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
	if (config_ivsalloc)
		rtree_postfork_parent(chunks_rtree);
	malloc_mutex_postfork_parent(&chunks_mtx);
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
	if (config_ivsalloc)
		rtree_postfork_child(chunks_rtree);
	malloc_mutex_postfork_child(&chunks_mtx);
}