Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 1 | #define JEMALLOC_CHUNK_C_ |
Jason Evans | 376b152 | 2010-02-11 14:45:59 -0800 | [diff] [blame] | 2 | #include "jemalloc/internal/jemalloc_internal.h" |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 3 | |
| 4 | /******************************************************************************/ |
| 5 | /* Data. */ |
| 6 | |
/* DSS (sbrk) allocation precedence; initialized from DSS_DEFAULT. */
const char *opt_dss = DSS_DEFAULT;
/*
 * Log2 of the chunk size.  Zero here means "unset"; presumably the
 * effective value is chosen during bootstrap -- TODO(review): confirm
 * against option parsing/initialization code.
 */
size_t opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t curchunks;
static size_t highchunks;

/* Radix tree mapping chunk addresses to their extent nodes. */
rtree_t chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 20 | |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 21 | static void *chunk_alloc_default(void *new_addr, size_t size, |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 22 | size_t alignment, bool *zero, bool *commit, unsigned arena_ind); |
| 23 | static bool chunk_dalloc_default(void *chunk, size_t size, bool committed, |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 24 | unsigned arena_ind); |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 25 | static bool chunk_commit_default(void *chunk, size_t size, size_t offset, |
| 26 | size_t length, unsigned arena_ind); |
| 27 | static bool chunk_decommit_default(void *chunk, size_t size, size_t offset, |
| 28 | size_t length, unsigned arena_ind); |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 29 | static bool chunk_purge_default(void *chunk, size_t size, size_t offset, |
| 30 | size_t length, unsigned arena_ind); |
| 31 | static bool chunk_split_default(void *chunk, size_t size, size_t size_a, |
| 32 | size_t size_b, bool committed, unsigned arena_ind); |
| 33 | static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, |
| 34 | size_t size_b, bool committed, unsigned arena_ind); |
| 35 | |
/* Table of the built-in chunk hook implementations. */
const chunk_hooks_t chunk_hooks_default = {
	chunk_alloc_default,	/* alloc */
	chunk_dalloc_default,	/* dalloc */
	chunk_commit_default,	/* commit */
	chunk_decommit_default,	/* decommit */
	chunk_purge_default,	/* purge */
	chunk_split_default,	/* split */
	chunk_merge_default	/* merge */
};
| 45 | |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 46 | /******************************************************************************/ |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 47 | /* |
| 48 | * Function prototypes for static functions that are referenced prior to |
| 49 | * definition. |
| 50 | */ |
| 51 | |
| 52 | static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, |
| 53 | extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 54 | void *chunk, size_t size, bool zeroed, bool committed); |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 55 | |
| 56 | /******************************************************************************/ |
| 57 | |
| 58 | static chunk_hooks_t |
| 59 | chunk_hooks_get_locked(arena_t *arena) |
| 60 | { |
| 61 | |
| 62 | return (arena->chunk_hooks); |
| 63 | } |
| 64 | |
| 65 | chunk_hooks_t |
| 66 | chunk_hooks_get(arena_t *arena) |
| 67 | { |
| 68 | chunk_hooks_t chunk_hooks; |
| 69 | |
| 70 | malloc_mutex_lock(&arena->chunks_mtx); |
| 71 | chunk_hooks = chunk_hooks_get_locked(arena); |
| 72 | malloc_mutex_unlock(&arena->chunks_mtx); |
| 73 | |
| 74 | return (chunk_hooks); |
| 75 | } |
| 76 | |
/*
 * Install new chunk hooks for the arena and return the previously
 * installed hooks by value.  Writers are serialized by arena->chunks_mtx;
 * see the comment below for how lock-free readers are accommodated.
 */
chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
	chunk_hooks_t old_chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	old_chunk_hooks = arena->chunk_hooks;
	/*
	 * Copy each field atomically so that it is impossible for readers to
	 * see partially updated pointers.  There are places where readers only
	 * need one hook function pointer (therefore no need to copy the
	 * entirety of arena->chunk_hooks), and stale reads do not affect
	 * correctness, so they perform unlocked reads.
	 */
	/*
	 * The union exists to convert a chunk_*_t ** to the void ** that
	 * atomic_write_p() expects without an aliasing cast.
	 */
#define ATOMIC_COPY_HOOK(n) do {					\
	union {								\
		chunk_##n##_t **n;					\
		void **v;						\
	} u;								\
	u.n = &arena->chunk_hooks.n;					\
	atomic_write_p(u.v, chunk_hooks->n);				\
} while (0)
	ATOMIC_COPY_HOOK(alloc);
	ATOMIC_COPY_HOOK(dalloc);
	ATOMIC_COPY_HOOK(commit);
	ATOMIC_COPY_HOOK(decommit);
	ATOMIC_COPY_HOOK(purge);
	ATOMIC_COPY_HOOK(split);
	ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (old_chunk_hooks);
}
| 111 | |
| 112 | static void |
| 113 | chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks, |
| 114 | bool locked) |
| 115 | { |
| 116 | static const chunk_hooks_t uninitialized_hooks = |
| 117 | CHUNK_HOOKS_INITIALIZER; |
| 118 | |
| 119 | if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) == |
| 120 | 0) { |
| 121 | *chunk_hooks = locked ? chunk_hooks_get_locked(arena) : |
| 122 | chunk_hooks_get(arena); |
| 123 | } |
| 124 | } |
| 125 | |
| 126 | static void |
| 127 | chunk_hooks_assure_initialized_locked(arena_t *arena, |
| 128 | chunk_hooks_t *chunk_hooks) |
| 129 | { |
| 130 | |
| 131 | chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true); |
| 132 | } |
| 133 | |
| 134 | static void |
| 135 | chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks) |
| 136 | { |
| 137 | |
| 138 | chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false); |
| 139 | } |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 140 | |
/*
 * Record the mapping from a chunk's address to its extent node in the
 * global radix tree.  Returns true on error (rtree insertion failure),
 * false on success.  When heap profiling is enabled, also maintains the
 * global chunk counters used to trigger growth-driven profile dumps.
 */
bool
chunk_register(const void *chunk, const extent_node_t *node)
{

	assert(extent_node_addr_get(node) == chunk);

	if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
		return (true);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		/* A zero-sized node still accounts for one chunk. */
		size_t nadd = (size == 0) ? 1 : size / chunksize;
		size_t cur = atomic_add_z(&curchunks, nadd);
		size_t high = atomic_read_z(&highchunks);
		/* Ratchet highchunks up to cur via CAS retry. */
		while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highchunks update race.
			 */
			high = atomic_read_z(&highchunks);
		}
		/* Only the thread that raised the high water mark dumps. */
		if (cur > high && prof_gdump_get_unlocked())
			prof_gdump();
	}

	return (false);
}
| 167 | |
| 168 | void |
| 169 | chunk_deregister(const void *chunk, const extent_node_t *node) |
| 170 | { |
| 171 | bool err; |
| 172 | |
| 173 | err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL); |
| 174 | assert(!err); |
| 175 | if (config_prof && opt_prof) { |
Jason Evans | ee41ad4 | 2015-02-15 18:04:46 -0800 | [diff] [blame] | 176 | size_t size = extent_node_size_get(node); |
| 177 | size_t nsub = (size == 0) ? 1 : size / chunksize; |
Jason Evans | cbf3a6d | 2015-02-11 12:24:27 -0800 | [diff] [blame] | 178 | assert(atomic_read_z(&curchunks) >= nsub); |
| 179 | atomic_sub_z(&curchunks, nsub); |
| 180 | } |
| 181 | } |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 182 | |
Jason Evans | aa28266 | 2015-07-15 16:02:21 -0700 | [diff] [blame] | 183 | /* |
| 184 | * Do first-best-fit chunk selection, i.e. select the lowest chunk that best |
| 185 | * fits. |
| 186 | */ |
Jason Evans | 97c04a9 | 2015-03-06 19:57:36 -0800 | [diff] [blame] | 187 | static extent_node_t * |
Jason Evans | aa28266 | 2015-07-15 16:02:21 -0700 | [diff] [blame] | 188 | chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad, |
Jason Evans | 04ca758 | 2015-03-06 23:25:13 -0800 | [diff] [blame] | 189 | extent_tree_t *chunks_ad, size_t size) |
Jason Evans | 97c04a9 | 2015-03-06 19:57:36 -0800 | [diff] [blame] | 190 | { |
Jason Evans | aa28266 | 2015-07-15 16:02:21 -0700 | [diff] [blame] | 191 | extent_node_t key; |
Jason Evans | 97c04a9 | 2015-03-06 19:57:36 -0800 | [diff] [blame] | 192 | |
| 193 | assert(size == CHUNK_CEILING(size)); |
| 194 | |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 195 | extent_node_init(&key, arena, NULL, size, false, false); |
Jason Evans | aa28266 | 2015-07-15 16:02:21 -0700 | [diff] [blame] | 196 | return (extent_tree_szad_nsearch(chunks_szad, &key)); |
Jason Evans | 97c04a9 | 2015-03-06 19:57:36 -0800 | [diff] [blame] | 197 | } |
| 198 | |
/*
 * Allocate a region by recycling previously mapped virtual memory tracked
 * in the given size/address-ordered extent trees (cache=true selects the
 * cached trees' bookkeeping, false the retained trees').  On success the
 * selected extent is carved down to the requested size/alignment --
 * leading/trailing remainders are split off and reinserted -- the region
 * is committed if necessary, and its address is returned; NULL on failure.
 * *zero and *commit are raised to true if the returned memory is already
 * zeroed/committed.  If dalloc_node is true, a leftover extent node may be
 * returned to the arena's node pool.
 */
static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
    bool dalloc_node)
{
	void *ret;
	extent_node_t *node;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed, committed;

	assert(new_addr == NULL || alignment == chunksize);
	/*
	 * Cached chunks use the node linkage embedded in their headers, in
	 * which case dalloc_node is true, and new_addr is non-NULL because
	 * we're operating on a specific chunk.
	 */
	assert(dalloc_node || new_addr != NULL);

	/* Pad the request so any sufficiently large extent can be aligned. */
	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	if (new_addr != NULL) {
		/* Look up the extent at the exact requested address. */
		extent_node_t key;
		extent_node_init(&key, arena, new_addr, alloc_size, false,
		    false);
		node = extent_tree_ad_search(chunks_ad, &key);
	} else {
		node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
		    alloc_size);
	}
	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
	    size)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	/* Bytes needed in front of the extent to reach alignment. */
	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
	    alignment) - (uintptr_t)extent_node_addr_get(node);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_node_size_get(node) >= leadsize + size);
	trailsize = extent_node_size_get(node) - leadsize - size;
	ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
	zeroed = extent_node_zeroed_get(node);
	if (zeroed)
		*zero = true;
	committed = extent_node_committed_get(node);
	if (committed)
		*commit = true;
	/* Split the lead. */
	if (leadsize != 0 &&
	    chunk_hooks->split(extent_node_addr_get(node),
	    extent_node_size_get(node), leadsize, size, false, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	arena_chunk_cache_maybe_remove(arena, node, cache);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		extent_node_size_set(node, leadsize);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Split the trail. */
		if (chunk_hooks->split(ret, size + trailsize, size,
		    trailsize, false, arena->ind)) {
			if (dalloc_node && node != NULL)
				arena_node_dalloc(arena, node);
			malloc_mutex_unlock(&arena->chunks_mtx);
			/* Give the carved-out range back to the trees. */
			chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
			    cache, ret, size + trailsize, zeroed, committed);
			return (NULL);
		}
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			node = arena_node_alloc(arena);
			if (node == NULL) {
				malloc_mutex_unlock(&arena->chunks_mtx);
				chunk_record(arena, chunk_hooks, chunks_szad,
				    chunks_ad, cache, ret, size + trailsize,
				    zeroed, committed);
				return (NULL);
			}
		}
		extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
		    trailsize, zeroed, committed);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	/* Commit the region now if the recycled extent was decommitted. */
	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
		    ret, size, zeroed, committed);
		return (NULL);
	}
	malloc_mutex_unlock(&arena->chunks_mtx);

	assert(dalloc_node || node != NULL);
	if (dalloc_node && node != NULL)
		arena_node_dalloc(arena, node);
	if (*zero) {
		if (!zeroed)
			memset(ret, 0, size);
		else if (config_debug) {
			/* Verify the extent really is zeroed as claimed. */
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 323 | |
Jason Evans | 41631d0 | 2010-01-24 17:13:07 -0800 | [diff] [blame] | 324 | /* |
Jason Evans | 551ebc4 | 2014-10-03 10:16:09 -0700 | [diff] [blame] | 325 | * If the caller specifies (!*zero), it is still possible to receive zeroed |
| 326 | * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes |
| 327 | * advantage of this to avoid demanding zeroed chunks, but taking advantage of |
| 328 | * them if they are returned. |
Jason Evans | 41631d0 | 2010-01-24 17:13:07 -0800 | [diff] [blame] | 329 | */ |
aravind | fb7fe50 | 2014-05-05 15:16:56 -0700 | [diff] [blame] | 330 | static void * |
Jason Evans | cbf3a6d | 2015-02-11 12:24:27 -0800 | [diff] [blame] | 331 | chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment, |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 332 | bool *zero, bool *commit, dss_prec_t dss_prec) |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 333 | { |
| 334 | void *ret; |
| 335 | |
| 336 | assert(size != 0); |
| 337 | assert((size & chunksize_mask) == 0); |
Jason Evans | de6fbdb | 2012-05-09 13:05:04 -0700 | [diff] [blame] | 338 | assert(alignment != 0); |
Mike Hommey | eae2690 | 2012-04-10 19:50:33 +0200 | [diff] [blame] | 339 | assert((alignment & chunksize_mask) == 0); |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 340 | |
Jason Evans | 609ae59 | 2012-10-11 13:53:15 -0700 | [diff] [blame] | 341 | /* "primary" dss. */ |
Jason Evans | 0fd663e | 2015-01-25 17:31:24 -0800 | [diff] [blame] | 342 | if (have_dss && dss_prec == dss_prec_primary && (ret = |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 343 | chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != |
| 344 | NULL) |
Jason Evans | 0fd663e | 2015-01-25 17:31:24 -0800 | [diff] [blame] | 345 | return (ret); |
Jason Evans | c7a9a6c | 2016-02-24 17:18:44 -0800 | [diff] [blame] | 346 | /* mmap. */ |
| 347 | if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) != |
| 348 | NULL) |
aravind | fb7fe50 | 2014-05-05 15:16:56 -0700 | [diff] [blame] | 349 | return (ret); |
Jason Evans | 609ae59 | 2012-10-11 13:53:15 -0700 | [diff] [blame] | 350 | /* "secondary" dss. */ |
Jason Evans | 0fd663e | 2015-01-25 17:31:24 -0800 | [diff] [blame] | 351 | if (have_dss && dss_prec == dss_prec_secondary && (ret = |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 352 | chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != |
| 353 | NULL) |
Jason Evans | 0fd663e | 2015-01-25 17:31:24 -0800 | [diff] [blame] | 354 | return (ret); |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 355 | |
| 356 | /* All strategies for allocation failed. */ |
aravind | fb7fe50 | 2014-05-05 15:16:56 -0700 | [diff] [blame] | 357 | return (NULL); |
| 358 | } |
| 359 | |
Jason Evans | e2deab7 | 2014-05-15 22:22:27 -0700 | [diff] [blame] | 360 | void * |
| 361 | chunk_alloc_base(size_t size) |
| 362 | { |
| 363 | void *ret; |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 364 | bool zero, commit; |
Jason Evans | e2deab7 | 2014-05-15 22:22:27 -0700 | [diff] [blame] | 365 | |
Jason Evans | f500a10 | 2015-01-30 21:49:19 -0800 | [diff] [blame] | 366 | /* |
| 367 | * Directly call chunk_alloc_mmap() rather than chunk_alloc_core() |
| 368 | * because it's critical that chunk_alloc_base() return untouched |
| 369 | * demand-zeroed virtual memory. |
| 370 | */ |
| 371 | zero = true; |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 372 | commit = true; |
Jason Evans | c7a9a6c | 2016-02-24 17:18:44 -0800 | [diff] [blame] | 373 | ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit); |
Jason Evans | cbf3a6d | 2015-02-11 12:24:27 -0800 | [diff] [blame] | 374 | if (ret == NULL) |
| 375 | return (NULL); |
| 376 | if (config_valgrind) |
| 377 | JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); |
Jason Evans | f500a10 | 2015-01-30 21:49:19 -0800 | [diff] [blame] | 378 | |
Jason Evans | e2deab7 | 2014-05-15 22:22:27 -0700 | [diff] [blame] | 379 | return (ret); |
| 380 | } |
| 381 | |
| 382 | void * |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 383 | chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, |
| 384 | size_t size, size_t alignment, bool *zero, bool dalloc_node) |
Jason Evans | e2deab7 | 2014-05-15 22:22:27 -0700 | [diff] [blame] | 385 | { |
Jason Evans | 4f6f2b1 | 2015-06-22 14:38:06 -0700 | [diff] [blame] | 386 | void *ret; |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 387 | bool commit; |
Jason Evans | e2deab7 | 2014-05-15 22:22:27 -0700 | [diff] [blame] | 388 | |
Jason Evans | 99bd94f | 2015-02-18 16:40:53 -0800 | [diff] [blame] | 389 | assert(size != 0); |
| 390 | assert((size & chunksize_mask) == 0); |
| 391 | assert(alignment != 0); |
| 392 | assert((alignment & chunksize_mask) == 0); |
Jason Evans | e2deab7 | 2014-05-15 22:22:27 -0700 | [diff] [blame] | 393 | |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 394 | commit = true; |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 395 | ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached, |
| 396 | &arena->chunks_ad_cached, true, new_addr, size, alignment, zero, |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 397 | &commit, dalloc_node); |
Jason Evans | 4f6f2b1 | 2015-06-22 14:38:06 -0700 | [diff] [blame] | 398 | if (ret == NULL) |
| 399 | return (NULL); |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 400 | assert(commit); |
Jason Evans | 4f6f2b1 | 2015-06-22 14:38:06 -0700 | [diff] [blame] | 401 | if (config_valgrind) |
| 402 | JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); |
| 403 | return (ret); |
Jason Evans | e2deab7 | 2014-05-15 22:22:27 -0700 | [diff] [blame] | 404 | } |
| 405 | |
Jason Evans | cbf3a6d | 2015-02-11 12:24:27 -0800 | [diff] [blame] | 406 | static arena_t * |
| 407 | chunk_arena_get(unsigned arena_ind) |
aravind | fb7fe50 | 2014-05-05 15:16:56 -0700 | [diff] [blame] | 408 | { |
Jason Evans | 8bb3198 | 2014-10-07 23:14:57 -0700 | [diff] [blame] | 409 | arena_t *arena; |
| 410 | |
Jason Evans | 767d850 | 2016-02-24 23:58:10 -0800 | [diff] [blame] | 411 | arena = arena_get(arena_ind, false); |
Jason Evans | 8bb3198 | 2014-10-07 23:14:57 -0700 | [diff] [blame] | 412 | /* |
| 413 | * The arena we're allocating on behalf of must have been initialized |
| 414 | * already. |
| 415 | */ |
| 416 | assert(arena != NULL); |
Jason Evans | cbf3a6d | 2015-02-11 12:24:27 -0800 | [diff] [blame] | 417 | return (arena); |
| 418 | } |
aravind | fb7fe50 | 2014-05-05 15:16:56 -0700 | [diff] [blame] | 419 | |
Jason Evans | 99bd94f | 2015-02-18 16:40:53 -0800 | [diff] [blame] | 420 | static void * |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 421 | chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 422 | bool *commit, unsigned arena_ind) |
Jason Evans | 99bd94f | 2015-02-18 16:40:53 -0800 | [diff] [blame] | 423 | { |
| 424 | void *ret; |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 425 | arena_t *arena; |
Jason Evans | 99bd94f | 2015-02-18 16:40:53 -0800 | [diff] [blame] | 426 | |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 427 | arena = chunk_arena_get(arena_ind); |
Jason Evans | 8d8960f | 2016-03-30 18:36:04 -0700 | [diff] [blame^] | 428 | ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, commit, |
| 429 | arena->dss_prec); |
Jason Evans | 99bd94f | 2015-02-18 16:40:53 -0800 | [diff] [blame] | 430 | if (ret == NULL) |
| 431 | return (NULL); |
| 432 | if (config_valgrind) |
| 433 | JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); |
| 434 | |
| 435 | return (ret); |
| 436 | } |
| 437 | |
buchgr | d412624 | 2015-12-09 18:00:57 +0100 | [diff] [blame] | 438 | static void * |
| 439 | chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, |
| 440 | size_t size, size_t alignment, bool *zero, bool *commit) |
| 441 | { |
| 442 | |
| 443 | assert(size != 0); |
| 444 | assert((size & chunksize_mask) == 0); |
| 445 | assert(alignment != 0); |
| 446 | assert((alignment & chunksize_mask) == 0); |
| 447 | |
| 448 | return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained, |
| 449 | &arena->chunks_ad_retained, false, new_addr, size, alignment, zero, |
| 450 | commit, true)); |
| 451 | } |
| 452 | |
Jason Evans | cbf3a6d | 2015-02-11 12:24:27 -0800 | [diff] [blame] | 453 | void * |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 454 | chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 455 | size_t size, size_t alignment, bool *zero, bool *commit) |
Jason Evans | 99bd94f | 2015-02-18 16:40:53 -0800 | [diff] [blame] | 456 | { |
| 457 | void *ret; |
| 458 | |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 459 | chunk_hooks_assure_initialized(arena, chunk_hooks); |
buchgr | d412624 | 2015-12-09 18:00:57 +0100 | [diff] [blame] | 460 | |
| 461 | ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size, |
| 462 | alignment, zero, commit); |
| 463 | if (ret == NULL) { |
| 464 | ret = chunk_hooks->alloc(new_addr, size, alignment, zero, |
| 465 | commit, arena->ind); |
| 466 | if (ret == NULL) |
| 467 | return (NULL); |
| 468 | } |
| 469 | |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 470 | if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default) |
Jason Evans | 35e3fd9 | 2015-02-18 16:51:51 -0800 | [diff] [blame] | 471 | JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); |
Jason Evans | 99bd94f | 2015-02-18 16:40:53 -0800 | [diff] [blame] | 472 | return (ret); |
aravind | fb7fe50 | 2014-05-05 15:16:56 -0700 | [diff] [blame] | 473 | } |
| 474 | |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 475 | static void |
| 476 | chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, |
| 477 | extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 478 | void *chunk, size_t size, bool zeroed, bool committed) |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 479 | { |
Jason Evans | 7de9276 | 2012-10-08 17:56:11 -0700 | [diff] [blame] | 480 | bool unzeroed; |
Jason Evans | ee41ad4 | 2015-02-15 18:04:46 -0800 | [diff] [blame] | 481 | extent_node_t *node, *prev; |
| 482 | extent_node_t key; |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 483 | |
Jason Evans | 738e089 | 2015-02-18 01:15:50 -0800 | [diff] [blame] | 484 | assert(!cache || !zeroed); |
| 485 | unzeroed = cache || !zeroed; |
Jason Evans | bd87b01 | 2014-04-15 16:35:08 -0700 | [diff] [blame] | 486 | JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 487 | |
Jason Evans | cbf3a6d | 2015-02-11 12:24:27 -0800 | [diff] [blame] | 488 | malloc_mutex_lock(&arena->chunks_mtx); |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 489 | chunk_hooks_assure_initialized_locked(arena, chunk_hooks); |
Jason Evans | a4e1888 | 2015-02-17 15:13:52 -0800 | [diff] [blame] | 490 | extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 491 | false, false); |
Jason Evans | 609ae59 | 2012-10-11 13:53:15 -0700 | [diff] [blame] | 492 | node = extent_tree_ad_nsearch(chunks_ad, &key); |
Jason Evans | 374d26a | 2012-05-09 14:48:35 -0700 | [diff] [blame] | 493 | /* Try to coalesce forward. */ |
Jason Evans | ee41ad4 | 2015-02-15 18:04:46 -0800 | [diff] [blame] | 494 | if (node != NULL && extent_node_addr_get(node) == |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 495 | extent_node_addr_get(&key) && extent_node_committed_get(node) == |
| 496 | committed && !chunk_hooks->merge(chunk, size, |
| 497 | extent_node_addr_get(node), extent_node_size_get(node), false, |
| 498 | arena->ind)) { |
Jason Evans | 374d26a | 2012-05-09 14:48:35 -0700 | [diff] [blame] | 499 | /* |
| 500 | * Coalesce chunk with the following address range. This does |
| 501 | * not change the position within chunks_ad, so only |
| 502 | * remove/insert from/into chunks_szad. |
| 503 | */ |
Jason Evans | 609ae59 | 2012-10-11 13:53:15 -0700 | [diff] [blame] | 504 | extent_tree_szad_remove(chunks_szad, node); |
Jason Evans | 738e089 | 2015-02-18 01:15:50 -0800 | [diff] [blame] | 505 | arena_chunk_cache_maybe_remove(arena, node, cache); |
Jason Evans | ee41ad4 | 2015-02-15 18:04:46 -0800 | [diff] [blame] | 506 | extent_node_addr_set(node, chunk); |
Jason Evans | a4e1888 | 2015-02-17 15:13:52 -0800 | [diff] [blame] | 507 | extent_node_size_set(node, size + extent_node_size_get(node)); |
Jason Evans | ee41ad4 | 2015-02-15 18:04:46 -0800 | [diff] [blame] | 508 | extent_node_zeroed_set(node, extent_node_zeroed_get(node) && |
| 509 | !unzeroed); |
Jason Evans | 609ae59 | 2012-10-11 13:53:15 -0700 | [diff] [blame] | 510 | extent_tree_szad_insert(chunks_szad, node); |
Jason Evans | 738e089 | 2015-02-18 01:15:50 -0800 | [diff] [blame] | 511 | arena_chunk_cache_maybe_insert(arena, node, cache); |
Jason Evans | 374d26a | 2012-05-09 14:48:35 -0700 | [diff] [blame] | 512 | } else { |
| 513 | /* Coalescing forward failed, so insert a new node. */ |
Jason Evans | cbf3a6d | 2015-02-11 12:24:27 -0800 | [diff] [blame] | 514 | node = arena_node_alloc(arena); |
| 515 | if (node == NULL) { |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 516 | /* |
Jason Evans | cbf3a6d | 2015-02-11 12:24:27 -0800 | [diff] [blame] | 517 | * Node allocation failed, which is an exceedingly |
Jason Evans | ee41ad4 | 2015-02-15 18:04:46 -0800 | [diff] [blame] | 518 | * unlikely failure. Leak chunk after making sure its |
| 519 | * pages have already been purged, so that this is only |
| 520 | * a virtual memory leak. |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 521 | */ |
Jason Evans | 8d6a3e8 | 2015-03-18 18:55:33 -0700 | [diff] [blame] | 522 | if (cache) { |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 523 | chunk_purge_wrapper(arena, chunk_hooks, chunk, |
| 524 | size, 0, size); |
Jason Evans | 8d6a3e8 | 2015-03-18 18:55:33 -0700 | [diff] [blame] | 525 | } |
Jason Evans | 741fbc6 | 2013-04-17 09:57:11 -0700 | [diff] [blame] | 526 | goto label_return; |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 527 | } |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 528 | extent_node_init(node, arena, chunk, size, !unzeroed, |
| 529 | committed); |
Jason Evans | 609ae59 | 2012-10-11 13:53:15 -0700 | [diff] [blame] | 530 | extent_tree_ad_insert(chunks_ad, node); |
| 531 | extent_tree_szad_insert(chunks_szad, node); |
Jason Evans | 738e089 | 2015-02-18 01:15:50 -0800 | [diff] [blame] | 532 | arena_chunk_cache_maybe_insert(arena, node, cache); |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 533 | } |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 534 | |
| 535 | /* Try to coalesce backward. */ |
Jason Evans | 609ae59 | 2012-10-11 13:53:15 -0700 | [diff] [blame] | 536 | prev = extent_tree_ad_prev(chunks_ad, node); |
Jason Evans | ee41ad4 | 2015-02-15 18:04:46 -0800 | [diff] [blame] | 537 | if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) + |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 538 | extent_node_size_get(prev)) == chunk && |
| 539 | extent_node_committed_get(prev) == committed && |
| 540 | !chunk_hooks->merge(extent_node_addr_get(prev), |
| 541 | extent_node_size_get(prev), chunk, size, false, arena->ind)) { |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 542 | /* |
| 543 | * Coalesce chunk with the previous address range. This does |
| 544 | * not change the position within chunks_ad, so only |
| 545 | * remove/insert node from/into chunks_szad. |
| 546 | */ |
Jason Evans | 609ae59 | 2012-10-11 13:53:15 -0700 | [diff] [blame] | 547 | extent_tree_szad_remove(chunks_szad, prev); |
| 548 | extent_tree_ad_remove(chunks_ad, prev); |
Jason Evans | 738e089 | 2015-02-18 01:15:50 -0800 | [diff] [blame] | 549 | arena_chunk_cache_maybe_remove(arena, prev, cache); |
Jason Evans | 609ae59 | 2012-10-11 13:53:15 -0700 | [diff] [blame] | 550 | extent_tree_szad_remove(chunks_szad, node); |
Jason Evans | 738e089 | 2015-02-18 01:15:50 -0800 | [diff] [blame] | 551 | arena_chunk_cache_maybe_remove(arena, node, cache); |
Jason Evans | ee41ad4 | 2015-02-15 18:04:46 -0800 | [diff] [blame] | 552 | extent_node_addr_set(node, extent_node_addr_get(prev)); |
Jason Evans | a4e1888 | 2015-02-17 15:13:52 -0800 | [diff] [blame] | 553 | extent_node_size_set(node, extent_node_size_get(prev) + |
| 554 | extent_node_size_get(node)); |
| 555 | extent_node_zeroed_set(node, extent_node_zeroed_get(prev) && |
| 556 | extent_node_zeroed_get(node)); |
Jason Evans | 609ae59 | 2012-10-11 13:53:15 -0700 | [diff] [blame] | 557 | extent_tree_szad_insert(chunks_szad, node); |
Jason Evans | 738e089 | 2015-02-18 01:15:50 -0800 | [diff] [blame] | 558 | arena_chunk_cache_maybe_insert(arena, node, cache); |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 559 | |
Jason Evans | cbf3a6d | 2015-02-11 12:24:27 -0800 | [diff] [blame] | 560 | arena_node_dalloc(arena, prev); |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 561 | } |
Jason Evans | 741fbc6 | 2013-04-17 09:57:11 -0700 | [diff] [blame] | 562 | |
| 563 | label_return: |
Jason Evans | cbf3a6d | 2015-02-11 12:24:27 -0800 | [diff] [blame] | 564 | malloc_mutex_unlock(&arena->chunks_mtx); |
Jason Evans | 7ca0fdf | 2012-04-12 20:20:58 -0700 | [diff] [blame] | 565 | } |
| 566 | |
Jason Evans | 99bd94f | 2015-02-18 16:40:53 -0800 | [diff] [blame] | 567 | void |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 568 | chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, |
Jason Evans | de249c8 | 2015-08-09 16:47:27 -0700 | [diff] [blame] | 569 | size_t size, bool committed) |
Jason Evans | ee41ad4 | 2015-02-15 18:04:46 -0800 | [diff] [blame] | 570 | { |
| 571 | |
| 572 | assert(chunk != NULL); |
| 573 | assert(CHUNK_ADDR2BASE(chunk) == chunk); |
| 574 | assert(size != 0); |
| 575 | assert((size & chunksize_mask) == 0); |
| 576 | |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 577 | chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached, |
Jason Evans | de249c8 | 2015-08-09 16:47:27 -0700 | [diff] [blame] | 578 | &arena->chunks_ad_cached, true, chunk, size, false, committed); |
Jason Evans | 99bd94f | 2015-02-18 16:40:53 -0800 | [diff] [blame] | 579 | arena_maybe_purge(arena); |
Jason Evans | ee41ad4 | 2015-02-15 18:04:46 -0800 | [diff] [blame] | 580 | } |
| 581 | |
Jason Evans | 8d8960f | 2016-03-30 18:36:04 -0700 | [diff] [blame^] | 582 | static bool |
| 583 | chunk_dalloc_default(void *chunk, size_t size, bool committed, |
| 584 | unsigned arena_ind) |
| 585 | { |
| 586 | |
| 587 | if (!have_dss || !chunk_in_dss(chunk)) |
| 588 | return (chunk_dalloc_mmap(chunk, size)); |
| 589 | return (true); |
| 590 | } |
| 591 | |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 592 | void |
Jason Evans | 8d8960f | 2016-03-30 18:36:04 -0700 | [diff] [blame^] | 593 | chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 594 | size_t size, bool zeroed, bool committed) |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 595 | { |
| 596 | |
| 597 | assert(chunk != NULL); |
| 598 | assert(CHUNK_ADDR2BASE(chunk) == chunk); |
| 599 | assert(size != 0); |
| 600 | assert((size & chunksize_mask) == 0); |
| 601 | |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 602 | chunk_hooks_assure_initialized(arena, chunk_hooks); |
| 603 | /* Try to deallocate. */ |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 604 | if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind)) |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 605 | return; |
| 606 | /* Try to decommit; purge if that fails. */ |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 607 | if (committed) { |
| 608 | committed = chunk_hooks->decommit(chunk, size, 0, size, |
| 609 | arena->ind); |
| 610 | } |
Jason Evans | 6ed18cb | 2015-08-12 15:20:34 -0700 | [diff] [blame] | 611 | zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size, |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 612 | arena->ind); |
| 613 | chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained, |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 614 | &arena->chunks_ad_retained, false, chunk, size, zeroed, committed); |
Jason Evans | e2deab7 | 2014-05-15 22:22:27 -0700 | [diff] [blame] | 615 | } |
| 616 | |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 617 | static bool |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 618 | chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length, |
| 619 | unsigned arena_ind) |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 620 | { |
| 621 | |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 622 | return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset), |
| 623 | length)); |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 624 | } |
| 625 | |
| 626 | static bool |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 627 | chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length, |
| 628 | unsigned arena_ind) |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 629 | { |
| 630 | |
Jason Evans | 8fadb1a | 2015-08-04 10:49:46 -0700 | [diff] [blame] | 631 | return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset), |
| 632 | length)); |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 633 | } |
| 634 | |
Jason Evans | 8d8960f | 2016-03-30 18:36:04 -0700 | [diff] [blame^] | 635 | static bool |
| 636 | chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, |
| 637 | unsigned arena_ind) |
Jason Evans | 8d6a3e8 | 2015-03-18 18:55:33 -0700 | [diff] [blame] | 638 | { |
| 639 | |
| 640 | assert(chunk != NULL); |
| 641 | assert(CHUNK_ADDR2BASE(chunk) == chunk); |
| 642 | assert((offset & PAGE_MASK) == 0); |
| 643 | assert(length != 0); |
| 644 | assert((length & PAGE_MASK) == 0); |
| 645 | |
| 646 | return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset), |
| 647 | length)); |
| 648 | } |
| 649 | |
Jason Evans | 8d6a3e8 | 2015-03-18 18:55:33 -0700 | [diff] [blame] | 650 | bool |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 651 | chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, |
| 652 | size_t size, size_t offset, size_t length) |
Jason Evans | 8d6a3e8 | 2015-03-18 18:55:33 -0700 | [diff] [blame] | 653 | { |
| 654 | |
Jason Evans | b49a334 | 2015-07-28 11:28:19 -0400 | [diff] [blame] | 655 | chunk_hooks_assure_initialized(arena, chunk_hooks); |
| 656 | return (chunk_hooks->purge(chunk, size, offset, length, arena->ind)); |
| 657 | } |
| 658 | |
| 659 | static bool |
| 660 | chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b, |
| 661 | bool committed, unsigned arena_ind) |
| 662 | { |
| 663 | |
| 664 | if (!maps_coalesce) |
| 665 | return (true); |
| 666 | return (false); |
| 667 | } |
| 668 | |
| 669 | static bool |
| 670 | chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, |
| 671 | bool committed, unsigned arena_ind) |
| 672 | { |
| 673 | |
| 674 | if (!maps_coalesce) |
| 675 | return (true); |
| 676 | if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b)) |
| 677 | return (true); |
| 678 | |
| 679 | return (false); |
Jason Evans | 8d6a3e8 | 2015-03-18 18:55:33 -0700 | [diff] [blame] | 680 | } |
| 681 | |
Jason Evans | 8d0e04d | 2015-01-30 22:54:08 -0800 | [diff] [blame] | 682 | static rtree_node_elm_t * |
| 683 | chunks_rtree_node_alloc(size_t nelms) |
| 684 | { |
| 685 | |
| 686 | return ((rtree_node_elm_t *)base_alloc(nelms * |
| 687 | sizeof(rtree_node_elm_t))); |
| 688 | } |
| 689 | |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 690 | bool |
Jason Evans | a8f8d75 | 2012-04-21 19:17:21 -0700 | [diff] [blame] | 691 | chunk_boot(void) |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 692 | { |
Matthijs | a1aaf94 | 2015-06-25 22:53:58 +0200 | [diff] [blame] | 693 | #ifdef _WIN32 |
| 694 | SYSTEM_INFO info; |
| 695 | GetSystemInfo(&info); |
| 696 | |
Jason Evans | b946086 | 2015-07-07 20:16:25 -0700 | [diff] [blame] | 697 | /* |
| 698 | * Verify actual page size is equal to or an integral multiple of |
| 699 | * configured page size. |
| 700 | */ |
Matthijs | a1aaf94 | 2015-06-25 22:53:58 +0200 | [diff] [blame] | 701 | if (info.dwPageSize & ((1U << LG_PAGE) - 1)) |
| 702 | return (true); |
| 703 | |
Jason Evans | b946086 | 2015-07-07 20:16:25 -0700 | [diff] [blame] | 704 | /* |
| 705 | * Configure chunksize (if not set) to match granularity (usually 64K), |
| 706 | * so pages_map will always take fast path. |
| 707 | */ |
| 708 | if (!opt_lg_chunk) { |
Jason Evans | 9f4ee60 | 2016-02-24 10:32:45 -0800 | [diff] [blame] | 709 | opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity) |
Jason Evans | b946086 | 2015-07-07 20:16:25 -0700 | [diff] [blame] | 710 | - 1; |
| 711 | } |
Matthijs | a1aaf94 | 2015-06-25 22:53:58 +0200 | [diff] [blame] | 712 | #else |
| 713 | if (!opt_lg_chunk) |
| 714 | opt_lg_chunk = LG_CHUNK_DEFAULT; |
| 715 | #endif |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 716 | |
| 717 | /* Set variables according to the value of opt_lg_chunk. */ |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 718 | chunksize = (ZU(1) << opt_lg_chunk); |
Jason Evans | ae4c7b4 | 2012-04-02 07:04:34 -0700 | [diff] [blame] | 719 | assert(chunksize >= PAGE); |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 720 | chunksize_mask = chunksize - 1; |
Jason Evans | ae4c7b4 | 2012-04-02 07:04:34 -0700 | [diff] [blame] | 721 | chunk_npages = (chunksize >> LG_PAGE); |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 722 | |
Jason Evans | 4d434ad | 2014-04-15 12:09:48 -0700 | [diff] [blame] | 723 | if (have_dss && chunk_dss_boot()) |
Jason Evans | 4201af0 | 2010-01-24 02:53:40 -0800 | [diff] [blame] | 724 | return (true); |
Jason Evans | 9e1810c | 2016-02-24 12:42:23 -0800 | [diff] [blame] | 725 | if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) - |
| 726 | opt_lg_chunk), chunks_rtree_node_alloc, NULL)) |
Jason Evans | cbf3a6d | 2015-02-11 12:24:27 -0800 | [diff] [blame] | 727 | return (true); |
Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 728 | |
| 729 | return (false); |
| 730 | } |
Jason Evans | 20f1fc9 | 2012-10-09 14:46:22 -0700 | [diff] [blame] | 731 | |
/* Pre-fork handler: forward to the DSS module's prefork hook. */
void
chunk_prefork(void)
{

	chunk_dss_prefork();
}
| 738 | |
/* Post-fork (parent) handler: forward to the DSS module's hook. */
void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
}
| 745 | |
/* Post-fork (child) handler: forward to the DSS module's hook. */
void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
}