#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

size_t opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
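/*
 * For example, chunk_recycle() searches chunks_szad for a best size/address
 * fit, whereas chunk_record() searches chunks_ad by address in order to find
 * adjacent extents to coalesce with.
 */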
static extent_tree_t chunks_szad;
static extent_tree_t chunks_ad;

rtree_t *chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *chunk_recycle(size_t size, size_t alignment, bool *zero);
static void chunk_record(void *chunk, size_t size);

/******************************************************************************/

static void *
chunk_recycle(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	extent_node_t *node;
	extent_node_t key;
	size_t alloc_size, leadsize, trailsize;

	alloc_size = size + alignment - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
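	/*
	 * The slack (alignment - chunksize) guarantees that any recycled
	 * extent of alloc_size bytes contains a size-byte range at the
	 * requested alignment, because extent base addresses are always
	 * chunk-aligned.  For example, with 4 MiB chunks, a 4 MiB request at
	 * 8 MiB alignment searches for 4 + 8 - 4 = 8 MiB: the next 8 MiB
	 * boundary lies at most 4 MiB past any chunk-aligned base, leaving at
	 * least 4 MiB of aligned space.
	 */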
	key.addr = NULL;
	key.size = alloc_size;
	malloc_mutex_lock(&chunks_mtx);
	node = extent_tree_szad_nsearch(&chunks_szad, &key);
	if (node == NULL) {
		malloc_mutex_unlock(&chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
	    (uintptr_t)node->addr;
	assert(node->size >= leadsize + size);
	trailsize = node->size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
	/* Remove node from the tree. */
	extent_tree_szad_remove(&chunks_szad, node);
	extent_tree_ad_remove(&chunks_ad, node);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		node->size = leadsize;
		extent_tree_szad_insert(&chunks_szad, node);
		extent_tree_ad_insert(&chunks_ad, node);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			/*
			 * An additional node is required, but
			 * base_node_alloc() can cause a new base chunk to be
			 * allocated.  Drop chunks_mtx in order to avoid
			 * deadlock, and if node allocation fails, deallocate
			 * the result before returning an error.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			node = base_node_alloc();
			if (node == NULL) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
			malloc_mutex_lock(&chunks_mtx);
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
		extent_tree_szad_insert(&chunks_szad, node);
		extent_tree_ad_insert(&chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

	if (node != NULL)
		base_node_dealloc(node);
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
	/* Pages are zeroed as a side effect of pages_purge(). */
	*zero = true;
#else
	if (*zero) {
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		memset(ret, 0, size);
	}
#endif
	return (ret);
}
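
/*
 * Worked example (hypothetical addresses, 4 MiB chunks): recycling 4 MiB at
 * 8 MiB alignment from a recorded 12 MiB extent based at 0xc00000 returns
 * 0x1000000; the 4 MiB lead [0xc00000, 0x1000000) is re-inserted reusing the
 * existing node, and the 4 MiB trail [0x1400000, 0x1800000) is inserted with
 * a node from base_node_alloc().
 */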

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 */
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert((alignment & chunksize_mask) == 0);

	ret = chunk_recycle(size, alignment, zero);
	if (ret != NULL)
		goto label_return;

	ret = chunk_alloc_mmap(size, alignment, zero);
	if (ret != NULL)
		goto label_return;

	if (config_dss) {
		ret = chunk_alloc_dss(size, alignment, zero);
		if (ret != NULL)
			goto label_return;
	}

	/* All strategies for allocation failed. */
	ret = NULL;
label_return:
	if (config_ivsalloc && base == false && ret != NULL) {
		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
			chunk_dealloc(ret, size, true);
			return (NULL);
		}
	}
	if ((config_stats || config_prof) && ret != NULL) {
		bool gdump;

		malloc_mutex_lock(&chunks_mtx);
		if (config_stats)
			stats_chunks.nchunks += (size / chunksize);
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks = stats_chunks.curchunks;
			if (config_prof)
				gdump = true;
		} else if (config_prof)
			gdump = false;
		malloc_mutex_unlock(&chunks_mtx);
		if (config_prof && opt_prof && opt_prof_gdump && gdump)
			prof_gdump();
	}
	if (config_debug && *zero && ret != NULL) {
		size_t i;
		size_t *p = (size_t *)(uintptr_t)ret;

		for (i = 0; i < size / sizeof(size_t); i++)
			assert(p[i] == 0);
	}
	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}
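
/*
 * Sketch of a hypothetical caller (not taken from this file): allocate two
 * chunks' worth of naturally aligned address space, using zeroed pages
 * opportunistically:
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc(chunksize << 1, chunksize, false, &zero);
 *	if (chunk != NULL && zero)
 *		... pages are known to be zeroed; skip explicit zeroing ...
 */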

static void
chunk_record(void *chunk, size_t size)
{
	extent_node_t *xnode, *node, *prev, key;

	pages_purge(chunk, size);

	xnode = NULL;
	malloc_mutex_lock(&chunks_mtx);
	while (true) {
		key.addr = (void *)((uintptr_t)chunk + size);
		node = extent_tree_ad_nsearch(&chunks_ad, &key);
		/* Try to coalesce forward. */
		if (node != NULL && node->addr == key.addr) {
			/*
			 * Coalesce chunk with the following address range.
			 * This does not change the position within chunks_ad,
			 * so only remove/insert from/into chunks_szad.
			 */
			extent_tree_szad_remove(&chunks_szad, node);
			node->addr = chunk;
			node->size += size;
			extent_tree_szad_insert(&chunks_szad, node);
			break;
		} else if (xnode == NULL) {
			/*
			 * It is possible that base_node_alloc() will cause a
			 * new base chunk to be allocated, so take care not to
			 * deadlock on chunks_mtx, and recover if another thread
			 * deallocates an adjacent chunk while this one is busy
			 * allocating xnode.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			xnode = base_node_alloc();
			if (xnode == NULL)
				return;
			malloc_mutex_lock(&chunks_mtx);
		} else {
			/* Coalescing forward failed, so insert a new node. */
			node = xnode;
			xnode = NULL;
			node->addr = chunk;
			node->size = size;
			extent_tree_ad_insert(&chunks_ad, node);
			extent_tree_szad_insert(&chunks_szad, node);
			break;
		}
	}
	/* Discard xnode if it ended up unused due to a race. */
	if (xnode != NULL)
		base_node_dealloc(xnode);

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(&chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(&chunks_szad, prev);
		extent_tree_ad_remove(&chunks_ad, prev);

		extent_tree_szad_remove(&chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		extent_tree_szad_insert(&chunks_szad, node);

		base_node_dealloc(prev);
	}
	malloc_mutex_unlock(&chunks_mtx);
}
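
/*
 * Worked example (hypothetical addresses, 4 MiB chunks): recording
 * [0x1000000, 0x1400000) while [0x1400000, 0x1c00000) is already in the trees
 * coalesces forward into [0x1000000, 0x1c00000); if [0xc00000, 0x1000000) is
 * also present, the backward pass then merges everything into a single 16 MiB
 * extent based at 0xc00000.
 */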

void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_ivsalloc)
		rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
	if (config_stats || config_prof) {
		malloc_mutex_lock(&chunks_mtx);
		stats_chunks.curchunks -= (size / chunksize);
		malloc_mutex_unlock(&chunks_mtx);
	}

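	/*
	 * Record the chunk for recycling if it cannot be unmapped:
	 * chunk_in_dss() identifies sbrk()ed memory, which munmap() must not
	 * touch, and chunk_dealloc_mmap() returns true when it leaves the
	 * mapping in place (e.g. when munmap() is configured out).
	 */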
	if (unmap) {
		if ((config_dss && chunk_in_dss(chunk)) ||
		    chunk_dealloc_mmap(chunk, size))
			chunk_record(chunk, size);
	}
}

bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (config_stats || config_prof) {
		if (malloc_mutex_init(&chunks_mtx))
			return (true);
		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
	}
	if (config_dss && chunk_dss_boot())
		return (true);
	extent_tree_szad_new(&chunks_szad);
	extent_tree_ad_new(&chunks_ad);
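	/*
	 * The rtree key width is the number of significant chunk-address
	 * bits: pointer width minus lg(chunksize).  For example, 64-bit
	 * pointers (LG_SIZEOF_PTR == 3) and 4 MiB chunks (opt_lg_chunk == 22)
	 * yield 64 - 22 = 42 key bits.
	 */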
	if (config_ivsalloc) {
		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    opt_lg_chunk);
		if (chunks_rtree == NULL)
			return (true);
	}

	return (false);
}