#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Protects chunk-related data structures. */
static malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;

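/*
 * Allocate a huge region with the default chunk alignment.  This is a thin
 * wrapper around huge_palloc().
 */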
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero)
{

	return (huge_palloc(tsd, arena, size, chunksize, zero));
}

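/*
 * Allocate a chunk-aligned huge region with the requested alignment
 * (chunksize or larger), tracked by an extent node in the global huge tree.
 */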
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to
	 * arena_chunk_alloc_huge(), so that it is possible to make correct
	 * junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = choose_arena(tsd, arena);
	ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
	if (ret == NULL) {
		base_node_dalloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;
	node->arena = arena;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (unlikely(opt_junk))
			memset(ret, 0xa5, csize);
		else if (unlikely(opt_zero) && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}

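/*
 * Under JEMALLOC_JET, rename huge_dalloc_junk() so that the test
 * infrastructure can interpose on it via the function pointer declared
 * below; normal builds keep it as a plain static function.
 */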
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

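/*
 * Try to satisfy a resize request without moving the allocation: succeed if
 * the chunk-rounded size class is unchanged, or if the allocation can be
 * shrunk in place by discarding its excess chunks.  Returns false on
 * success, true if the caller must reallocate and copy.
 */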
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/* Both allocations must be huge to avoid a move. */
	if (oldsize <= arena_maxclass)
		return (true);

	assert(CHUNK_CEILING(oldsize) == oldsize);

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		return (false);
	}

	/* Overflow. */
	if (CHUNK_CEILING(size) == 0)
		return (true);

	/* Shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(size)) {
		extent_node_t *node, key;
		void *excess_addr;
		size_t excess_size;

		malloc_mutex_lock(&huge_mtx);

		key.addr = ptr;
		node = extent_tree_ad_search(&huge, &key);
		assert(node != NULL);
		assert(node->addr == ptr);

		/* Update the size of the huge allocation. */
		node->size = CHUNK_CEILING(size);

		malloc_mutex_unlock(&huge_mtx);

		excess_addr = (void *)((uintptr_t)node->addr +
		    CHUNK_CEILING(size));
		excess_size = CHUNK_CEILING(oldsize) - CHUNK_CEILING(size);

		/* Zap the excess chunks. */
		huge_dalloc_junk(excess_addr, excess_size);
		arena_chunk_dalloc_huge(node->arena, excess_addr, excess_size);

		return (false);
	}

	return (true);
}

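/*
 * Resize a huge allocation, moving it only if huge_ralloc_no_move() fails.
 * When a move is required, at most size bytes are copied and the old region
 * is freed via iqalloc().
 */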
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
		return (ptr);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(tsd, arena, size + extra, alignment, zero);
	else
		ret = huge_malloc(tsd, arena, size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(tsd, arena, size, alignment, zero);
		else
			ret = huge_malloc(tsd, arena, size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(ret, ptr, copysize);
	iqalloc(tsd, ptr, try_tcache_dalloc);
	return (ret);
}

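/*
 * Deallocate a huge region: remove its extent node from the huge tree, junk
 * fill if appropriate, return the chunks to the owning arena, and release
 * the node.
 */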
void
huge_dalloc(void *ptr)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	malloc_mutex_unlock(&huge_mtx);

	huge_dalloc_junk(node->addr, node->size);
	arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
	base_node_dalloc(node);
}

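/* Return the (chunk-rounded) usable size of a huge allocation. */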
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

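/* Return the profiling context associated with a huge allocation. */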
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_tctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

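/* Set the profiling context associated with a huge allocation. */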
void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_tctx = tctx;

	malloc_mutex_unlock(&huge_mtx);
}

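/*
 * Initialize huge allocation bookkeeping.  Returns true on error (mutex
 * initialization failure).
 */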
bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	return (false);
}

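/*
 * Fork handlers: huge_prefork() acquires huge_mtx before fork(), and the
 * postfork functions release (parent) or reinitialize (child) the mutex
 * afterward, so that no lock is left held across fork().
 */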
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}