#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Protects chunk-related data structures. */
static malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;

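/*
 * Allocate a huge object of at least size usable bytes.  The request is
 * rounded up via s2u() and delegated to huge_palloc() with the default
 * chunksize alignment.
 */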
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, bool try_tcache)
{
	size_t usize;

	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (NULL);
	}

	return (huge_palloc(tsd, arena, usize, chunksize, zero, try_tcache));
}

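/*
 * Allocate a huge object of usize usable bytes, backed by whole chunks with
 * the requested alignment.  The backing chunks come from the chosen arena and
 * are tracked by an extent node inserted into the huge tree.
 */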
29void *
Jason Evans155bfa72014-10-05 17:54:10 -070030huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
Jason Evansfc0b3b72014-10-09 17:54:06 -070031 bool zero, bool try_tcache)
Mike Hommeyeae26902012-04-10 19:50:33 +020032{
Jason Evanse476f8a2010-01-16 09:53:50 -080033 void *ret;
34 size_t csize;
35 extent_node_t *node;
Jason Evans7ad54c12012-04-21 16:04:51 -070036 bool is_zeroed;
Jason Evanse476f8a2010-01-16 09:53:50 -080037
38 /* Allocate one or more contiguous chunks for this request. */
39
Jason Evans155bfa72014-10-05 17:54:10 -070040 csize = CHUNK_CEILING(usize);
41 assert(csize >= usize);
Jason Evanse476f8a2010-01-16 09:53:50 -080042
43 /* Allocate an extent node with which to track the chunk. */
Daniel Micayf22214a2014-10-06 03:42:10 -040044 node = ipalloct(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
Jason Evansfc0b3b72014-10-09 17:54:06 -070045 CACHELINE, false, try_tcache, NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -080046 if (node == NULL)
47 return (NULL);
48
Jason Evans7ad54c12012-04-21 16:04:51 -070049 /*
50 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
51 * it is possible to make correct junk/zero fill decisions below.
52 */
53 is_zeroed = zero;
Jason Evans8bb31982014-10-07 23:14:57 -070054 arena = arena_choose(tsd, arena);
55 if (unlikely(arena == NULL)) {
56 base_node_dalloc(node);
57 return (NULL);
58 }
Daniel Micaya95018e2014-10-04 01:39:32 -040059 ret = arena_chunk_alloc_huge(arena, NULL, csize, alignment, &is_zeroed);
Jason Evanse476f8a2010-01-16 09:53:50 -080060 if (ret == NULL) {
Jason Evansfc0b3b72014-10-09 17:54:06 -070061 idalloct(tsd, node, try_tcache);
Jason Evanse476f8a2010-01-16 09:53:50 -080062 return (NULL);
63 }
64
65 /* Insert node into huge. */
66 node->addr = ret;
Jason Evans155bfa72014-10-05 17:54:10 -070067 node->size = usize;
aravindfb7fe502014-05-05 15:16:56 -070068 node->arena = arena;
Jason Evanse476f8a2010-01-16 09:53:50 -080069
70 malloc_mutex_lock(&huge_mtx);
71 extent_tree_ad_insert(&huge, node);
Jason Evanse476f8a2010-01-16 09:53:50 -080072 malloc_mutex_unlock(&huge_mtx);
73
Jason Evans551ebc42014-10-03 10:16:09 -070074 if (config_fill && !zero) {
Jason Evans9c640bf2014-09-11 16:20:44 -070075 if (unlikely(opt_junk))
Jason Evans155bfa72014-10-05 17:54:10 -070076 memset(ret, 0xa5, usize);
Jason Evans551ebc42014-10-03 10:16:09 -070077 else if (unlikely(opt_zero) && !is_zeroed)
Jason Evans155bfa72014-10-05 17:54:10 -070078 memset(ret, 0, usize);
Jason Evanse476f8a2010-01-16 09:53:50 -080079 }
Jason Evanse476f8a2010-01-16 09:53:50 -080080
81 return (ret);
82}
83
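/*
 * Junk-fill a huge allocation that is being deallocated, but only when its
 * memory is not about to be unmapped anyway.  The JEMALLOC_JET indirection
 * below lets the test suite interpose on this function.
 */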
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

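/*
 * Try to expand a huge allocation in place by allocating additional chunks
 * contiguous with the existing ones.  Returns false on success, true on
 * failure.
 */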
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero)
{
	size_t usize;
	void *expand_addr;
	size_t expand_size;
	extent_node_t *node, key;
	arena_t *arena;
	bool is_zeroed;
	void *ret;

	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (true);
	}

	expand_addr = ptr + CHUNK_CEILING(oldsize);
	expand_size = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
	assert(expand_size > 0);

	malloc_mutex_lock(&huge_mtx);

	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);

	/* Find the current arena. */
	arena = node->arena;

	malloc_mutex_unlock(&huge_mtx);

	/*
	 * Copy zero into is_zeroed and pass the copy to
	 * arena_chunk_alloc_huge(), so that it is possible to make correct
	 * junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	ret = arena_chunk_alloc_huge(arena, expand_addr, expand_size, chunksize,
	    &is_zeroed);
	if (ret == NULL)
		return (true);

	assert(ret == expand_addr);

	malloc_mutex_lock(&huge_mtx);
	/* Update the size of the huge allocation. */
	node->size = usize;
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && !zero) {
		if (unlikely(opt_junk))
			memset(ptr + oldsize, 0xa5, usize - oldsize);
		else if (unlikely(opt_zero) && !is_zeroed)
			memset(ptr + oldsize, 0, usize - oldsize);
	}
	return (false);
}

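/*
 * Try to satisfy a huge reallocation request without moving the object:
 * reuse the existing chunks if they already accommodate the new size, shrink
 * in place by discarding excess chunks, or expand in place via
 * huge_ralloc_no_move_expand().  Returns false if the request was satisfied
 * without a move.
 */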
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
	size_t usize;

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize)
		return (true);

	assert(s2u(oldsize) == oldsize);
	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (true);
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		size_t usize_next;

		/* Increase usize to incorporate extra. */
		while (usize < s2u(size+extra) && (usize_next = s2u(usize+1)) <
		    oldsize)
			usize = usize_next;

		/* Update the size of the huge allocation if it changed. */
		if (oldsize != usize) {
			extent_node_t *node, key;

			malloc_mutex_lock(&huge_mtx);

			key.addr = ptr;
			node = extent_tree_ad_search(&huge, &key);
			assert(node != NULL);
			assert(node->addr == ptr);

			assert(node->size != usize);
			node->size = usize;

			malloc_mutex_unlock(&huge_mtx);

			if (oldsize < usize) {
				if (zero || (config_fill &&
				    unlikely(opt_zero))) {
					memset(ptr + oldsize, 0, usize -
					    oldsize);
				} else if (config_fill && unlikely(opt_junk)) {
					memset(ptr + oldsize, 0xa5, usize -
					    oldsize);
				}
			} else if (config_fill && unlikely(opt_junk) && oldsize
			    > usize)
				memset(ptr + usize, 0x5a, oldsize - usize);
		}
		return (false);
	}

	/* Shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) {
		extent_node_t *node, key;
		void *excess_addr;
		size_t excess_size;

		malloc_mutex_lock(&huge_mtx);

		key.addr = ptr;
		node = extent_tree_ad_search(&huge, &key);
		assert(node != NULL);
		assert(node->addr == ptr);

		/* Update the size of the huge allocation. */
		node->size = usize;

		malloc_mutex_unlock(&huge_mtx);

		excess_addr = node->addr + CHUNK_CEILING(usize);
		excess_size = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

		/* Zap the excess chunks. */
		huge_dalloc_junk(ptr + usize, oldsize - usize);
		if (excess_size > 0) {
			arena_chunk_dalloc_huge(node->arena, excess_addr,
			    excess_size);
		}

		return (false);
	}

	/* Attempt to expand the allocation in-place. */
	if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
		if (extra == 0)
			return (true);

		/* Try again, this time without extra. */
		return (huge_ralloc_no_move_expand(ptr, oldsize, size, zero));
	}
	return (false);
}

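/*
 * Reallocate a huge object.  If the request cannot be satisfied in place, a
 * new huge allocation is created, the contents are copied, and the old
 * allocation is freed.
 */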
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
    bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(ptr, oldsize, size, extra, zero))
		return (ptr);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize) {
		ret = huge_palloc(tsd, arena, size + extra, alignment, zero,
		    try_tcache_alloc);
	} else {
		ret = huge_malloc(tsd, arena, size + extra, zero,
		    try_tcache_alloc);
	}

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize) {
			ret = huge_palloc(tsd, arena, size, alignment, zero,
			    try_tcache_alloc);
		} else {
			ret = huge_malloc(tsd, arena, size, zero,
			    try_tcache_alloc);
		}

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(ret, ptr, copysize);
	iqalloc(tsd, ptr, try_tcache_dalloc);
	return (ret);
}

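/*
 * Deallocate a huge object: remove its extent node from the huge tree,
 * junk-fill the memory if so configured, return the backing chunks to the
 * owning arena, and free the node.
 */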
void
huge_dalloc(tsd_t *tsd, void *ptr, bool try_tcache)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	malloc_mutex_unlock(&huge_mtx);

	huge_dalloc_junk(node->addr, node->size);
	arena_chunk_dalloc_huge(node->arena, node->addr,
	    CHUNK_CEILING(node->size));
	idalloct(tsd, node, try_tcache);
}

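/* Return the usable size of the huge allocation pointed to by ptr. */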
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

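/* Return the profiling context associated with a huge allocation. */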
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_tctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

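/* Associate a profiling context with a huge allocation. */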
void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_tctx = tctx;

	malloc_mutex_unlock(&huge_mtx);
}

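/* Bootstrap the huge allocation subsystem.  Returns true on error. */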
bool
huge_boot(void)
{

	/* Initialize huge allocation data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	return (false);
}

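/* Fork handlers that acquire/release huge_mtx around fork(). */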
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}