#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Protects chunk-related data structures. */
static malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;

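/*
 * huge_malloc() is a thin wrapper around huge_palloc() that requests chunksize
 * alignment, so every huge allocation is chunk-aligned.
 */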
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero)
{

    return (huge_palloc(tsd, arena, size, chunksize, zero));
}

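/*
 * Allocate one or more contiguous chunks to satisfy a huge request with the
 * given alignment, record the allocation in the huge extent tree, and apply
 * junk/zero fill as configured.
 */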
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero)
{
    void *ret;
    size_t csize;
    extent_node_t *node;
    bool is_zeroed;

    /* Allocate one or more contiguous chunks for this request. */

    csize = CHUNK_CEILING(size);
    if (csize == 0) {
        /* size is large enough to cause size_t wrap-around. */
        return (NULL);
    }

    /* Allocate an extent node with which to track the chunk. */
    node = base_node_alloc();
    if (node == NULL)
        return (NULL);

    /*
     * Copy zero into is_zeroed and pass the copy to arena_chunk_alloc_huge(),
     * so that it is possible to make correct junk/zero fill decisions below.
     */
    is_zeroed = zero;
    arena = choose_arena(tsd, arena);
    ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
    if (ret == NULL) {
        base_node_dalloc(node);
        return (NULL);
    }

    /* Insert node into the huge tree. */
    node->addr = ret;
    node->size = csize;
    node->arena = arena;

    malloc_mutex_lock(&huge_mtx);
    extent_tree_ad_insert(&huge, node);
    malloc_mutex_unlock(&huge_mtx);

    if (config_fill && zero == false) {
        if (unlikely(opt_junk))
            memset(ret, 0xa5, csize);
        else if (unlikely(opt_zero) && is_zeroed == false)
            memset(ret, 0, csize);
    }

    return (ret);
}

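/*
 * Under JEMALLOC_JET the junk-filling hook is exposed through a replaceable
 * function pointer so that tests can intercept it; otherwise huge_dalloc_junk()
 * is a plain static function.
 */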
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

    if (config_fill && have_dss && unlikely(opt_junk)) {
        /*
         * Only bother junk filling if the chunk isn't about to be
         * unmapped.
         */
        if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
            memset(ptr, 0x5a, usize);
    }
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

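/*
 * Try to satisfy a resize request without moving the allocation.  Returns
 * false if the request was satisfied in place (same chunk-rounded size class,
 * or an in-place shrink), true if the caller must reallocate and copy.
 */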
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

    /* Both allocations must be huge to avoid a move. */
    if (oldsize <= arena_maxclass)
        return (true);

    assert(CHUNK_CEILING(oldsize) == oldsize);

    /*
     * Avoid moving the allocation if the size class can be left the same.
     */
    if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
        && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
        return (false);
    }

    /* Overflow. */
    if (CHUNK_CEILING(size) == 0)
        return (true);

    /* Shrink the allocation in-place. */
    if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(size)) {
        extent_node_t *node, key;
        void *excess_addr;
        size_t excess_size;

        malloc_mutex_lock(&huge_mtx);

        key.addr = ptr;
        node = extent_tree_ad_search(&huge, &key);
        assert(node != NULL);
        assert(node->addr == ptr);

        /* Update the size of the huge allocation. */
        node->size = CHUNK_CEILING(size);

        malloc_mutex_unlock(&huge_mtx);

        excess_addr = node->addr + CHUNK_CEILING(size);
        excess_size = CHUNK_CEILING(oldsize) - CHUNK_CEILING(size);

        /* Zap the excess chunks. */
        huge_dalloc_junk(excess_addr, excess_size);
        arena_chunk_dalloc_huge(node->arena, excess_addr, excess_size);

        return (false);
    }

    return (true);
}

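/*
 * Resize a huge allocation, preferring the in-place path via
 * huge_ralloc_no_move(); if the allocation must move, allocate new space,
 * copy, and free the old region.
 */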
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
{
    void *ret;
    size_t copysize;

    /* Try to avoid moving the allocation. */
    if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
        return (ptr);

    /*
     * size and oldsize are different enough that we need to use a
     * different size class.  In that case, fall back to allocating new
     * space and copying.
     */
    if (alignment > chunksize)
        ret = huge_palloc(tsd, arena, size + extra, alignment, zero);
    else
        ret = huge_malloc(tsd, arena, size + extra, zero);

    if (ret == NULL) {
        if (extra == 0)
            return (NULL);
        /* Try again, this time without extra. */
        if (alignment > chunksize)
            ret = huge_palloc(tsd, arena, size, alignment, zero);
        else
            ret = huge_malloc(tsd, arena, size, zero);

        if (ret == NULL)
            return (NULL);
    }

    /*
     * Copy at most size bytes (not size+extra), since the caller has no
     * expectation that the extra bytes will be reliably preserved.
     */
    copysize = (size < oldsize) ? size : oldsize;
    memcpy(ret, ptr, copysize);
    iqalloc(tsd, ptr, try_tcache_dalloc);
    return (ret);
}

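/*
 * Remove the allocation's extent node from the huge tree, junk fill if
 * appropriate, and return the backing chunks to the arena.
 */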
void
huge_dalloc(void *ptr)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = ptr;
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);
    assert(node->addr == ptr);
    extent_tree_ad_remove(&huge, node);

    malloc_mutex_unlock(&huge_mtx);

    huge_dalloc_junk(node->addr, node->size);
    arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
    base_node_dalloc(node);
}

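/* Look up the size recorded for the huge allocation at ptr. */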
size_t
huge_salloc(const void *ptr)
{
    size_t ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Look up in tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    ret = node->size;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}

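/* Accessors for the profiling context stored in a huge allocation's node. */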
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
    prof_tctx_t *ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Look up in tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    ret = node->prof_tctx;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}

void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Look up in tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    node->prof_tctx = tctx;

    malloc_mutex_unlock(&huge_mtx);
}

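/* One-time initialization of the huge allocation bookkeeping; returns true on error. */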
bool
huge_boot(void)
{

    /* Initialize chunks data. */
    if (malloc_mutex_init(&huge_mtx))
        return (true);
    extent_tree_ad_new(&huge);

    return (false);
}

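/*
 * Fork handlers: huge_mtx is acquired before fork() and released in the parent
 * and child afterward, so the child does not inherit the mutex in a locked
 * state.
 */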
void
huge_prefork(void)
{

    malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

    malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

    malloc_mutex_postfork_child(&huge_mtx);
}