#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Protects chunk-related data structures. */
static malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;

void *
huge_malloc(arena_t *arena, size_t size, bool zero)
{

	return (huge_palloc(arena, size, chunksize, zero));
}

void *
huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

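	/*
	 * CHUNK_CEILING() rounds size up to a multiple of chunksize by adding
	 * chunksize_mask and masking off the low bits, so a size within
	 * chunksize-1 bytes of SIZE_T_MAX overflows the addition and rounds
	 * to 0; that wrap-around is treated as allocation failure below
	 * rather than as a tiny request.
	 */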
	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to
	 * arena_chunk_alloc_huge(), so that it is possible to make correct
	 * junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = choose_arena(arena);
	ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
	if (ret == NULL) {
		base_node_dalloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;
	node->arena = arena;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	malloc_mutex_unlock(&huge_mtx);

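	/*
	 * Junk fill if requested; otherwise, when opt_zero is set, only
	 * memset() to zero if the chunk allocator did not already return
	 * zeroed memory (tracked by is_zeroed), avoiding a redundant write
	 * over freshly mapped pages.
	 */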
	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}

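/*
 * Return false if the existing huge allocation can absorb the resize request
 * without moving (i.e. the chunk-rounded size is unchanged), true if
 * reallocation would require moving the data to a new chunk.
 */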
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		return (false);
	}

	/* Reallocation would require a move. */
	return (true);
}
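
/*
 * Worked example (assuming the 4 MiB default chunksize; the actual value is
 * controlled by opt.lg_chunk): an existing 8 MiB huge allocation satisfies
 * any new size in the 5-8 MiB range in place, since both round up to the
 * same 8 MiB chunk ceiling, whereas a 3 MiB request rounds to 4 MiB, so the
 * chunk count changes and huge_ralloc() must move the data.
 */

/*
 * Reallocate a huge allocation, moving it whenever the chunk-rounded size
 * changes.  The extra bytes are best effort: if allocating size+extra fails,
 * the request is retried with just size, and at most size bytes are copied.
 */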
void *
huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
		return (ptr);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(arena, size + extra, alignment, zero);
	else
		ret = huge_malloc(arena, size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(arena, size, alignment, zero);
		else
			ret = huge_malloc(arena, size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(ret, ptr, copysize);
	iqalloct(ptr, try_tcache_dalloc);
	return (ret);
}
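
/*
 * The JEMALLOC_JET wrappers below compile the static junk-fill routine under
 * an _impl name and publish huge_dalloc_junk as a function pointer
 * initialized to that implementation, so test builds can interpose their own
 * hook; in non-JET builds huge_dalloc_junk is simply the static function.
 */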

#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && opt_junk) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

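/*
 * Free a huge allocation: remove its extent node from the huge tree, junk
 * fill the memory if appropriate, return the chunk to its owning arena, and
 * release the node.
 */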
void
huge_dalloc(void *ptr)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	malloc_mutex_unlock(&huge_mtx);

	huge_dalloc_junk(node->addr, node->size);
	arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
	base_node_dalloc(node);
}

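/* Return the chunk-rounded size of the huge allocation that ptr refers to. */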
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

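/* Look up the profiling context associated with a huge allocation. */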
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

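/* Associate a profiling context with a huge allocation. */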
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}

bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	return (false);
}

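/*
 * The following are called from jemalloc's fork handlers so that huge_mtx is
 * held across fork() and then released (or reinitialized) in the parent and
 * child, preventing a forked child from inheriting a locked mutex.
 */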
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}