#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;

malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;

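/*
 * Allocate a huge region of at least size bytes.  huge_malloc() simply
 * delegates to huge_palloc() with the minimum (chunk) alignment.
 */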
void *
huge_malloc(arena_t *arena, size_t size, bool zero, dss_prec_t dss_prec)
{

	return (huge_palloc(arena, size, chunksize, zero, dss_prec));
}

void *
huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero,
    dss_prec_t dss_prec)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	ret = chunk_alloc(arena, csize, alignment, false, &is_zeroed, dss_prec);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;
	node->arena = arena;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}

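/*
 * Determine whether the huge allocation at ptr can satisfy a request for size
 * (plus optional extra) bytes without being moved.  Returns false if the
 * allocation can be left in place, true if reallocation would require a move.
 */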
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		return (false);
	}

	/* Reallocation would require a move. */
	return (true);
}

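/*
 * Reallocate the huge region at ptr.  Resize in place when possible;
 * otherwise allocate a new huge region, copy the contents (via mremap(2)
 * where supported), and free the old region.
 */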
void *
huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc,
    dss_prec_t dss_prec)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
		return (ptr);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(arena, size + extra, alignment, zero,
		    dss_prec);
	else
		ret = huge_malloc(arena, size + extra, zero, dss_prec);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(arena, size, alignment, zero,
			    dss_prec);
		else
			ret = huge_malloc(arena, size, zero, dss_prec);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination is in dss.
	 */
	if (oldsize >= chunksize && (have_dss == false || (chunk_in_dss(ptr)
	    == false && chunk_in_dss(ret) == false))) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(get_errno(), buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		} else if (config_fill && zero == false && opt_junk && oldsize
		    < newsize) {
			/*
			 * mremap(2) clobbers the original mapping, so
			 * junk/zero filling is not preserved.  There is no
			 * need to zero fill here, since any trailing
			 * uninitialized memory is demand-zeroed by the
			 * kernel, but junk filling must be redone.
			 */
			memset(ret + oldsize, 0xa5, newsize - oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		iqalloct(ptr, try_tcache_dalloc);
	}
	return (ret);
}

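/*
 * huge_dalloc_junk() fills a region with 0x5a bytes on deallocation when junk
 * filling is enabled, unless the chunk is about to be unmapped anyway.  Under
 * JEMALLOC_JET the symbol is redirected through a function pointer so that
 * tests can interpose on it.
 */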
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && opt_junk) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

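/*
 * Deallocate the huge region at ptr: remove its extent node from the huge
 * tree, update statistics, optionally junk fill, and return the underlying
 * chunk to the chunk layer (unmapping it if unmap is true).
 */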
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap)
		huge_dalloc_junk(node->addr, node->size);

	chunk_dealloc(node->arena, node->addr, node->size, unmap);

	base_node_dealloc(node);
}

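/* Return the size of the huge allocation at ptr. */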
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in the tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

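/*
 * Return the dss precedence of the arena that would service this allocation
 * (the given arena, or an automatically chosen one if arena is NULL).
 */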
dss_prec_t
huge_dss_prec_get(arena_t *arena)
{

	return (arena_dss_prec_get(choose_arena(arena)));
}

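/* Return the profiling context associated with the huge allocation at ptr. */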
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in the tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

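/* Associate the profiling context ctx with the huge allocation at ptr. */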
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in the tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}

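/*
 * Initialize the huge allocation subsystem: set up the mutex, the tree of
 * huge allocations, and the statistics counters.  Returns true on error.
 */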
bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	if (config_stats) {
		huge_nmalloc = 0;
		huge_ndalloc = 0;
		huge_allocated = 0;
	}

	return (false);
}

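/*
 * Fork handlers: acquire huge_mtx before fork() and release it in the parent
 * and child afterward, so that the mutex is in a consistent state in the
 * child process.
 */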
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}