#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

uint64_t	huge_nmalloc;
uint64_t	huge_ndalloc;
size_t		huge_allocated;

malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;

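/*
 * Allocate a huge (chunk-aligned) region of at least "size" bytes.  This is a
 * thin wrapper that defers to huge_palloc() with the default chunksize
 * alignment.  Illustrative call (the dss precedence would normally come from
 * the chosen arena):
 *
 *	void *p = huge_malloc(size, false, huge_dss_prec_get(NULL));
 */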
void *
huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
{

	return (huge_palloc(size, chunksize, zero, dss_prec));
}

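/*
 * Allocate a huge region with the specified alignment.  The request is rounded
 * up to a multiple of the chunk size, backed by chunk_alloc(), and tracked via
 * an extent node inserted into the "huge" tree under huge_mtx.  Junk/zero
 * filling honors the is_zeroed state reported back by chunk_alloc().
 */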
void *
huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}

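/*
 * Determine whether a huge allocation can be resized in place: this succeeds
 * (returns false) when the existing chunk count already lies between
 * CHUNK_CEILING(size) and CHUNK_CEILING(size+extra), so no data would move.
 * Returns true when reallocation would require moving the allocation.
 */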
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		return (false);
	}

	/* Reallocation would require a move. */
	return (true);
}

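/*
 * Reallocate a huge region.  The fast path returns ptr unchanged when
 * huge_ralloc_no_move() succeeds; otherwise a new huge region is allocated
 * (first with, then without, the "extra" slack) and up to "size" bytes are
 * copied over.  When mremap(2) support is compiled in, the copy is replaced by
 * remapping the old pages into the new region whenever neither end of the
 * transfer lives in the dss.
 */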
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
		return (ptr);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero, dss_prec);
	else
		ret = huge_malloc(size + extra, zero, dss_prec);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero, dss_prec);
		else
			ret = huge_malloc(size, zero, dss_prec);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination is in dss.
	 */
	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
	    == false && chunk_in_dss(ret) == false))) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(get_errno(), buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		} else if (config_fill && zero == false && opt_junk && oldsize
		    < newsize) {
			/*
			 * mremap(2) clobbers the original mapping, so
			 * junk/zero filling is not preserved.  There is no
			 * need to zero fill here, since any trailing
			 * uninitialized memory is demand-zeroed by the
			 * kernel, but junk filling must be redone.
			 */
			memset(ret + oldsize, 0xa5, newsize - oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		iqalloct(ptr, try_tcache_dalloc);
	}
	return (ret);
}

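/*
 * Junk-fill a huge region that is being deallocated.  Filling is skipped when
 * the pages are about to be unmapped anyway.  The JEMALLOC_JET wrappers below
 * expose the implementation through a replaceable function pointer so that a
 * test harness can intercept it.
 */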
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && config_dss && opt_junk) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

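/*
 * Deallocate a huge region: remove its extent node from the huge tree, update
 * stats, junk-fill when the mapping is going away, and hand the chunk to
 * chunk_dealloc().  Callers pass unmap == false (e.g. the mremap path in
 * huge_ralloc()) when the pages themselves must stay mapped.
 */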
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap)
		huge_dalloc_junk(node->addr, node->size);

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}

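/*
 * Look up the size of a huge allocation by searching the huge tree for its
 * extent node.  The pointer must refer to a live huge allocation.
 */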
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

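/*
 * Return the dss precedence that huge allocations should use, delegating arena
 * selection to choose_arena() (which falls back to the calling thread's arena
 * when arena is NULL).
 */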
dss_prec_t
huge_dss_prec_get(arena_t *arena)
{

	return (arena_dss_prec_get(choose_arena(arena)));
}

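/*
 * Read the profiling context associated with a huge allocation's extent node.
 */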
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

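/*
 * Associate a profiling context with a huge allocation's extent node.
 */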
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}

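/*
 * One-time initialization of huge allocation state: the huge_mtx mutex, the
 * extent tree, and (when stats are enabled) the counters.  Returns true on
 * error.
 */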
bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	if (config_stats) {
		huge_nmalloc = 0;
		huge_ndalloc = 0;
		huge_allocated = 0;
	}

	return (false);
}

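/*
 * Fork handlers: acquire huge_mtx around fork(2) and release it again in the
 * parent and child afterward, so that the huge tree cannot be left permanently
 * locked in the child.
 */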
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}