#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

uint64_t	huge_nmalloc;
uint64_t	huge_ndalloc;
size_t		huge_allocated;

malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;

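/*
 * Allocate a huge (stand-alone chunk) object with default chunk alignment.
 * The heavy lifting is done by huge_palloc(), which rounds the request up to
 * a multiple of chunksize and records it in the huge extent tree.
 */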
void *
huge_malloc(size_t size, bool zero)
{

	return (huge_palloc(size, chunksize, zero));
}

void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	ret = chunk_alloc(csize, alignment, false, &is_zeroed,
	    chunk_dss_prec_get());
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}

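/*
 * Try to satisfy a resize request without moving the allocation.  Returns ptr
 * if the chunk-rounded old size already covers the requested size class;
 * otherwise returns NULL and the caller must allocate new space and copy.
 */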
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}

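/*
 * Reallocate a huge object.  If the size class cannot be preserved in place,
 * new space is allocated and the contents are copied, preferring mremap(2)
 * over memcpy() when it is available and neither mapping lies in the dss.
 */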
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in dss.
	 */
	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
	    == false && chunk_in_dss(ret) == false))) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(get_errno(), buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		iqalloct(ptr, try_tcache_dalloc);
	}
	return (ret);
}

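/*
 * When built with JEMALLOC_JET, huge_dalloc_junk is exported through a
 * function pointer so that tests can intercept junk filling; otherwise the
 * static definition below is used directly.
 */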
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && config_dss && opt_junk) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

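/*
 * Deallocate a huge object: remove its extent node from the tree, update
 * stats, optionally junk-fill, and return the chunk(s) to the chunk layer.
 */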
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap)
		huge_dalloc_junk(node->addr, node->size);

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}

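/* Look up the usable size recorded in a huge allocation's extent node. */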
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

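/*
 * Heap profiling context accessors; the prof_ctx_t pointer is stored in the
 * allocation's extent node.
 */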
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}

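/* Bootstrapping and fork-safety hooks for the huge allocation subsystem. */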
bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	if (config_stats) {
		huge_nmalloc = 0;
		huge_ndalloc = 0;
		huge_allocated = 0;
	}

	return (false);
}

void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}