#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

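/* Huge allocation statistics, updated under huge_mtx when config_stats is enabled. */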
uint64_t	huge_nmalloc;
uint64_t	huge_ndalloc;
size_t		huge_allocated;

malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;

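/* Allocate a huge object using the default (chunksize) alignment. */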
void *
huge_malloc(size_t size, bool zero)
{

	return (huge_palloc(size, chunksize, zero));
}

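/* As huge_malloc(), but with an explicitly requested alignment. */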
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	ret = chunk_alloc(csize, alignment, false, &is_zeroed,
	    chunk_dss_prec_get());
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}

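/*
 * Return false if the chunks already backing ptr suffice for size bytes and
 * do not exceed what size+extra would require, i.e. the allocation can stay
 * in place; return true if reallocation would require a move.
 */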
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		return (false);
	}

	/* Reallocation would require a move. */
	return (true);
}

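/*
 * Reallocate a huge object.  If huge_ralloc_no_move() fails, allocate new
 * space, copy at most size bytes, and release the old allocation, remapping
 * via mremap(2) where JEMALLOC_MREMAP makes that possible.
 */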
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
		return (ptr);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in dss.
	 */
	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
	    == false && chunk_in_dss(ret) == false))) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(get_errno(), buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		} else if (config_fill && zero == false && opt_junk && oldsize
		    < newsize) {
			/*
			 * mremap(2) clobbers the original mapping, so
			 * junk/zero filling is not preserved.  There is no
			 * need to zero fill here, since any trailing
			 * uninitialized memory is demand-zeroed by the
			 * kernel, but junk filling must be redone.
			 */
			memset(ret + oldsize, 0xa5, newsize - oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		iqalloct(ptr, try_tcache_dalloc);
	}
	return (ret);
}

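/*
 * Under JEMALLOC_JET the junk-filling hook is exposed as a writable function
 * pointer so the test harness can intercept or replace it; otherwise
 * huge_dalloc_junk resolves directly to the static implementation below.
 */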
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && config_dss && opt_junk) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

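/*
 * Deallocate a huge object: remove its extent node from the huge tree, update
 * stats, optionally junk fill, and hand the chunks back to the chunk layer.
 */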
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap)
		huge_dalloc_junk(node->addr, node->size);

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}

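/* Return the size, in bytes, of the huge allocation at ptr. */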
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

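/* Return the profiling context recorded for the huge allocation at ptr. */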
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

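/* Associate profiling context ctx with the huge allocation at ptr. */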
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}

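/* Initialize huge allocation bookkeeping: huge_mtx, the huge tree, and stats. */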
bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	if (config_stats) {
		huge_nmalloc = 0;
		huge_ndalloc = 0;
		huge_allocated = 0;
	}

	return (false);
}

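/*
 * Fork hooks: acquire huge_mtx before fork(2) and release it in the parent
 * and child afterward, so the mutex is left in a consistent state.
 */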
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}