#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

uint64_t	huge_nmalloc;
uint64_t	huge_ndalloc;
size_t		huge_allocated;

malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;

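/*
 * Allocate a huge region using the default (chunksize) alignment; this is a
 * thin wrapper around huge_palloc().
 */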
void *
huge_malloc(size_t size, bool zero)
{

	return (huge_palloc(size, chunksize, zero));
}

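/*
 * Allocate one or more chunks to satisfy size with the requested alignment,
 * and register the result in the global tree of huge allocations so that it
 * can later be looked up by address.
 */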
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	ret = chunk_alloc(csize, alignment, false, &is_zeroed);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}

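/*
 * Try to satisfy a reallocation request in place.  Returns ptr if the
 * existing chunk(s) can be reused as-is, or NULL if the allocation would have
 * to move.
 */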
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		if (config_fill && opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a,
			    oldsize - size);
		}
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}

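/*
 * Reallocate a huge region.  In-place resizing is attempted first; failing
 * that, new space is allocated and the contents are moved, using mremap(2)
 * when it is available and applicable.
 */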
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination is in dss.
	 */
	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
	    == false && chunk_in_dss(ret) == false))) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		iqalloc(ptr);
	}
	return (ret);
}

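/*
 * Deallocate a huge region.  When unmap is false the underlying chunk is not
 * returned to the system; huge_ralloc() relies on this when it hands the old
 * mapping over to mremap(2).
 */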
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap && config_fill && config_dss && opt_junk)
		memset(node->addr, 0x5a, node->size);

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}

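/* Return the size of the huge allocation that ptr refers to. */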
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

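/*
 * Get/set the heap profiling context stored in a huge allocation's extent
 * node.
 */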
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}

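/* Initialize the mutex, tree, and statistics used for huge allocations. */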
bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	if (config_stats) {
		huge_nmalloc = 0;
		huge_ndalloc = 0;
		huge_allocated = 0;
	}

	return (false);
}

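/*
 * Fork handlers: huge_mtx is acquired before fork(2) and restored in the
 * parent and child afterward, so that neither process resumes with the mutex
 * held by a thread that no longer exists.
 */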
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}