#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

uint64_t	huge_nmalloc;
uint64_t	huge_ndalloc;
size_t		huge_allocated;

malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;

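/*
 * Allocate a huge object with the default chunk alignment; this is simply a
 * wrapper around huge_palloc() with alignment set to chunksize.
 */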
void *
huge_malloc(size_t size, bool zero)
{

	return (huge_palloc(size, chunksize, zero));
}

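/*
 * Allocate one or more chunks to back a huge object with the requested
 * alignment, register the mapping in the huge extent tree, and update the
 * huge_* statistics while holding huge_mtx.
 */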
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, alignment, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}

	return (ret);
}

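/*
 * Try to satisfy a huge reallocation request without moving the object.
 * Returns ptr if the existing chunks suffice, or NULL if the request can only
 * be satisfied by a move.
 */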
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		if (config_fill && opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a,
			    oldsize - size);
		}
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}

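/*
 * Reallocate a huge object.  An in-place resize is attempted first; failing
 * that, new space is allocated (retrying without the extra bytes if
 * necessary), the contents are preserved via mremap(2) where available, or by
 * copying, and the old allocation is released.
 */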
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in dss.
	 */
#ifdef JEMALLOC_MREMAP_FIXED
	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
	    == false && chunk_in_dss(ret) == false))) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
	}
	return (ret);
}

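/*
 * Deallocate a huge object: remove its extent node from the huge tree, update
 * statistics, junk-fill the region when unmap is requested and both junk
 * filling and dss support are compiled in, then return the chunk to the chunk
 * layer and release the extent node.
 */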
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap && config_fill && config_dss && opt_junk)
		memset(node->addr, 0x5a, node->size);

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}

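/* Return the size recorded for the huge allocation that starts at ptr. */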
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

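/* Return the profiling context associated with the huge allocation at ptr. */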
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

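/* Set the profiling context associated with the huge allocation at ptr. */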
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}

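/*
 * Initialize the huge allocation machinery: huge_mtx, the extent tree, and
 * (when statistics are enabled) the counters.  Returns true on error.
 */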
bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	if (config_stats) {
		huge_nmalloc = 0;
		huge_ndalloc = 0;
		huge_allocated = 0;
	}

	return (false);
}

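/*
 * Fork handlers: huge_mtx is acquired before fork(2) and restored in the
 * parent and child afterward, so that the child does not inherit a locked
 * mutex.
 */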
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}