#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

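/* Huge allocation statistics; updated while holding huge_mtx. */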
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;

malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;

void *
huge_malloc(size_t size, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

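	/*
	 * Skip filling when zero is true, either because the caller requested
	 * zeroed memory or because chunk_alloc() may have flagged the memory
	 * it returned as already zeroed; junk filling would clobber it and
	 * zero filling would be redundant.
	 */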
	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}

	return (ret);
}

/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t alloc_size, chunk_size, offset;
	extent_node_t *node;

	/*
	 * This allocation requires alignment that is even larger than chunk
	 * alignment.  This means that huge_malloc() isn't good enough.
	 *
	 * Allocate almost twice as many chunks as are demanded by the size or
	 * alignment, in order to ensure the alignment can be achieved, then
	 * unmap leading and trailing chunks.
	 */
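	/*
	 * Illustrative example (assuming a hypothetical 4 MiB chunksize): for
	 * size = 4 MiB and alignment = 8 MiB, size < alignment, so alloc_size
	 * = (8 MiB << 1) - 4 MiB = 12 MiB.  Any chunk-aligned 12 MiB region
	 * contains an 8 MiB-aligned address at most 4 MiB past its start, with
	 * at least chunk_size (4 MiB) remaining after it, so the excess can be
	 * trimmed below.
	 */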
	assert(alignment > chunksize);

	chunk_size = CHUNK_CEILING(size);

	if (size >= alignment)
		alloc_size = chunk_size + alignment - chunksize;
	else
		alloc_size = (alignment << 1) - chunksize;

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(alloc_size, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

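	/*
	 * chunk_alloc() returns a chunk-aligned address; compute how far it
	 * lies past the nearest alignment boundary, then unmap the leading and
	 * trailing excess so that exactly chunk_size bytes remain at an
	 * address with the requested alignment.
	 */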
	offset = (uintptr_t)ret & (alignment - 1);
	assert((offset & chunksize_mask) == 0);
	assert(offset < alloc_size);
	if (offset == 0) {
		/* Trim trailing space. */
		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
		    - chunk_size, true);
	} else {
		size_t trailsize;

		/* Trim leading space. */
		chunk_dealloc(ret, alignment - offset, true);

		ret = (void *)((uintptr_t)ret + (alignment - offset));

		trailsize = alloc_size - (alignment - offset) - chunk_size;
		if (trailsize != 0) {
			/* Trim trailing space. */
			assert(trailsize < alloc_size);
			chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
			    trailsize, true);
		}
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = chunk_size;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(chunk_size);
		huge_nmalloc++;
		huge_allocated += chunk_size;
	}
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, chunk_size);
		else if (opt_zero)
			memset(ret, 0, chunk_size);
	}

	return (ret);
}

void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
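	/*
	 * The existing extent can be reused as long as the old allocation is
	 * itself huge and its chunked size is at least CHUNK_CEILING(size) but
	 * no larger than CHUNK_CEILING(size+extra).
	 */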
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		if (config_fill && opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a,
			    oldsize - size);
		}
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}

void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination is in dss.
	 */
#ifdef JEMALLOC_MREMAP_FIXED
	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
	    == false && chunk_in_dss(ret) == false))) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
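		/*
		 * Passing unmap == false keeps the old pages mapped so that
		 * the mremap() call below can move them onto ret; on failure
		 * they are copied and then unmapped explicitly.
		 */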
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
	}
	return (ret);
}

void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

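	/*
	 * Junk filling here is only useful if the chunk may be recycled rather
	 * than unmapped (possible when dss support is compiled in), which is
	 * presumably why the check includes config_dss.
	 */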
	if (unmap && config_fill && config_dss && opt_junk)
		memset(node->addr, 0x5a, node->size);

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}

size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up the node in the tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up the node in the tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up the node in the tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}

bool
huge_boot(void)
{

	/* Initialize huge allocation data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	if (config_stats) {
		huge_nmalloc = 0;
		huge_ndalloc = 0;
		huge_allocated = 0;
	}

	return (false);
}