#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
#endif

malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
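/* The tree is keyed by chunk address and protected by huge_mtx. */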
static extent_tree_t huge;

void *
huge_malloc(size_t size, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	huge_nmalloc++;
	huge_allocated += csize;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}
#endif

	return (ret);
}

/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t alloc_size, chunk_size, offset;
	extent_node_t *node;

	/*
	 * This allocation requires alignment that is even larger than chunk
	 * alignment. This means that huge_malloc() isn't good enough.
	 *
	 * Allocate almost twice as many chunks as are demanded by the size or
	 * alignment, in order to assure the alignment can be achieved, then
	 * unmap leading and trailing chunks.
	 */
	assert(alignment > chunksize);

	chunk_size = CHUNK_CEILING(size);

	if (size >= alignment)
		alloc_size = chunk_size + alignment - chunksize;
	else
		alloc_size = (alignment << 1) - chunksize;

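	/*
	 * Worked example, assuming a 4 MiB chunksize: for size == 4 MiB and
	 * alignment == 16 MiB, chunk_size == 4 MiB and alloc_size == 2*16 - 4
	 * == 28 MiB. Since chunks are chunksize-aligned, the distance from the
	 * start of the mapping to the next alignment boundary is at most
	 * alignment - chunksize, so the mapping always contains an aligned
	 * chunk_size region; the surplus is trimmed below.
	 */
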
	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(alloc_size, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	offset = (uintptr_t)ret & (alignment - 1);
	assert((offset & chunksize_mask) == 0);
	assert(offset < alloc_size);
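	/*
	 * offset is how far ret is past the previous alignment boundary. If
	 * ret is already aligned, only the trailing surplus needs to be
	 * returned; otherwise advance ret to the next alignment boundary and
	 * return both the leading and trailing surplus.
	 */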
	if (offset == 0) {
		/* Trim trailing space. */
		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
		    - chunk_size);
	} else {
		size_t trailsize;

		/* Trim leading space. */
		chunk_dealloc(ret, alignment - offset);

		ret = (void *)((uintptr_t)ret + (alignment - offset));

		trailsize = alloc_size - (alignment - offset) - chunk_size;
		if (trailsize != 0) {
			/* Trim trailing space. */
			assert(trailsize < alloc_size);
			chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
			    trailsize);
		}
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = chunk_size;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	huge_nmalloc++;
	huge_allocated += chunk_size;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, chunk_size);
		else if (opt_zero)
			memset(ret, 0, chunk_size);
	}
#endif

	return (ret);
}

void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
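	 * That is possible when the old allocation is itself huge (larger
	 * than arena_maxclass) and CHUNK_CEILING(oldsize) falls within
	 * [CHUNK_CEILING(size), CHUNK_CEILING(size + extra)], i.e. the chunks
	 * already backing ptr can satisfy the new request.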
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
#ifdef JEMALLOC_FILL
		if (opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a,
			    oldsize - size);
		}
#endif
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}

void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class. In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in swap or dss.
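	 *
	 * MREMAP_FIXED|MREMAP_MAYMOVE moves the old pages into the newly
	 * allocated destination without copying them. Swap- and dss-backed
	 * chunks are excluded, presumably because the allocator recycles them
	 * internally rather than mapping and unmapping them. On success the
	 * old mapping no longer exists, so huge_dalloc() is called with
	 * unmap=false below to discard only the bookkeeping for ptr.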
	 */
#ifdef JEMALLOC_MREMAP_FIXED
	if (oldsize >= chunksize
# ifdef JEMALLOC_SWAP
	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
	    chunk_in_swap(ret) == false))
# endif
# ifdef JEMALLOC_DSS
	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
# endif
	    ) {
		size_t newsize = huge_salloc(ret);

		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation. This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_write("<jemalloc>: Error in mremap(): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			idalloc(ptr);
		} else
			huge_dalloc(ptr, false);
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
	}
	return (ret);
}

void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

#ifdef JEMALLOC_STATS
	huge_ndalloc++;
	huge_allocated -= node->size;
#endif

	malloc_mutex_unlock(&huge_mtx);

	if (unmap) {
		/* Unmap chunk. */
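		/*
		 * Junk fill is limited to swap/dss builds, where a
		 * deallocated chunk may be recycled for a later allocation
		 * rather than returned to the kernel; filling memory that is
		 * about to be munmap()ed would be wasted work.
		 */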
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
		if (opt_junk)
			memset(node->addr, 0x5a, node->size);
#endif
#endif
		chunk_dealloc(node->addr, node->size);
	}

	base_node_dealloc(node);
}

size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

#ifdef JEMALLOC_PROF
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}
#endif

bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

#ifdef JEMALLOC_STATS
	huge_nmalloc = 0;
	huge_ndalloc = 0;
	huge_allocated = 0;
#endif

	return (false);
}