#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
#endif

malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;
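
/*
 * Allocate a stand-alone huge region: round the request up to a multiple of
 * the chunk size, obtain it via chunk_alloc(), and record it in the huge
 * extent tree so huge_salloc()/huge_dalloc() can find it later.
 */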
void *
huge_malloc(size_t size, bool zero)
{
    void *ret;
    size_t csize;
    extent_node_t *node;

    /* Allocate one or more contiguous chunks for this request. */

    csize = CHUNK_CEILING(size);
    if (csize == 0) {
        /* size is large enough to cause size_t wrap-around. */
        return (NULL);
    }

    /* Allocate an extent node with which to track the chunk. */
    node = base_node_alloc();
    if (node == NULL)
        return (NULL);

    ret = chunk_alloc(csize, false, &zero);
    if (ret == NULL) {
        base_node_dealloc(node);
        return (NULL);
    }

    /* Insert node into huge. */
    node->addr = ret;
    node->size = csize;

    malloc_mutex_lock(&huge_mtx);
    extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
    huge_nmalloc++;
    huge_allocated += csize;
#endif
    malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
    if (zero == false) {
        if (opt_junk)
            memset(ret, 0xa5, csize);
        else if (opt_zero)
            memset(ret, 0, csize);
    }
#endif

    return (ret);
}

/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t alignment, size_t size)
{
    void *ret;
    size_t alloc_size, chunk_size, offset;
    extent_node_t *node;
    bool zero;

    /*
     * This allocation requires alignment that is even larger than chunk
     * alignment. This means that huge_malloc() isn't good enough.
     *
     * Allocate almost twice as many chunks as are demanded by the size or
     * alignment, in order to assure the alignment can be achieved, then
     * unmap leading and trailing chunks.
     */
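    /*
     * Worked example (assuming 4 MiB chunks): for size = 8 MiB and
     * alignment = 8 MiB, chunk_size is 8 MiB and alloc_size is
     * 8 + 8 - 4 = 12 MiB.  The 12 MiB mapping necessarily contains an
     * 8 MiB-aligned, 8 MiB region; either the leading 4 MiB or the
     * trailing 4 MiB is unmapped below, depending on where the mapping
     * happens to land.
     */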
    assert(alignment >= chunksize);

    chunk_size = CHUNK_CEILING(size);

    if (size >= alignment)
        alloc_size = chunk_size + alignment - chunksize;
    else
        alloc_size = (alignment << 1) - chunksize;

    /* Allocate an extent node with which to track the chunk. */
    node = base_node_alloc();
    if (node == NULL)
        return (NULL);

    zero = false;
    ret = chunk_alloc(alloc_size, false, &zero);
    if (ret == NULL) {
        base_node_dealloc(node);
        return (NULL);
    }

    offset = (uintptr_t)ret & (alignment - 1);
    assert((offset & chunksize_mask) == 0);
    assert(offset < alloc_size);
    if (offset == 0) {
        /* Trim trailing space. */
        chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
            - chunk_size);
    } else {
        size_t trailsize;

        /* Trim leading space. */
        chunk_dealloc(ret, alignment - offset);

        ret = (void *)((uintptr_t)ret + (alignment - offset));

        trailsize = alloc_size - (alignment - offset) - chunk_size;
        if (trailsize != 0) {
            /* Trim trailing space. */
            assert(trailsize < alloc_size);
            chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
                trailsize);
        }
    }

    /* Insert node into huge. */
    node->addr = ret;
    node->size = chunk_size;

    malloc_mutex_lock(&huge_mtx);
    extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
    huge_nmalloc++;
    huge_allocated += chunk_size;
#endif
    malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
    if (opt_junk)
        memset(ret, 0xa5, chunk_size);
    else if (opt_zero)
        memset(ret, 0, chunk_size);
#endif

    return (ret);
}
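
/*
 * Resize a huge allocation.  If the chunk-rounded size is unchanged, the
 * existing mapping is reused in place (only junk/zero fill is adjusted);
 * otherwise a new huge region is allocated, the contents are copied, and
 * the old region is freed.
 */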
void *
huge_ralloc(void *ptr, size_t size, size_t oldsize)
{
    void *ret;
    size_t copysize;

    /* Avoid moving the allocation if the size class would not change. */
    if (oldsize > arena_maxclass &&
        CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
#ifdef JEMALLOC_FILL
        if (opt_junk && size < oldsize) {
            memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
                - size);
        } else if (opt_zero && size > oldsize) {
            memset((void *)((uintptr_t)ptr + oldsize), 0, size
                - oldsize);
        }
#endif
        return (ptr);
    }

    /*
     * If we get here, then size and oldsize are different enough that we
     * need to use a different size class. In that case, fall back to
     * allocating new space and copying.
     */
    ret = huge_malloc(size, false);
    if (ret == NULL)
        return (NULL);

    copysize = (size < oldsize) ? size : oldsize;
    memcpy(ret, ptr, copysize);
    idalloc(ptr);
    return (ret);
}
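
/*
 * Free a huge allocation: look up its extent node by address, remove it from
 * the tree, optionally junk-fill the memory, unmap the chunk, and recycle
 * the node.
 */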
void
huge_dalloc(void *ptr)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = ptr;
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);
    assert(node->addr == ptr);
    extent_tree_ad_remove(&huge, node);

#ifdef JEMALLOC_STATS
    huge_ndalloc++;
    huge_allocated -= node->size;
#endif

    malloc_mutex_unlock(&huge_mtx);

    /* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
    if (opt_junk)
        memset(node->addr, 0x5a, node->size);
#endif
#endif
    chunk_dealloc(node->addr, node->size);

    base_node_dealloc(node);
}
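
/* Return the usable size of a huge allocation, as recorded in its node. */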
size_t
huge_salloc(const void *ptr)
{
    size_t ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    ret = node->size;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}

#ifdef JEMALLOC_PROF
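/* Return the profiling context associated with a huge allocation. */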
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    ret = node->prof_ctx;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}
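
/* Associate a profiling context with a huge allocation. */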
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    node->prof_ctx = ctx;

    malloc_mutex_unlock(&huge_mtx);
}
#endif
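
/* Initialize huge-allocation bookkeeping (mutex, extent tree, stats). */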
bool
huge_boot(void)
{

    /* Initialize chunks data. */
    if (malloc_mutex_init(&huge_mtx))
        return (true);
    extent_tree_ad_new(&huge);

#ifdef JEMALLOC_STATS
    huge_nmalloc = 0;
    huge_ndalloc = 0;
    huge_allocated = 0;
#endif

    return (false);
}