#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
 * potentially avoid some system calls.
 */
#ifndef NO_TLS
static __thread bool mmap_unaligned_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));
#define MMAP_UNALIGNED_GET()	mmap_unaligned_tls
#define MMAP_UNALIGNED_SET(v)	do {					\
	mmap_unaligned_tls = (v);					\
} while (0)
#else
static pthread_key_t mmap_unaligned_tsd;
#define MMAP_UNALIGNED_GET()	((bool)pthread_getspecific(mmap_unaligned_tsd))
#define MMAP_UNALIGNED_SET(v)	do {					\
	pthread_setspecific(mmap_unaligned_tsd, (void *)(v));		\
} while (0)
#endif
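/*
 * MMAP_UNALIGNED_GET()/MMAP_UNALIGNED_SET() hide whether the per-thread
 * "previous mapping was unaligned" flag lives in a __thread variable or in
 * pthreads thread-specific data, so the allocation paths below can use a
 * single spelling regardless of TLS availability.
 */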

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*pages_map(void *addr, size_t size, bool noreserve);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc_mmap_slow(size_t size, bool unaligned,
    bool noreserve);
static void	*chunk_alloc_mmap_internal(size_t size, bool noreserve);

/******************************************************************************/

static void *
pages_map(void *addr, size_t size, bool noreserve)
{
	void *ret;

	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	int flags = MAP_PRIVATE | MAP_ANON;
#ifdef MAP_NORESERVE
	if (noreserve)
		flags |= MAP_NORESERVE;
#endif
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
	assert(ret != NULL);
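	/*
	 * mmap() reports failure via MAP_FAILED rather than NULL, so a NULL
	 * return here would be unexpected; the assert documents that
	 * assumption, and NULL is reserved as this function's own failure
	 * value below.
	 */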

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[STRERROR_BUF];

			strerror_r(errno, buf, sizeof(buf));
			malloc_write("<jemalloc>: Error in munmap(): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}

	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

	if (munmap(addr, size) == -1) {
		char buf[STRERROR_BUF];

		strerror_r(errno, buf, sizeof(buf));
		malloc_write("<jemalloc>: Error in munmap(): ");
		malloc_write(buf);
		malloc_write("\n");
		if (opt_abort)
			abort();
	}
}

static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
{
	void *ret;
	size_t offset;

	/* Beware size_t wrap-around. */
	if (size + chunksize <= size)
		return (NULL);

	ret = pages_map(NULL, size + chunksize, noreserve);
	if (ret == NULL)
		return (NULL);

	/* Clean up unneeded leading/trailing space. */
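	/*
	 * Illustrative example of the trimming below (hypothetical numbers,
	 * assuming a 4 MiB chunksize): if the oversized mapping starts at
	 * 0x7f0000300000, then offset is 0x300000, so the leading 0x100000
	 * bytes and the trailing 0x300000 bytes are unmapped, leaving a
	 * size-byte region based at the chunk-aligned address 0x7f0000400000.
	 */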
	offset = CHUNK_ADDR2OFFSET(ret);
	if (offset != 0) {
		/* Note that mmap() returned an unaligned mapping. */
		unaligned = true;

		/* Leading space. */
		pages_unmap(ret, chunksize - offset);

		ret = (void *)((uintptr_t)ret + (chunksize - offset));

		/* Trailing space. */
		pages_unmap((void *)((uintptr_t)ret + size), offset);
	} else {
		/* Trailing space only. */
		pages_unmap((void *)((uintptr_t)ret + size), chunksize);
	}

	/*
	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
	 * the next chunk_alloc_mmap() execution tries the fast allocation
	 * method.
	 */
	if (unaligned == false)
		MMAP_UNALIGNED_SET(false);

	return (ret);
}

static void *
chunk_alloc_mmap_internal(size_t size, bool noreserve)
{
	void *ret;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim
	 * the excess.  However, that always results in at least one call to
	 * pages_unmap().
	 *
	 * A more optimistic approach is to try mapping precisely the right
	 * amount, then try to append another mapping if alignment is off.  In
	 * practice, this works out well as long as the application is not
	 * interleaving mappings via direct mmap() calls.  If we do run into a
	 * situation where there is an interleaved mapping and we are unable
	 * to extend an unaligned mapping, our best option is to switch to the
	 * slow method until mmap() returns another aligned mapping.  This
	 * will tend to leave a gap in the memory map that is too small to
	 * cause later problems for the optimistic method.
	 *
	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
	 * requested address.  mmap_unaligned tracks whether the previous
	 * chunk_alloc_mmap() execution received any unaligned or relocated
	 * mappings, and if so, the current execution will immediately fall
	 * back to the slow method.  However, we keep track of whether the
	 * fast method would have succeeded, and if so, we make a note to try
	 * the fast method next time.
	 */
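	/*
	 * Sketch of the fast path with hypothetical numbers (assuming a 4 MiB
	 * chunksize): if pages_map(NULL, size) returns 0x7f0000d00000, the
	 * offset is 0x100000, so an extra chunksize - offset = 0x300000 bytes
	 * are requested immediately after the mapping; on success the leading
	 * 0x300000 bytes are unmapped and the chunk begins at the aligned
	 * address 0x7f0001000000.
	 */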

	if (MMAP_UNALIGNED_GET() == false) {
		size_t offset;

		ret = pages_map(NULL, size, noreserve);
		if (ret == NULL)
			return (NULL);

		offset = CHUNK_ADDR2OFFSET(ret);
		if (offset != 0) {
			MMAP_UNALIGNED_SET(true);
			/* Try to extend chunk boundary. */
			if (pages_map((void *)((uintptr_t)ret + size),
			    chunksize - offset, noreserve) == NULL) {
				/*
				 * Extension failed.  Clean up, then revert to
				 * the reliable-but-expensive method.
				 */
				pages_unmap(ret, size);
				ret = chunk_alloc_mmap_slow(size, true,
				    noreserve);
			} else {
				/* Clean up unneeded leading space. */
				pages_unmap(ret, chunksize - offset);
				ret = (void *)((uintptr_t)ret + (chunksize -
				    offset));
			}
		}
	} else
		ret = chunk_alloc_mmap_slow(size, false, noreserve);

	return (ret);
}

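/*
 * Public entry points: both wrappers below return a region of the requested
 * size whose base address is chunk-aligned, backed by a private anonymous
 * mapping; the _noreserve variant additionally requests MAP_NORESERVE where
 * the platform defines it, so swap space is not reserved up front.
 */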
void *
chunk_alloc_mmap(size_t size)
{

	return (chunk_alloc_mmap_internal(size, false));
}

void *
chunk_alloc_mmap_noreserve(size_t size)
{

	return (chunk_alloc_mmap_internal(size, true));
}

void
chunk_dealloc_mmap(void *chunk, size_t size)
{

	pages_unmap(chunk, size);
}

bool
chunk_mmap_boot(void)
{

#ifdef NO_TLS
	if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
		malloc_write("<jemalloc>: Error in pthread_key_create()\n");
		return (true);
	}
#endif

	return (false);
}