#define	JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
 * potentially avoid some system calls.
 */
#ifdef JEMALLOC_TLS
static __thread bool	mmap_unaligned_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));
#define	MMAP_UNALIGNED_GET()	mmap_unaligned_tls
#define	MMAP_UNALIGNED_SET(v)	do {					\
	mmap_unaligned_tls = (v);					\
} while (0)
#else
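/*
 * TSD fallback for configurations without TLS: the boolean is stored directly
 * in the thread-specific value slot (cast to void *), so no per-thread
 * allocation is needed, and pthread_getspecific() yields NULL (i.e. false)
 * until the flag is first set.
 */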
19static pthread_key_t mmap_unaligned_tsd;
20#define MMAP_UNALIGNED_GET() ((bool)pthread_getspecific(mmap_unaligned_tsd))
21#define MMAP_UNALIGNED_SET(v) do { \
22 pthread_setspecific(mmap_unaligned_tsd, (void *)(v)); \
23} while (0)
Jason Evans4201af02010-01-24 02:53:40 -080024#endif
Jason Evans4201af02010-01-24 02:53:40 -080025
26/******************************************************************************/
27/* Function prototypes for non-inline static functions. */
28
Jordan DeLong2206e1a2010-05-10 14:17:00 -070029static void *pages_map(void *addr, size_t size, bool noreserve);
Jason Evans4201af02010-01-24 02:53:40 -080030static void pages_unmap(void *addr, size_t size);
Jason Evansa09f55c2010-09-20 16:05:41 -070031static void *chunk_alloc_mmap_slow(size_t size, bool unaligned,
32 bool noreserve);
Jordan DeLong2206e1a2010-05-10 14:17:00 -070033static void *chunk_alloc_mmap_internal(size_t size, bool noreserve);
Jason Evans4201af02010-01-24 02:53:40 -080034
35/******************************************************************************/
36
37static void *
Jordan DeLong2206e1a2010-05-10 14:17:00 -070038pages_map(void *addr, size_t size, bool noreserve)
Jason Evans4201af02010-01-24 02:53:40 -080039{
40 void *ret;
41
42 /*
43 * We don't use MAP_FIXED here, because it can cause the *replacement*
44 * of existing mappings, and we only want to create new mappings.
45 */
Jordan DeLong2206e1a2010-05-10 14:17:00 -070046 int flags = MAP_PRIVATE | MAP_ANON;
47#ifdef MAP_NORESERVE
48 if (noreserve)
49 flags |= MAP_NORESERVE;
50#endif
51 ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
Jason Evans4201af02010-01-24 02:53:40 -080052 assert(ret != NULL);
53
54 if (ret == MAP_FAILED)
55 ret = NULL;
56 else if (addr != NULL && ret != addr) {
57 /*
58 * We succeeded in mapping memory, but not in the right place.
59 */
60 if (munmap(ret, size) == -1) {
Jason Evansa09f55c2010-09-20 16:05:41 -070061 char buf[BUFERROR_BUF];
Jason Evans4201af02010-01-24 02:53:40 -080062
Jason Evansa09f55c2010-09-20 16:05:41 -070063 buferror(errno, buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}

	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

	if (munmap(addr, size) == -1) {
		char buf[BUFERROR_BUF];

		buferror(errno, buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}

static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
{
	void *ret;
	size_t offset;

	/* Beware size_t wrap-around. */
	if (size + chunksize <= size)
		return (NULL);

	ret = pages_map(NULL, size + chunksize, noreserve);
	if (ret == NULL)
		return (NULL);

	/* Clean up unneeded leading/trailing space. */
	offset = CHUNK_ADDR2OFFSET(ret);
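	/*
	 * Worked example (illustrative numbers only; suppose chunksize is
	 * 4 MiB): size + chunksize bytes were mapped above.  If mmap()
	 * returned an address 1 MiB past a chunk boundary, offset == 1 MiB,
	 * so 3 MiB of leading space and 1 MiB of trailing space are unmapped
	 * below, leaving a chunk-aligned mapping of exactly size bytes.  If
	 * the mapping happened to be aligned already (offset == 0), only the
	 * extra trailing chunksize is unmapped.
	 */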
	if (offset != 0) {
		/* Note that mmap() returned an unaligned mapping. */
		unaligned = true;

		/* Leading space. */
		pages_unmap(ret, chunksize - offset);

		ret = (void *)((uintptr_t)ret +
		    (chunksize - offset));

		/* Trailing space. */
		pages_unmap((void *)((uintptr_t)ret + size),
		    offset);
	} else {
		/* Trailing space only. */
		pages_unmap((void *)((uintptr_t)ret + size),
		    chunksize);
	}

	/*
	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
	 * the next chunk_alloc_mmap() execution tries the fast allocation
	 * method.
	 */
	if (unaligned == false)
		MMAP_UNALIGNED_SET(false);

	return (ret);
}

static void *
chunk_alloc_mmap_internal(size_t size, bool noreserve)
{
	void *ret;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim
	 * the excess.  However, that always results in at least one call to
	 * pages_unmap().
	 *
	 * A more optimistic approach is to try mapping precisely the right
	 * amount, then try to append another mapping if alignment is off.  In
	 * practice, this works out well as long as the application is not
	 * interleaving mappings via direct mmap() calls.  If we do run into a
	 * situation where there is an interleaved mapping and we are unable to
	 * extend an unaligned mapping, our best option is to switch to the
	 * slow method until mmap() returns another aligned mapping.  This will
	 * tend to leave a gap in the memory map that is too small to cause
	 * later problems for the optimistic method.
	 *
	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
	 * requested address.  mmap_unaligned tracks whether the previous
	 * chunk_alloc_mmap() execution received any unaligned or relocated
	 * mappings, and if so, the current execution will immediately fall
	 * back to the slow method.  However, we keep track of whether the fast
	 * method would have succeeded, and if so, we make a note to try the
	 * fast method next time.
	 */

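	/*
	 * Illustration of the fast path below (numbers are hypothetical;
	 * suppose chunksize is 4 MiB): pages_map(NULL, size) might return an
	 * address 1 MiB past a chunk boundary, so offset == 1 MiB.  The code
	 * then tries to map the remaining 3 MiB immediately after the first
	 * mapping; if that succeeds, the leading 3 MiB are unmapped and the
	 * surviving size-byte region starts exactly on a chunk boundary,
	 * without an over-sized mapping ever having been created.
	 */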
	if (MMAP_UNALIGNED_GET() == false) {
		size_t offset;

		ret = pages_map(NULL, size, noreserve);
		if (ret == NULL)
			return (NULL);

		offset = CHUNK_ADDR2OFFSET(ret);
		if (offset != 0) {
			MMAP_UNALIGNED_SET(true);
			/* Try to extend chunk boundary. */
			if (pages_map((void *)((uintptr_t)ret + size),
			    chunksize - offset, noreserve) == NULL) {
				/*
				 * Extension failed.  Clean up, then revert to
				 * the reliable-but-expensive method.
				 */
				pages_unmap(ret, size);
				ret = chunk_alloc_mmap_slow(size, true,
				    noreserve);
			} else {
				/* Clean up unneeded leading space. */
				pages_unmap(ret, chunksize - offset);
				ret = (void *)((uintptr_t)ret + (chunksize -
				    offset));
			}
		}
	} else
		ret = chunk_alloc_mmap_slow(size, false, noreserve);

	return (ret);
}

void *
chunk_alloc_mmap(size_t size)
{

	return (chunk_alloc_mmap_internal(size, false));
}

void *
chunk_alloc_mmap_noreserve(size_t size)
{

	return (chunk_alloc_mmap_internal(size, true));
}

void
chunk_dealloc_mmap(void *chunk, size_t size)
{

	pages_unmap(chunk, size);
}
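/*
 * Example (hypothetical caller, for illustration only; the real callers live
 * elsewhere in jemalloc): allocating and releasing a single chunk-aligned
 * region of chunksize bytes might look like:
 *
 *	void *chunk = chunk_alloc_mmap(chunksize);
 *	if (chunk != NULL)
 *		chunk_dealloc_mmap(chunk, chunksize);
 */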

bool
chunk_mmap_boot(void)
{

#ifndef JEMALLOC_TLS
	if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
		malloc_write("<jemalloc>: Error in pthread_key_create()\n");
		return (true);
	}
#endif

	return (false);
}