#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
 * potentially avoid some system calls.  We can get away without TLS here,
 * since the state of mmap_unaligned only affects performance, rather than
 * correct function.
 */
static
#ifndef NO_TLS
    __thread
#endif
    bool mmap_unaligned
#ifndef NO_TLS
    JEMALLOC_ATTR(tls_model("initial-exec"))
#endif
    ;
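/*
 * A note on the attribute above (an observation, not part of the original
 * comment): the "initial-exec" TLS model is presumably used because it
 * compiles down to a direct, offset-based thread-local access with no
 * __tls_get_addr() call, which keeps the fast-path check cheap.  The
 * trade-off is that initial-exec TLS is only guaranteed to work for libraries
 * present at program startup, the usual case for a malloc implementation.
 */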

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, bool unaligned);

/******************************************************************************/

static void *
pages_map(void *addr, size_t size)
{
    void *ret;

    /*
     * We don't use MAP_FIXED here, because it can cause the *replacement*
     * of existing mappings, and we only want to create new mappings.
     */
    ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
        -1, 0);
    assert(ret != NULL);

    if (ret == MAP_FAILED)
        ret = NULL;
    else if (addr != NULL && ret != addr) {
        /*
         * We succeeded in mapping memory, but not in the right place.
         */
        if (munmap(ret, size) == -1) {
            char buf[STRERROR_BUF];

            strerror_r(errno, buf, sizeof(buf));
            malloc_write("<jemalloc>: Error in munmap(): ");
            malloc_write(buf);
            malloc_write("\n");
            if (opt_abort)
                abort();
        }
        ret = NULL;
    }

    assert(ret == NULL || (addr == NULL && ret != addr)
        || (addr != NULL && ret == addr));
    return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

    if (munmap(addr, size) == -1) {
        char buf[STRERROR_BUF];

        strerror_r(errno, buf, sizeof(buf));
        malloc_write("<jemalloc>: Error in munmap(): ");
        malloc_write(buf);
        malloc_write("\n");
        if (opt_abort)
            abort();
    }
}

static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned)
{
    void *ret;
    size_t offset;

    /* Beware size_t wrap-around. */
    if (size + chunksize <= size)
        return (NULL);

    ret = pages_map(NULL, size + chunksize);
    if (ret == NULL)
        return (NULL);

    /* Clean up unneeded leading/trailing space. */
    offset = CHUNK_ADDR2OFFSET(ret);
    if (offset != 0) {
        /* Note that mmap() returned an unaligned mapping. */
        unaligned = true;

        /* Leading space. */
        pages_unmap(ret, chunksize - offset);

        ret = (void *)((uintptr_t)ret + (chunksize - offset));

        /* Trailing space. */
        pages_unmap((void *)((uintptr_t)ret + size), offset);
    } else {
        /* Trailing space only. */
        pages_unmap((void *)((uintptr_t)ret + size), chunksize);
    }

    /*
     * If mmap() returned an aligned mapping, reset mmap_unaligned so that
     * the next chunk_alloc_mmap() execution tries the fast allocation
     * method.
     */
    if (unaligned == false)
        mmap_unaligned = false;

    return (ret);
}
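
/*
 * Worked example of the trimming arithmetic in chunk_alloc_mmap_slow() (the
 * concrete numbers are illustrative only; assume chunksize = 4 MiB = 0x400000,
 * so CHUNK_ADDR2OFFSET(a) is effectively a & 0x3fffff):
 *
 *   pages_map(NULL, size + chunksize) returns ret = 0x7f0000153000
 *   offset        = 0x153000
 *   leading trim  = chunksize - offset = 0x2ad000
 *   new ret       = 0x7f0000153000 + 0x2ad000 = 0x7f0000400000 (chunk-aligned)
 *   trailing trim = offset = 0x153000, starting at new ret + size
 *
 * Over-sizing the mapping by exactly one chunk guarantees that a chunk-aligned
 * region of the requested size fits inside it, whatever address mmap() picks.
 */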

void *
chunk_alloc_mmap(size_t size)
{
    void *ret;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable, but
     * slow method is to create a mapping that is over-sized, then trim the
     * excess.  However, that always results in at least one call to
     * pages_unmap().
     *
     * A more optimistic approach is to try mapping precisely the right
     * amount, then try to append another mapping if alignment is off.  In
     * practice, this works out well as long as the application is not
     * interleaving mappings via direct mmap() calls.  If we do run into a
     * situation where there is an interleaved mapping and we are unable to
     * extend an unaligned mapping, our best option is to switch to the
     * slow method until mmap() returns another aligned mapping.  This will
     * tend to leave a gap in the memory map that is too small to cause
     * later problems for the optimistic method.
     *
     * Another possible confounding factor is address space layout
     * randomization (ASLR), which causes mmap(2) to disregard the
     * requested address.  mmap_unaligned tracks whether the previous
     * chunk_alloc_mmap() execution received any unaligned or relocated
     * mappings, and if so, the current execution will immediately fall
     * back to the slow method.  However, we keep track of whether the fast
     * method would have succeeded, and if so, we make a note to try the
     * fast method next time.
     */
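    /*
     * Summary of how mmap_unaligned steers the two paths (a restatement of
     * the code below and of chunk_alloc_mmap_slow(), not additional policy):
     *
     *   mmap_unaligned == false: try the single-mmap() fast path; if the
     *       result is unaligned, set mmap_unaligned = true and either extend
     *       the mapping in place or fall back to the slow path.
     *   mmap_unaligned == true:  go straight to the slow path; if its mmap()
     *       happens to return an aligned mapping, reset mmap_unaligned to
     *       false so the next call retries the fast path.
     */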

    if (mmap_unaligned == false) {
        size_t offset;

        ret = pages_map(NULL, size);
        if (ret == NULL)
            return (NULL);

        offset = CHUNK_ADDR2OFFSET(ret);
        if (offset != 0) {
            mmap_unaligned = true;
            /* Try to extend chunk boundary. */
            if (pages_map((void *)((uintptr_t)ret + size),
                chunksize - offset) == NULL) {
                /*
                 * Extension failed.  Clean up, then revert to
                 * the reliable-but-expensive method.
                 */
                pages_unmap(ret, size);
                ret = chunk_alloc_mmap_slow(size, true);
            } else {
                /* Clean up unneeded leading space. */
                pages_unmap(ret, chunksize - offset);
                ret = (void *)((uintptr_t)ret + (chunksize - offset));
            }
        }
    } else
        ret = chunk_alloc_mmap_slow(size, false);

    return (ret);
}

void
chunk_dealloc_mmap(void *chunk, size_t size)
{

    pages_unmap(chunk, size);
}