/* Copyright 2006 The Android Open Source Project */

/* A wrapper file for dlmalloc.c that compiles in the
 * mspace_*() functions, which provide an interface for
 * creating multiple heaps.
 */
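
/* Illustrative usage sketch only, not part of this wrapper: with an mspace
 * handle "msp" obtained from one of the create_contiguous_mspace*() helpers
 * defined below, the compiled-in mspace_*() interface from dlmalloc works
 * like the ordinary malloc API:
 *
 *   void *p = mspace_malloc(msp, 128);
 *   if (p != NULL) {
 *     ...
 *     mspace_free(msp, p);
 *   }
 */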
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/ioctl.h>

#include <cutils/ashmem.h>

/* It's a pain getting the mallinfo stuff to work
 * with Linux, OSX, and klibc, so just turn it off
 * for now.
 * TODO: make mallinfo work
 */
#define NO_MALLINFO 1

/* Allow setting the maximum heap footprint.
 */
#define USE_MAX_ALLOWED_FOOTPRINT 1

/* Don't try to trim memory.
 * TODO: support this.
 */
#define MORECORE_CANNOT_TRIM 1

/* Use mmap()d anonymous memory to guarantee
 * that an mspace is contiguous.
 *
 * create_mspace() won't work right if this is
 * defined, so hide the definition of it and
 * break any users at build time.
 */
#define USE_CONTIGUOUS_MSPACES 1
#if USE_CONTIGUOUS_MSPACES
/* This combination of settings forces sys_alloc()
 * to always use MORECORE(). It won't expect the
 * results to be contiguous, but we'll guarantee
 * that they are.
 */
#define HAVE_MMAP 0
#define HAVE_MORECORE 1
#define MORECORE_CONTIGUOUS 0
/* m is always the appropriate local when MORECORE() is called. */
#define MORECORE(S) contiguous_mspace_morecore(m, S)
#define create_mspace HIDDEN_create_mspace_HIDDEN
#define destroy_mspace HIDDEN_destroy_mspace_HIDDEN
typedef struct malloc_state *mstate0;
static void *contiguous_mspace_morecore(mstate0 m, ssize_t nb);
#endif

#define MSPACES 1
#define ONLY_MSPACES 1
#include "../../../bionic/libc/bionic/dlmalloc.c"

#ifndef PAGESIZE
#define PAGESIZE mparams.page_size
#endif

#define ALIGN_UP(p, alignment) \
    (((uintptr_t)(p) + (alignment)-1) & ~((alignment)-1))
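
/* For example, with a 4096-byte page size, ALIGN_UP(0x1001, 0x1000) yields
 * 0x2000 while ALIGN_UP(0x2000, 0x1000) stays 0x2000; the mask trick above
 * assumes that alignment is a power of two, which holds for page sizes.
 */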

/* A direct copy of dlmalloc_usable_size(),
 * which isn't compiled in when ONLY_MSPACES is set.
 * The mspace parameter isn't actually necessary,
 * but we include it to be consistent with the
 * rest of the mspace_*() functions.
 */
size_t mspace_usable_size(mspace _unused, const void* mem) {
  if (mem != 0) {
    const mchunkptr p = mem2chunk(mem);
    if (cinuse(p))
      return chunksize(p) - overhead_for(p);
  }
  return 0;
}
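
/* Illustrative note (sketch only): because dlmalloc rounds requests up to
 * chunk granularity, the value reported here can exceed the size that was
 * originally requested, e.g.:
 *
 *   void *p = mspace_malloc(msp, 10);
 *   assert(mspace_usable_size(msp, p) >= 10);
 */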

#if USE_CONTIGUOUS_MSPACES
#include <sys/mman.h>
#include <limits.h>

#define CONTIG_STATE_MAGIC 0xf00dd00d
struct mspace_contig_state {
  unsigned int magic;
  char *brk;
  char *top;
  mspace m;
};
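
/* One contiguous mspace lives in a single ashmem-backed mapping, laid out
 * roughly as:
 *
 *   base (page-aligned)
 *     struct mspace_contig_state   (magic, brk, top, m)
 *     the mspace itself            (created at base + sizeof(*cs))
 *     pages handed to dlmalloc     (PROT_READ | PROT_WRITE, up to brk)
 *     unused reserve               (PROT_NONE, from brk up to top)
 *
 * Since the state sits at the start of the page containing the mspace,
 * contiguous_mspace_morecore() can recover it by masking the low bits
 * off the mstate pointer.
 */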

static void *contiguous_mspace_morecore(mstate m, ssize_t nb) {
  struct mspace_contig_state *cs;
  char *oldbrk;
  const unsigned int pagesize = PAGESIZE;

  cs = (struct mspace_contig_state *)((uintptr_t)m & ~(pagesize-1));
  assert(cs->magic == CONTIG_STATE_MAGIC);
  assert(cs->m == m);
  assert(nb >= 0);  /* TODO: deal with the trim case */

  oldbrk = cs->brk;
  if (nb > 0) {
    /* Break to the first page boundary that satisfies the request.
     */
    char *newbrk = (char *)ALIGN_UP(oldbrk + nb, pagesize);
    if (newbrk > cs->top)
      return CMFAIL;

    /* Update the protection on the underlying memory.
     * Pages we've given to dlmalloc are read/write, and
     * pages we haven't are not accessible (a read or write
     * will cause a seg fault).
     */
    if (mprotect(cs, newbrk - (char *)cs, PROT_READ | PROT_WRITE) < 0)
      return CMFAIL;
    if (newbrk != cs->top) {
      if (mprotect(newbrk, cs->top - newbrk, PROT_NONE) < 0)
        return CMFAIL;
    }

    cs->brk = newbrk;

    /* Make sure that dlmalloc will merge this block with the
     * initial block that was passed to create_mspace_with_base().
     * We don't care about extern vs. non-extern, so just clear it.
     */
    m->seg.sflags &= ~EXTERN_BIT;
  }

  return oldbrk;
}

mspace create_contiguous_mspace_with_name(size_t starting_capacity,
    size_t max_capacity, int locked, char const * name) {
  int fd, ret;
  struct mspace_contig_state *cs;
  char buf[ASHMEM_NAME_LEN] = "mspace";
  void *base;
  unsigned int pagesize;
  mstate m;

  if (starting_capacity > max_capacity)
    return (mspace)0;

  init_mparams();
  pagesize = PAGESIZE;

  /* Create the anonymous memory that will back the mspace.
   * This reserves all of the virtual address space we could
   * ever need. Physical pages will be mapped as the memory
   * is touched.
   *
   * Align max_capacity to a whole page.
   */
  max_capacity = (size_t)ALIGN_UP(max_capacity, pagesize);

  if (name)
    snprintf(buf, sizeof(buf), "mspace/%s", name);
  fd = ashmem_create_region(buf, max_capacity);
  if (fd < 0)
    return (mspace)0;

  base = mmap(NULL, max_capacity, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  close(fd);
  if (base == MAP_FAILED)
    return (mspace)0;

  /* Make sure that base is at the beginning of a page.
   */
  assert(((uintptr_t)base & (pagesize-1)) == 0);

  /* Reserve some space for the information that our MORECORE needs.
   */
  cs = base;

  /* Create the mspace, pointing to the memory we just reserved.
   */
  m = create_mspace_with_base((char *)base + sizeof(*cs), starting_capacity,
                              locked);
  if (m == (mspace)0)
    goto error;

  /* Make sure that m is in the same page as cs.
   */
  assert(((uintptr_t)m & (uintptr_t)~(pagesize-1)) == (uintptr_t)base);

  /* Find out exactly how much of the memory the mspace
   * is using.
   */
  cs->brk = m->seg.base + m->seg.size;
  cs->top = (char *)base + max_capacity;
  assert((char *)base <= cs->brk);
  assert(cs->brk <= cs->top);

  /* Prevent access to the memory we haven't handed out yet.
   */
  if (cs->brk != cs->top) {
    /* mprotect() requires page-aligned arguments, but it's possible
     * for cs->brk not to be page-aligned at this point.
     */
    char *prot_brk = (char *)ALIGN_UP(cs->brk, pagesize);
    if (mprotect(prot_brk, cs->top - prot_brk, PROT_NONE) < 0)
      goto error;
  }

  cs->m = m;
  cs->magic = CONTIG_STATE_MAGIC;

  return (mspace)m;

error:
  munmap(base, max_capacity);
  return (mspace)0;
}
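
/* Illustrative usage sketch only: the name is prefixed with "mspace/" in
 * ashmem, and the heap is torn down with destroy_contiguous_mspace(),
 * defined below. The sizes and the name "example" are arbitrary:
 *
 *   mspace msp = create_contiguous_mspace_with_name(64 * 1024,
 *       8 * 1024 * 1024, 0, "example");
 *   if (msp != (mspace)0) {
 *     ... allocate with the mspace_*() calls ...
 *     destroy_contiguous_mspace(msp);
 *   }
 */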

mspace create_contiguous_mspace(size_t starting_capacity,
    size_t max_capacity, int locked) {
  return create_contiguous_mspace_with_name(starting_capacity,
      max_capacity, locked, NULL);
}

size_t destroy_contiguous_mspace(mspace msp) {
  mstate ms = (mstate)msp;

  if (ok_magic(ms)) {
    struct mspace_contig_state *cs;
    size_t length;
    const unsigned int pagesize = PAGESIZE;

    cs = (struct mspace_contig_state *)((uintptr_t)ms & ~(pagesize-1));
    assert(cs->magic == CONTIG_STATE_MAGIC);
    assert(cs->m == ms);

    length = cs->top - (char *)cs;
    if (munmap((char *)cs, length) != 0)
      return length;
  }
  else {
    USAGE_ERROR_ACTION(ms, ms);
  }
  return 0;
}
#endif