/* Copyright 2006 The Android Open Source Project */

/* A wrapper file for dlmalloc.c that compiles in the
 * mspace_*() functions, which provide an interface for
 * creating multiple heaps.
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/ioctl.h>

#include <cutils/ashmem.h>
/* It's a pain getting the mallinfo stuff to work
 * with Linux, OSX, and klibc, so just turn it off
 * for now.
 * TODO: make mallinfo work
 */
#define NO_MALLINFO 1

/* Allow setting the maximum heap footprint.
 */
#define USE_MAX_ALLOWED_FOOTPRINT 1

/* Don't try to trim memory.
 * TODO: support this.
 */
#define MORECORE_CANNOT_TRIM 1

/* Use mmap()d anonymous memory to guarantee
 * that an mspace is contiguous.
 *
 * create_mspace() won't work right if this is
 * defined, so hide the definition of it and
 * break any users at build time.
 */
#define USE_CONTIGUOUS_MSPACES 1
#if USE_CONTIGUOUS_MSPACES
/* This combination of settings forces sys_alloc()
 * to always use MORECORE(). It won't expect the
 * results to be contiguous, but we'll guarantee
 * that they are.
 */
#define HAVE_MMAP 0
#define HAVE_MORECORE 1
#define MORECORE_CONTIGUOUS 0
/* m is always the appropriate local when MORECORE() is called. */
#define MORECORE(S) contiguous_mspace_morecore(m, S)
#define create_mspace HIDDEN_create_mspace_HIDDEN
#define destroy_mspace HIDDEN_destroy_mspace_HIDDEN
/* dlmalloc.c hasn't been included yet, so its mstate typedef isn't
 * visible here; spell the pointer type out for the forward declaration.
 */
typedef struct malloc_state *mstate0;
static void *contiguous_mspace_morecore(mstate0 m, ssize_t nb);
#endif

/* Compile dlmalloc with only the mspace_*() entry points exposed. */
#define MSPACES 1
#define ONLY_MSPACES 1
#include "../../../bionic/libc/bionic/dlmalloc.c"
60
#ifndef PAGESIZE
/* dlmalloc's cached system page size; valid after init_mparams(). */
#define PAGESIZE mparams.page_size
#endif

/* Round p up to the next multiple of alignment (a power of two).
 * The mask is computed in uintptr_t width: with a narrower unsigned
 * alignment (e.g. unsigned int on LP64), ~((alignment)-1) would be a
 * 32-bit mask that zero-extends and clears the high bits of a 64-bit
 * pointer.
 */
#define ALIGN_UP(p, alignment) \
    (((uintptr_t)(p) + (uintptr_t)(alignment)-1) & ~((uintptr_t)(alignment)-1))
67
68/* A direct copy of dlmalloc_usable_size(),
69 * which isn't compiled in when ONLY_MSPACES is set.
70 * The mspace parameter isn't actually necessary,
71 * but we include it to be consistent with the
72 * rest of the mspace_*() functions.
73 */
74size_t mspace_usable_size(mspace _unused, const void* mem) {
75 if (mem != 0) {
76 const mchunkptr p = mem2chunk(mem);
77 if (cinuse(p))
78 return chunksize(p) - overhead_for(p);
79 }
80 return 0;
81}
82
#if USE_CONTIGUOUS_MSPACES
#include <sys/mman.h>
#include <limits.h>

/* Sentinel stored in mspace_contig_state.magic to catch corruption
 * and misuse.
 */
#define CONTIG_STATE_MAGIC 0xf00dd00d
/* Bookkeeping kept at the page-aligned base of a contiguous mspace's
 * reservation.  It is recovered from an mspace/mstate pointer by
 * rounding down to a page boundary.
 */
struct mspace_contig_state {
  unsigned int magic; /* CONTIG_STATE_MAGIC when this state is valid */
  char *brk;          /* first byte not yet handed out to dlmalloc */
  char *top;          /* end of the reserved (mmapped) region */
  mspace m;           /* the mspace backed by this region */
};
94
/* MORECORE() implementation for contiguous mspaces.
 *
 * Extends the mspace's break by nb bytes (rounded up to a whole page)
 * within the region reserved by create_contiguous_mspace_with_name(),
 * returning the old break on success or CMFAIL if the reservation is
 * exhausted or protections can't be updated.  The contig state is
 * recovered from m alone, since both live in the same page-aligned
 * reservation.
 */
static void *contiguous_mspace_morecore(mstate m, ssize_t nb) {
  struct mspace_contig_state *cs;
  char *oldbrk;
  const unsigned int pagesize = PAGESIZE;

  /* The state header sits at the page-aligned base of the region. */
  cs = (struct mspace_contig_state *)((uintptr_t)m & ~(pagesize-1));
  assert(cs->magic == CONTIG_STATE_MAGIC);
  assert(cs->m == m);
  assert(nb >= 0);  /* xxx deal with the trim (negative nb) case */

  oldbrk = cs->brk;
  if (nb > 0) {
    /* Break to the first page boundary that satisfies the request.
     */
    char *newbrk = (char *)ALIGN_UP(oldbrk + nb, pagesize);
    if (newbrk > cs->top)
      return CMFAIL;

    /* Update the protection on the underlying memory.
     * Pages we've given to dlmalloc are read/write, and
     * pages we haven't are not accessible (read or write
     * will cause a seg fault).
     */
    if (mprotect(cs, newbrk - (char *)cs, PROT_READ | PROT_WRITE) < 0)
      return CMFAIL;
    if (newbrk != cs->top) {
      if (mprotect(newbrk, cs->top - newbrk, PROT_NONE) < 0)
        return CMFAIL;
    }

    cs->brk = newbrk;

    /* Make sure that dlmalloc will merge this block with the
     * initial block that was passed to create_mspace_with_base().
     * We don't care about extern vs. non-extern, so just clear it.
     */
    m->seg.sflags &= ~EXTERN_BIT;
  }

  return oldbrk;
}
136
137mspace create_contiguous_mspace_with_name(size_t starting_capacity,
138 size_t max_capacity, int locked, char const * name) {
139 int fd, ret;
140 struct mspace_contig_state *cs;
141 char buf[ASHMEM_NAME_LEN] = "mspace";
142 void *base;
143 unsigned int pagesize;
144 mstate m;
145
146 if (starting_capacity > max_capacity)
147 return (mspace)0;
148
149 init_mparams();
150 pagesize = PAGESIZE;
151
152 /* Create the anonymous memory that will back the mspace.
153 * This reserves all of the virtual address space we could
154 * ever need. Physical pages will be mapped as the memory
155 * is touched.
156 *
157 * Align max_capacity to a whole page.
158 */
159 max_capacity = (size_t)ALIGN_UP(max_capacity, pagesize);
160
161 if (name)
162 snprintf(buf, sizeof(buf), "mspace/%s", name);
163 fd = ashmem_create_region(buf, max_capacity);
164 if (fd < 0)
165 return (mspace)0;
166
167 base = mmap(NULL, max_capacity, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
168 close(fd);
169 if (base == MAP_FAILED)
170 return (mspace)0;
171
172 /* Make sure that base is at the beginning of a page.
173 */
174 assert(((uintptr_t)base & (pagesize-1)) == 0);
175
176 /* Reserve some space for the information that our MORECORE needs.
177 */
178 cs = base;
179
180 /* Create the mspace, pointing to the memory we just reserved.
181 */
182 m = create_mspace_with_base(base + sizeof(*cs), starting_capacity, locked);
183 if (m == (mspace)0)
184 goto error;
185
186 /* Make sure that m is in the same page as cs.
187 */
188 assert(((uintptr_t)m & (uintptr_t)~(pagesize-1)) == (uintptr_t)base);
189
190 /* Find out exactly how much of the memory the mspace
191 * is using.
192 */
193 cs->brk = m->seg.base + m->seg.size;
194 cs->top = (char *)base + max_capacity;
195 assert((char *)base <= cs->brk);
196 assert(cs->brk <= cs->top);
197
198 /* Prevent access to the memory we haven't handed out yet.
199 */
200 if (cs->brk != cs->top) {
201 /* mprotect() requires page-aligned arguments, but it's possible
202 * for cs->brk not to be page-aligned at this point.
203 */
204 char *prot_brk = (char *)ALIGN_UP(cs->brk, pagesize);
205 if (mprotect(prot_brk, cs->top - prot_brk, PROT_NONE) < 0)
206 goto error;
207 }
208
209 cs->m = m;
210 cs->magic = CONTIG_STATE_MAGIC;
211
212 return (mspace)m;
213
214error:
215 munmap(base, max_capacity);
216 return (mspace)0;
217}
218
219mspace create_contiguous_mspace(size_t starting_capacity,
220 size_t max_capacity, int locked) {
221 return create_contiguous_mspace_with_name(starting_capacity,
222 max_capacity, locked, NULL);
223}
224
225size_t destroy_contiguous_mspace(mspace msp) {
226 mstate ms = (mstate)msp;
227
228 if (ok_magic(ms)) {
229 struct mspace_contig_state *cs;
230 size_t length;
231 const unsigned int pagesize = PAGESIZE;
232
233 cs = (struct mspace_contig_state *)((uintptr_t)ms & ~(pagesize-1));
234 assert(cs->magic == CONTIG_STATE_MAGIC);
235 assert(cs->m == ms);
236
237 length = cs->top - (char *)cs;
238 if (munmap((char *)cs, length) != 0)
239 return length;
240 }
241 else {
242 USAGE_ERROR_ACTION(ms, ms);
243 }
244 return 0;
245}
246#endif