/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Linear memory allocation, tied to class loaders.
 */
#include "Dalvik.h"

#include <sys/mman.h>
#include <limits.h>
#include <errno.h>

//#define DISABLE_LINEAR_ALLOC

// Use ashmem to name the LinearAlloc section
#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#endif /* USE_ASHMEM */

/*
Overview

This is intended to be a simple, fast allocator for "write-once" storage.
The expectation is that this will hold small allocations that don't change,
such as parts of classes (vtables, fields, methods, interfaces).  Because
the lifetime of these items is tied to classes, which in turn are tied
to class loaders, we associate the storage with a ClassLoader object.

[ We don't yet support class unloading, and our ClassLoader implementation
is in flux, so for now we just have a single global region and the
"classLoader" argument is ignored. ]

By storing the data here, rather than on the system heap, we reduce heap
clutter, speed class loading, reduce the memory footprint (reduced heap
structure overhead), and most importantly we increase the number of pages
that remain shared between processes launched in "Zygote mode".

The 4 bytes preceding each block contain the block length.  This allows us
to support "free" and "realloc" calls in a limited way.  We don't free
storage once it has been allocated, but in some circumstances it could be
useful to erase storage to garbage values after a "free" or "realloc".
(Bad idea if we're trying to share pages.)  We need to align to 8-byte
boundaries for some architectures, so we have a 50-50 chance of getting
this for free in a given block.

A NULL value for the "classLoader" argument refers to the bootstrap class
loader, which is never unloaded (until the VM shuts down).

Because the memory is not expected to be updated, we can use mprotect to
guard the pages on debug builds.  Handy when tracking down corruption.
*/
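
/*
Illustrative usage sketch (hypothetical caller; "loader", "methods", and
"count" are invented for the example, but the entry points are the ones
defined in this file):

    Method* methods = (Method*) dvmLinearAlloc(loader, count * sizeof(Method));
    ...fill in the table while the pages are writable...
    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(loader, methods);    // seal the pages again
    ...
    dvmLinearFree(loader, methods);               // just sets the "free" flag
*/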

/* alignment for allocations; must be a power of 2 and >= HEADER_EXTRA */
#define BLOCK_ALIGN         8

/* default length of memory segment (worst case is probably "dexopt") */
#define DEFAULT_MAX_LENGTH  (5*1024*1024)

/* leave enough space for a length word */
#define HEADER_EXTRA        4

/* overload the length word */
#define LENGTHFLAG_FREE    0x80000000
#define LENGTHFLAG_RW      0x40000000
#define LENGTHFLAG_MASK    (~(LENGTHFLAG_FREE|LENGTHFLAG_RW))
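
/*
 * Example of the length-word encoding (illustrative values, assuming
 * ENFORCE_READ_ONLY is enabled):
 *
 *   *pLen  = 24 | LENGTHFLAG_RW;        // 0x40000018: 24-byte block, writable
 *   len    = *pLen & LENGTHFLAG_MASK;   // recovers 24
 *   *pLen |= LENGTHFLAG_FREE;           // 0xc0000018 once the block is freed
 */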

/* in case limits.h doesn't have it; must be a power of 2 */
#ifndef PAGESIZE
# define PAGESIZE           4096
#endif


/* fwd */
static void checkAllFree(Object* classLoader);


/*
 * Someday, retrieve the linear alloc struct associated with a particular
 * class loader.  For now, always use the bootstrap loader's instance.
 */
static inline LinearAllocHdr* getHeader(Object* classLoader)
{
    return gDvm.pBootLoaderAlloc;
}

/*
 * Convert a pointer to memory to a pointer to the block header (which is
 * currently just a length word).
 */
static inline u4* getBlockHeader(void* mem)
{
    return ((u4*) mem) - 1;
}

/*
 * Create a new linear allocation block.
 */
LinearAllocHdr* dvmLinearAllocCreate(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return (LinearAllocHdr*) 0x12345;
#endif
    LinearAllocHdr* pHdr;

    pHdr = (LinearAllocHdr*) malloc(sizeof(*pHdr));
    if (pHdr == NULL)
        return NULL;

    /*
     * "curOffset" points to the location of the next pre-block header,
     * which means we have to advance to the next BLOCK_ALIGN address and
     * back up.
     *
     * Note we leave the first page empty (see below), and start the
     * first entry on the second page at an offset that ensures the next
     * chunk of data will be properly aligned.
     */
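    /*
     * Worked example with the current constants (BLOCK_ALIGN=8,
     * HEADER_EXTRA=4, PAGESIZE=4096): firstOffset = (8-4) + 4096 = 4100,
     * so the first length word lands at offset 4100 and the first block
     * of user data at offset 4104, which is 8-byte aligned.
     */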
    assert(BLOCK_ALIGN >= HEADER_EXTRA);
    pHdr->curOffset = pHdr->firstOffset = (BLOCK_ALIGN-HEADER_EXTRA) + PAGESIZE;
    pHdr->mapLength = DEFAULT_MAX_LENGTH;

#ifdef USE_ASHMEM
    int fd;

    fd = ashmem_create_region("dalvik-LinearAlloc", DEFAULT_MAX_LENGTH);
    if (fd < 0) {
        LOGE("ashmem LinearAlloc failed: %s\n", strerror(errno));
        free(pHdr);
        return NULL;
    }

    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
            MAP_PRIVATE, fd, 0);
    if (pHdr->mapAddr == MAP_FAILED) {
        LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
            strerror(errno));
        free(pHdr);
        close(fd);
        return NULL;
    }

    close(fd);
#else /*USE_ASHMEM*/
    // MAP_ANON is listed as "deprecated" on Linux,
    // but MAP_ANONYMOUS is not defined under Mac OS X.
    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);
    if (pHdr->mapAddr == MAP_FAILED) {
        LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
            strerror(errno));
        free(pHdr);
        return NULL;
    }
#endif /*USE_ASHMEM*/

    /* region expected to begin on a page boundary */
    assert(((int) pHdr->mapAddr & (PAGESIZE-1)) == 0);

    /* the system should initialize newly-mapped memory to zero */
    assert(*(u4*) (pHdr->mapAddr + pHdr->curOffset) == 0);

    /*
     * Disable access to all except starting page.  We will enable pages
     * as we use them.  This helps prevent bad pointers from working.  The
     * pages start out PROT_NONE, become read/write while we access them,
     * then go to read-only after we finish our changes.
     *
     * We have to make the first page readable because we have 4 pad bytes,
     * followed by 4 length bytes, giving an initial offset of 8.  The
     * generic code below assumes that there could have been a previous
     * allocation that wrote into those 4 pad bytes, therefore the page
     * must have been marked readable by the previous allocation.
     *
     * We insert an extra page in here to force a break in the memory map
     * so we can see ourselves more easily in "showmap".  Otherwise this
     * stuff blends into the neighboring pages.  [TODO: do we still need
     * the extra page now that we have ashmem?]
     */
    if (mprotect(pHdr->mapAddr, pHdr->mapLength, PROT_NONE) != 0) {
        LOGW("LinearAlloc init mprotect failed: %s\n", strerror(errno));
        munmap(pHdr->mapAddr, pHdr->mapLength);
        free(pHdr);
        return NULL;
    }
    if (mprotect(pHdr->mapAddr + PAGESIZE, PAGESIZE,
            ENFORCE_READ_ONLY ? PROT_READ : PROT_READ|PROT_WRITE) != 0)
    {
        LOGW("LinearAlloc init mprotect #2 failed: %s\n", strerror(errno));
        munmap(pHdr->mapAddr, pHdr->mapLength);
        free(pHdr);
        return NULL;
    }

    if (ENFORCE_READ_ONLY) {
        /* allocate the per-page ref count */
        int numPages = (pHdr->mapLength+PAGESIZE-1) / PAGESIZE;
        pHdr->writeRefCount = calloc(numPages, sizeof(short));
        if (pHdr->writeRefCount == NULL) {
            munmap(pHdr->mapAddr, pHdr->mapLength);
            free(pHdr);
            return NULL;
        }
    }

    dvmInitMutex(&pHdr->lock);

    LOGV("LinearAlloc: created region at %p-%p\n",
        pHdr->mapAddr, pHdr->mapAddr + pHdr->mapLength-1);

    return pHdr;
}

/*
 * Destroy a linear allocation area.
 *
 * We do a trivial "has everything been freed?" check before unmapping the
 * memory and freeing the LinearAllocHdr.
 */
void dvmLinearAllocDestroy(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);
    if (pHdr == NULL)
        return;

    checkAllFree(classLoader);

    //dvmLinearAllocDump(classLoader);

    LOGV("Unmapping linear allocator base=%p\n", pHdr->mapAddr);
    LOGD("LinearAlloc %p used %d of %d (%d%%)\n",
        classLoader, pHdr->curOffset, pHdr->mapLength,
        (pHdr->curOffset * 100) / pHdr->mapLength);

    if (munmap(pHdr->mapAddr, pHdr->mapLength) != 0) {
        LOGW("LinearAlloc munmap(%p, %d) failed: %s\n",
            pHdr->mapAddr, pHdr->mapLength, strerror(errno));
    }
    free(pHdr);
}

/*
 * Allocate "size" bytes of storage, associated with a particular class
 * loader.
 *
 * It's okay for size to be zero.
 *
 * We always leave "curOffset" pointing at the next place where we will
 * store the header that precedes the returned storage.
 *
 * This aborts the VM on failure, so it's not necessary to check for a
 * NULL return value.
 */
void* dvmLinearAlloc(Object* classLoader, size_t size)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    int startOffset, nextOffset;
    int lastGoodOff, firstWriteOff, lastWriteOff;

#ifdef DISABLE_LINEAR_ALLOC
    return calloc(1, size);
#endif

    LOGVV("--- LinearAlloc(%p, %d)\n", classLoader, size);

    /*
     * What we'd like to do is just determine the new end-of-alloc size
     * and atomic-swap the updated value in.  The trouble is that the
     * first time we reach a new page, we need to call mprotect() to
     * make the page available, and we don't want to call mprotect() on
     * every allocation.  The troublesome situation is:
     *  - thread A allocs across a page boundary, but gets preempted
     *    before mprotect() completes
     *  - thread B allocs within the new page, and doesn't call mprotect()
     */
    dvmLockMutex(&pHdr->lock);

    startOffset = pHdr->curOffset;
    assert(((startOffset + HEADER_EXTRA) & (BLOCK_ALIGN-1)) == 0);

    /*
     * Compute the new offset.  The old offset points at the address where
     * we will store the hidden block header, so we advance past that,
     * add the size of data they want, add another header's worth so we
     * know we have room for that, and round up to BLOCK_ALIGN.  That's
     * the next location where we'll put user data.  We then subtract the
     * chunk header size off so we're back to the header pointer.
     *
     * Examples:
     *   old=12 size=3 new=((12+(4*2)+3+7) & ~7)-4 = 24-4 --> 20
     *   old=12 size=5 new=((12+(4*2)+5+7) & ~7)-4 = 32-4 --> 28
     */
    nextOffset = ((startOffset + HEADER_EXTRA*2 + size + (BLOCK_ALIGN-1))
                    & ~(BLOCK_ALIGN-1)) - HEADER_EXTRA;
    LOGVV("--- old=%d size=%d new=%d\n", startOffset, size, nextOffset);

    if (nextOffset > pHdr->mapLength) {
        /*
         * We don't have to abort here.  We could fall back on the system
         * malloc(), and have our "free" call figure out what to do.  Only
         * works if the users of these functions actually free everything
         * they allocate.
         */
        LOGE("LinearAlloc exceeded capacity (%d), last=%d\n",
            pHdr->mapLength, (int) size);
        dvmAbort();
    }

    /*
     * Round up "size" to encompass the entire region, including the 0-7
     * pad bytes before the next chunk header.  This way we get maximum
     * utility out of "realloc", and when we're doing ENFORCE_READ_ONLY
     * stuff we always treat the full extent.
     */
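    /*
     * Continuing the first example above: old=12 size=3 gave new=20, so
     * "size" below becomes 20 - (12+4) = 4; the 3-byte request is recorded
     * as 4 bytes, covering the pad byte before the next chunk header.
     */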
    size = nextOffset - (startOffset + HEADER_EXTRA);
    LOGVV("--- (size now %d)\n", size);

    /*
     * See if we are starting on or have crossed into a new page.  If so,
     * call mprotect on the page(s) we're about to write to.  We have to
     * page-align the start address, but don't have to make the length a
     * PAGESIZE multiple (but we do it anyway).
     *
     * Note that "startOffset" is not the last *allocated* byte, but rather
     * the offset of the first *unallocated* byte (which we are about to
     * write the chunk header to).  "nextOffset" is similar.
     *
     * If ENFORCE_READ_ONLY is enabled, we have to call mprotect even if
     * we've written to this page before, because it might be read-only.
     */
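    /*
     * Worked example (illustrative offsets, PAGESIZE=4096): startOffset=4100
     * and nextOffset=8300 give lastGoodOff=4096, firstWriteOff=4096, and
     * lastWriteOff=8192; lastGoodOff != lastWriteOff, so the mprotect()
     * below covers the two pages starting at map offset 4096 (len=8192).
     */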
    lastGoodOff = (startOffset-1) & ~(PAGESIZE-1);
    firstWriteOff = startOffset & ~(PAGESIZE-1);
    lastWriteOff = (nextOffset-1) & ~(PAGESIZE-1);
    LOGVV("---  lastGood=0x%04x firstWrite=0x%04x lastWrite=0x%04x\n",
        lastGoodOff, firstWriteOff, lastWriteOff);
    if (lastGoodOff != lastWriteOff || ENFORCE_READ_ONLY) {
        int cc, start, len;

        start = firstWriteOff;
        assert(start <= nextOffset);
        len = (lastWriteOff - firstWriteOff) + PAGESIZE;

        LOGVV("---    calling mprotect(start=%d len=%d RW)\n", start, len);
        cc = mprotect(pHdr->mapAddr + start, len, PROT_READ | PROT_WRITE);
        if (cc != 0) {
            LOGE("LinearAlloc mprotect (+%d %d) failed: %s\n",
                start, len, strerror(errno));
            /* we're going to fail soon, might as well do it now */
            dvmAbort();
        }
    }

    /* update the ref counts on the now-writable pages */
    if (ENFORCE_READ_ONLY) {
        int i, start, end;

        start = firstWriteOff / PAGESIZE;
        end = lastWriteOff / PAGESIZE;

        LOGVV("---  marking pages %d-%d RW (alloc %d at %p)\n",
            start, end, size, pHdr->mapAddr + startOffset + HEADER_EXTRA);
        for (i = start; i <= end; i++)
            pHdr->writeRefCount[i]++;
    }

    /* stow the size in the header */
    if (ENFORCE_READ_ONLY)
        *(u4*)(pHdr->mapAddr + startOffset) = size | LENGTHFLAG_RW;
    else
        *(u4*)(pHdr->mapAddr + startOffset) = size;

    /*
     * Update data structure.
     */
    pHdr->curOffset = nextOffset;

    dvmUnlockMutex(&pHdr->lock);
    return pHdr->mapAddr + startOffset + HEADER_EXTRA;
}

/*
 * Helper function, replaces strdup().
 */
char* dvmLinearStrdup(Object* classLoader, const char* str)
{
#ifdef DISABLE_LINEAR_ALLOC
    return strdup(str);
#endif
    int len = strlen(str);
    void* mem = dvmLinearAlloc(classLoader, len+1);
    memcpy(mem, str, len+1);
    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(classLoader, mem);
    return (char*) mem;
}

/*
 * "Reallocate" a piece of memory.
 *
 * If the new size is <= the old size, we return the original pointer
 * without doing anything.
 *
 * If the new size is > the old size, we allocate new storage, copy the
 * old data over, and mark the old block as free.
 */
void* dvmLinearRealloc(Object* classLoader, void* mem, size_t newSize)
{
#ifdef DISABLE_LINEAR_ALLOC
    return realloc(mem, newSize);
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    /* make sure we have the right region (and mem != NULL) */
    assert(mem != NULL);
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    const u4* pLen = getBlockHeader(mem);
    LOGV("--- LinearRealloc(%d) old=%d\n", newSize, *pLen);

    /* handle size reduction case */
    if (*pLen >= newSize) {
        if (ENFORCE_READ_ONLY)
            dvmLinearSetReadWrite(classLoader, mem);
        return mem;
    }

    void* newMem;

    newMem = dvmLinearAlloc(classLoader, newSize);
    assert(newMem != NULL);
    memcpy(newMem, mem, *pLen);
    dvmLinearFree(classLoader, mem);

    return newMem;
}
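
/*
 * Illustrative sketch of a caller growing a write-once table ("loader" and
 * the element counts are invented for the example):
 *
 *   u4* table = (u4*) dvmLinearAlloc(loader, 4 * sizeof(u4));
 *   ...
 *   table = (u4*) dvmLinearRealloc(loader, table, 8 * sizeof(u4));
 *   ...fill in the new entries...
 *   if (ENFORCE_READ_ONLY)
 *       dvmLinearSetReadOnly(loader, table);   // seal the (possibly new) pages
 */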


/*
 * Update the read/write status of one or more pages.
 */
static void updatePages(Object* classLoader, void* mem, int direction)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    dvmLockMutex(&pHdr->lock);

    /* make sure we have the right region */
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    u4* pLen = getBlockHeader(mem);
    u4 len = *pLen & LENGTHFLAG_MASK;
    int firstPage, lastPage;

    firstPage = ((u1*)pLen - (u1*)pHdr->mapAddr) / PAGESIZE;
    lastPage = ((u1*)mem - (u1*)pHdr->mapAddr + (len-1)) / PAGESIZE;
    LOGVV("--- updating pages %d-%d (%d)\n", firstPage, lastPage, direction);

    int i, cc;

    /*
     * Update individual pages.  We could do some sort of "lazy update" to
     * combine mprotect calls, but that's almost certainly more trouble
     * than it's worth.
     */
    for (i = firstPage; i <= lastPage; i++) {
        if (direction < 0) {
            /*
             * Trying to mark read-only.
             */
            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) == 0) {
                    LOGW("Double RO on %p\n", mem);
                    dvmAbort();
                } else
                    *pLen &= ~LENGTHFLAG_RW;
            }

            if (pHdr->writeRefCount[i] == 0) {
                LOGE("Can't make page %d any less writable\n", i);
                dvmAbort();
            }
            pHdr->writeRefCount[i]--;
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("---  prot page %d RO\n", i);
                cc = mprotect(pHdr->mapAddr + PAGESIZE * i, PAGESIZE, PROT_READ);
                assert(cc == 0);
            }
        } else {
            /*
             * Trying to mark writable.
             */
            if (pHdr->writeRefCount[i] >= 32767) {
                LOGE("Can't make page %d any more writable\n", i);
                dvmAbort();
            }
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("---  prot page %d RW\n", i);
                cc = mprotect(pHdr->mapAddr + PAGESIZE * i, PAGESIZE,
                        PROT_READ | PROT_WRITE);
                assert(cc == 0);
            }
            pHdr->writeRefCount[i]++;

            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) != 0) {
                    LOGW("Double RW on %p\n", mem);
                    dvmAbort();
                } else
                    *pLen |= LENGTHFLAG_RW;
            }
        }
    }

    dvmUnlockMutex(&pHdr->lock);
}

/*
 * Try to mark the pages in which a chunk of memory lives as read-only.
 * Whether or not the pages actually change state depends on how many
 * others are trying to access the same pages.
 *
 * Only call here if ENFORCE_READ_ONLY is true.
 */
void dvmLinearSetReadOnly(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    updatePages(classLoader, mem, -1);
}

/*
 * Make the pages on which "mem" sits read-write.
 *
 * This covers the header as well as the data itself.  (We could add a
 * "header-only" mode for dvmLinearFree.)
 *
 * Only call here if ENFORCE_READ_ONLY is true.
 */
void dvmLinearSetReadWrite(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    updatePages(classLoader, mem, 1);
}
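
/*
 * Illustrative sketch of the edit pattern these two calls support
 * (hypothetical caller; the names are invented for the example):
 *
 *   dvmLinearSetReadWrite(loader, methods);   // bump the page ref counts
 *   methods[idx].accessFlags |= newFlags;     // safe to write now
 *   dvmLinearSetReadOnly(loader, methods);    // drop counts, maybe back to RO
 */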

/*
 * Mark an allocation as free.
 */
void dvmLinearFree(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    free(mem);
    return;
#endif
    if (mem == NULL)
        return;

    LinearAllocHdr* pHdr = getHeader(classLoader);

    /* make sure we have the right region */
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadWrite(classLoader, mem);

    u4* pLen = getBlockHeader(mem);
    *pLen |= LENGTHFLAG_FREE;

    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(classLoader, mem);
}

/*
 * For debugging, dump the contents of a linear alloc area.
 *
 * We grab the lock so that the header contents and list output are
 * consistent.
 */
void dvmLinearAllocDump(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    dvmLockMutex(&pHdr->lock);

    LOGI("LinearAlloc classLoader=%p\n", classLoader);
    LOGI("  mapAddr=%p mapLength=%d firstOffset=%d\n",
        pHdr->mapAddr, pHdr->mapLength, pHdr->firstOffset);
    LOGI("  curOffset=%d\n", pHdr->curOffset);

    int off = pHdr->firstOffset;
    u4 rawLen, fullLen;

    while (off < pHdr->curOffset) {
        rawLen = *(u4*) (pHdr->mapAddr + off);
        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
                    & ~(BLOCK_ALIGN-1));

        LOGI("  %p (%3d): %clen=%d%s\n", pHdr->mapAddr + off + HEADER_EXTRA,
            (int) ((off + HEADER_EXTRA) / PAGESIZE),
            (rawLen & LENGTHFLAG_FREE) != 0 ? '*' : ' ',
            rawLen & LENGTHFLAG_MASK,
            (rawLen & LENGTHFLAG_RW) != 0 ? " [RW]" : "");

        off += fullLen;
    }

    if (ENFORCE_READ_ONLY) {
        LOGI("writeRefCount map:\n");

        int numPages = (pHdr->mapLength+PAGESIZE-1) / PAGESIZE;
        int zstart = 0;
        int i;

        for (i = 0; i < numPages; i++) {
            int count = pHdr->writeRefCount[i];

            if (count != 0) {
                if (zstart < i-1)
                    printf(" %d-%d: zero\n", zstart, i-1);
                else if (zstart == i-1)
                    printf(" %d: zero\n", zstart);
                zstart = i+1;
                printf(" %d: %d\n", i, count);
            }
        }
        if (zstart < i)
            printf(" %d-%d: zero\n", zstart, i-1);
    }

    LOGD("LinearAlloc %p using %d of %d (%d%%)\n",
        classLoader, pHdr->curOffset, pHdr->mapLength,
        (pHdr->curOffset * 100) / pHdr->mapLength);

    dvmUnlockMutex(&pHdr->lock);
}

/*
 * Verify that all blocks are freed.
 *
 * This should only be done as we're shutting down, but there could be a
 * daemon thread that's still trying to do something, so we grab the locks.
 */
static void checkAllFree(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    dvmLockMutex(&pHdr->lock);

    int off = pHdr->firstOffset;
    u4 rawLen, fullLen;

    while (off < pHdr->curOffset) {
        rawLen = *(u4*) (pHdr->mapAddr + off);
        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
                    & ~(BLOCK_ALIGN-1));

        if ((rawLen & LENGTHFLAG_FREE) == 0) {
            LOGW("LinearAlloc %p not freed: %p len=%d\n", classLoader,
                pHdr->mapAddr + off + HEADER_EXTRA, rawLen & LENGTHFLAG_MASK);
        }

        off += fullLen;
    }

    dvmUnlockMutex(&pHdr->lock);
}

/*
 * Determine if [start, start+length) is contained in the in-use area of
 * a single LinearAlloc.  The full set of linear allocators is scanned.
 *
 * [ Since we currently only have one region, this is pretty simple.  In
 * the future we'll need to traverse a table of class loaders. ]
 */
bool dvmLinearAllocContains(const void* start, size_t length)
{
    LinearAllocHdr* pHdr = getHeader(NULL);

    if (pHdr == NULL)
        return false;

    return (char*) start >= pHdr->mapAddr &&
        ((char*)start + length) <= (pHdr->mapAddr + pHdr->curOffset);
}