/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Linear memory allocation, tied to class loaders.
 */
#include "Dalvik.h"

#include <sys/mman.h>
#include <limits.h>
#include <errno.h>

//#define DISABLE_LINEAR_ALLOC

// Use ashmem to name the LinearAlloc section
#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#endif /* USE_ASHMEM */

/*
Overview

This is intended to be a simple, fast allocator for "write-once" storage.
The expectation is that this will hold small allocations that don't change,
such as parts of classes (vtables, fields, methods, interfaces). Because
the lifetime of these items is tied to classes, which in turn are tied
to class loaders, we associate the storage with a ClassLoader object.

[ We don't yet support class unloading, and our ClassLoader implementation
is in flux, so for now we just have a single global region and the
"classLoader" argument is ignored. ]

By storing the data here, rather than on the system heap, we reduce heap
clutter, speed class loading, reduce the memory footprint (reduced heap
structure overhead), and most importantly we increase the number of pages
that remain shared between processes launched in "Zygote mode".

The 4 bytes preceding each block contain the block length. This allows us
to support "free" and "realloc" calls in a limited way. We don't free
storage once it has been allocated, but in some circumstances it could be
useful to erase storage to garbage values after a "free" or "realloc".
(Bad idea if we're trying to share pages.) We need to align to 8-byte
boundaries for some architectures, so we have a 50-50 chance of getting
this for free in a given block.

A NULL value for the "classLoader" argument refers to the bootstrap class
loader, which is never unloaded (until the VM shuts down).

Because the memory is not expected to be updated, we can use mprotect to
guard the pages on debug builds. Handy when tracking down corruption.
*/
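
/*
A rough usage sketch (hypothetical caller; "storage", "scratchBuffer" and
"numBytes" are placeholder names, and the real callers live in the
class-loading code):

    void* storage = dvmLinearAlloc(classLoader, numBytes);
    memcpy(storage, scratchBuffer, numBytes);           // filled in exactly once
    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(classLoader, storage);     // seal the pages
    ...
    dvmLinearFree(classLoader, storage);    // bookkeeping only; the bytes stay mapped

dvmLinearAlloc() aborts the VM on failure, so callers don't need to check
for NULL.
*/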

/* alignment for allocations; must be power of 2, and currently >= HEADER_EXTRA */
#define BLOCK_ALIGN 8

/* default length of memory segment (worst case is probably "dexopt") */
#define DEFAULT_MAX_LENGTH (5*1024*1024)

/* leave enough space for a length word */
#define HEADER_EXTRA 4

/* overload the length word */
#define LENGTHFLAG_FREE 0x80000000
#define LENGTHFLAG_RW 0x40000000
#define LENGTHFLAG_MASK (~(LENGTHFLAG_FREE|LENGTHFLAG_RW))
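
/*
 * Worked example of the length-word encoding (value chosen for
 * illustration): a raw header word of 0x4000000c has LENGTHFLAG_RW set,
 * LENGTHFLAG_FREE clear, and (0x4000000c & LENGTHFLAG_MASK) == 12, the
 * block's payload length in bytes.
 */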

/* in case limits.h doesn't have it; must be a power of 2 */
#ifndef PAGESIZE
# define PAGESIZE 4096
#endif


/* fwd */
static void checkAllFree(Object* classLoader);


91/*
92 * Someday, retrieve the linear alloc struct associated with a particular
93 * class loader. For now, always use the boostrap loader's instance.
94 */
static inline LinearAllocHdr* getHeader(Object* classLoader)
{
    return gDvm.pBootLoaderAlloc;
}

/*
 * Convert a pointer to memory to a pointer to the block header (which is
 * currently just a length word).
 */
static inline u4* getBlockHeader(void* mem)
{
    return ((u4*) mem) - 1;
}

/*
 * Create a new linear allocation block.
 */
LinearAllocHdr* dvmLinearAllocCreate(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return (LinearAllocHdr*) 0x12345;
#endif
    LinearAllocHdr* pHdr;

    pHdr = (LinearAllocHdr*) malloc(sizeof(*pHdr));

    /*
     * "curOffset" points to the location of the next pre-block header,
     * which means we have to advance to the next BLOCK_ALIGN address and
     * back up.
     *
     * Note we leave the first page empty (see below), and start the
     * first entry on the second page at an offset that ensures the next
     * chunk of data will be properly aligned.
     */
    assert(BLOCK_ALIGN >= HEADER_EXTRA);
    pHdr->curOffset = pHdr->firstOffset = (BLOCK_ALIGN-HEADER_EXTRA) + PAGESIZE;
    pHdr->mapLength = DEFAULT_MAX_LENGTH;
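
    /*
     * Worked example with the defaults (BLOCK_ALIGN=8, HEADER_EXTRA=4,
     * PAGESIZE=4096): firstOffset = (8-4) + 4096 = 4100, so the first
     * length word lands at offset 4100 and the first user pointer at
     * offset 4104, which satisfies the 8-byte alignment requirement.
     */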

#ifdef USE_ASHMEM
    int fd;

    fd = ashmem_create_region("dalvik-LinearAlloc", DEFAULT_MAX_LENGTH);
    if (fd < 0) {
        LOGE("ashmem LinearAlloc failed %s\n", strerror(errno));
        free(pHdr);
        return NULL;
    }

    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
            MAP_PRIVATE, fd, 0);
    if (pHdr->mapAddr == MAP_FAILED) {
        LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
            strerror(errno));
        free(pHdr);
        close(fd);
        return NULL;
    }

    close(fd);
#else /*USE_ASHMEM*/
    // MAP_ANON is listed as "deprecated" on Linux,
    // but MAP_ANONYMOUS is not defined under Mac OS X.
    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);
    if (pHdr->mapAddr == MAP_FAILED) {
        LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
            strerror(errno));
        free(pHdr);
        return NULL;
    }
#endif /*USE_ASHMEM*/

    /* region expected to begin on a page boundary */
    assert(((int) pHdr->mapAddr & (PAGESIZE-1)) == 0);

    /* the system should initialize newly-mapped memory to zero */
    assert(*(u4*) (pHdr->mapAddr + pHdr->curOffset) == 0);

    /*
     * Disable access to all except starting page. We will enable pages
     * as we use them. This helps prevent bad pointers from working. The
     * pages start out PROT_NONE, become read/write while we access them,
     * then go to read-only after we finish our changes.
     *
     * We have to make the first page readable because we have 4 pad bytes,
     * followed by 4 length bytes, giving an initial offset of 8. The
     * generic code below assumes that there could have been a previous
     * allocation that wrote into those 4 pad bytes, therefore the page
     * must have been marked readable by the previous allocation.
     *
     * We insert an extra page in here to force a break in the memory map
     * so we can see ourselves more easily in "showmap". Otherwise this
     * stuff blends into the neighboring pages. [TODO: do we still need
     * the extra page now that we have ashmem?]
     */
    if (mprotect(pHdr->mapAddr, pHdr->mapLength, PROT_NONE) != 0) {
        LOGW("LinearAlloc init mprotect failed: %s\n", strerror(errno));
        munmap(pHdr->mapAddr, pHdr->mapLength);     /* don't leak the mapping */
        free(pHdr);
        return NULL;
    }
    if (mprotect(pHdr->mapAddr + PAGESIZE, PAGESIZE,
            ENFORCE_READ_ONLY ? PROT_READ : PROT_READ|PROT_WRITE) != 0)
    {
        LOGW("LinearAlloc init mprotect #2 failed: %s\n", strerror(errno));
        munmap(pHdr->mapAddr, pHdr->mapLength);
        free(pHdr);
        return NULL;
    }

    if (ENFORCE_READ_ONLY) {
        /* allocate the per-page ref count */
        int numPages = (pHdr->mapLength+PAGESIZE-1) / PAGESIZE;
        pHdr->writeRefCount = calloc(numPages, sizeof(short));
        if (pHdr->writeRefCount == NULL) {
            munmap(pHdr->mapAddr, pHdr->mapLength);
            free(pHdr);
            return NULL;
        }
    }

    dvmInitMutex(&pHdr->lock);

    LOGV("LinearAlloc: created region at %p-%p\n",
        pHdr->mapAddr, pHdr->mapAddr + pHdr->mapLength-1);

    return pHdr;
}

/*
 * Destroy a linear allocation area.
 *
 * We do a trivial "has everything been freed?" check before unmapping the
 * memory and freeing the LinearAllocHdr.
 */
void dvmLinearAllocDestroy(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);
    if (pHdr == NULL)
        return;

    checkAllFree(classLoader);

    //dvmLinearAllocDump(classLoader);

    LOGV("Unmapping linear allocator base=%p\n", pHdr->mapAddr);
    LOGD("LinearAlloc %p used %d of %d (%d%%)\n",
        classLoader, pHdr->curOffset, pHdr->mapLength,
        (pHdr->curOffset * 100) / pHdr->mapLength);

    if (munmap(pHdr->mapAddr, pHdr->mapLength) != 0) {
        LOGW("LinearAlloc munmap(%p, %d) failed: %s\n",
            pHdr->mapAddr, pHdr->mapLength, strerror(errno));
    }
    free(pHdr);
}

/*
 * Allocate "size" bytes of storage, associated with a particular class
 * loader.
 *
 * It's okay for size to be zero.
 *
 * We always leave "curOffset" pointing at the next place where we will
 * store the header that precedes the returned storage.
 *
 * This aborts the VM on failure, so it's not necessary to check for a
 * NULL return value.
 */
void* dvmLinearAlloc(Object* classLoader, size_t size)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    int startOffset, nextOffset;
    int lastGoodOff, firstWriteOff, lastWriteOff;

#ifdef DISABLE_LINEAR_ALLOC
    return calloc(1, size);
#endif

    LOGVV("--- LinearAlloc(%p, %d)\n", classLoader, size);

    /*
     * What we'd like to do is just determine the new end-of-alloc size
     * and atomic-swap the updated value in. The trouble is that the
     * first time we reach a new page, we need to call mprotect() to
     * make the page available, and we don't want to call mprotect() on
     * every allocation. The problematic situation is:
     *  - thread A allocs across a page boundary, but gets preempted
     *    before mprotect() completes
     *  - thread B allocs within the new page, and doesn't call mprotect()
     */
    dvmLockMutex(&pHdr->lock);

    startOffset = pHdr->curOffset;
    assert(((startOffset + HEADER_EXTRA) & (BLOCK_ALIGN-1)) == 0);

    /*
     * Compute the new offset. The old offset points at the address where
     * we will store the hidden block header, so we advance past that,
     * add the size of data they want, add another header's worth so we
     * know we have room for that, and round up to BLOCK_ALIGN. That's
     * the next location where we'll put user data. We then subtract the
     * chunk header size off so we're back to the header pointer.
     *
     * Examples:
     *   old=12 size=3 new=((12+(4*2)+3+7) & ~7)-4 = 24-4 --> 20
     *   old=12 size=5 new=((12+(4*2)+5+7) & ~7)-4 = 32-4 --> 28
     */
    nextOffset = ((startOffset + HEADER_EXTRA*2 + size + (BLOCK_ALIGN-1))
                    & ~(BLOCK_ALIGN-1)) - HEADER_EXTRA;
    LOGVV("--- old=%d size=%d new=%d\n", startOffset, size, nextOffset);

    if (nextOffset > pHdr->mapLength) {
        /*
         * We don't have to abort here. We could fall back on the system
         * malloc(), and have our "free" call figure out what to do. Only
         * works if the users of these functions actually free everything
         * they allocate.
         */
        LOGE("LinearAlloc exceeded capacity (%d), last=%d\n",
            pHdr->mapLength, (int) size);
        dvmAbort();
    }

    /*
     * Round up "size" to encompass the entire region, including the 0-7
     * pad bytes before the next chunk header. This way we get maximum
     * utility out of "realloc", and when we're doing ENFORCE_READ_ONLY
     * stuff we always treat the full extent.
     */
    size = nextOffset - (startOffset + HEADER_EXTRA);
    LOGVV("--- (size now %d)\n", size);
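
    /*
     * Continuing the examples above: old=12 size=3 gives nextOffset=20,
     * so the stored size becomes 20 - (12+4) = 4; old=12 size=5 gives
     * nextOffset=28 and a stored size of 28 - (12+4) = 12.
     */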

    /*
     * See if we are starting on or have crossed into a new page. If so,
     * call mprotect on the page(s) we're about to write to. We have to
     * page-align the start address, but don't have to make the length a
     * PAGESIZE multiple (but we do it anyway).
     *
     * Note that "startOffset" is not the last *allocated* byte, but rather
     * the offset of the first *unallocated* byte (which we are about to
     * write the chunk header to). "nextOffset" is similar.
     *
     * If ENFORCE_READ_ONLY is enabled, we have to call mprotect even if
     * we've written to this page before, because it might be read-only.
     */
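    /*
     * Worked example (assuming PAGESIZE=4096): an allocation with
     * startOffset=8180 and nextOffset=8220 has lastGoodOff=4096 but
     * lastWriteOff=8192, so it spills onto a new page and we mprotect()
     * from firstWriteOff=4096 for two pages. One with startOffset=8196
     * and nextOffset=8228 stays on the page at 8192, so the mprotect()
     * call is skipped (unless ENFORCE_READ_ONLY forces it).
     */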
    lastGoodOff = (startOffset-1) & ~(PAGESIZE-1);
    firstWriteOff = startOffset & ~(PAGESIZE-1);
    lastWriteOff = (nextOffset-1) & ~(PAGESIZE-1);
    LOGVV("--- lastGood=0x%04x firstWrite=0x%04x lastWrite=0x%04x\n",
        lastGoodOff, firstWriteOff, lastWriteOff);
    if (lastGoodOff != lastWriteOff || ENFORCE_READ_ONLY) {
        int cc, start, len;

        start = firstWriteOff;
        assert(start <= nextOffset);
        len = (lastWriteOff - firstWriteOff) + PAGESIZE;

        LOGVV("--- calling mprotect(start=%d len=%d RW)\n", start, len);
        cc = mprotect(pHdr->mapAddr + start, len, PROT_READ | PROT_WRITE);
        if (cc != 0) {
            LOGE("LinearAlloc mprotect (+%d %d) failed: %s\n",
                start, len, strerror(errno));
            /* we're going to fail soon, might as well do it now */
            dvmAbort();
        }
    }

    /* update the ref counts on the now-writable pages */
    if (ENFORCE_READ_ONLY) {
        int i, start, end;

        start = firstWriteOff / PAGESIZE;
        end = lastWriteOff / PAGESIZE;

        LOGVV("--- marking pages %d-%d RW (alloc %d at %p)\n",
            start, end, size, pHdr->mapAddr + startOffset + HEADER_EXTRA);
        for (i = start; i <= end; i++)
            pHdr->writeRefCount[i]++;
    }

    /* stow the size in the header */
    if (ENFORCE_READ_ONLY)
        *(u4*)(pHdr->mapAddr + startOffset) = size | LENGTHFLAG_RW;
    else
        *(u4*)(pHdr->mapAddr + startOffset) = size;

    /*
     * Update data structure.
     */
    pHdr->curOffset = nextOffset;

    dvmUnlockMutex(&pHdr->lock);
    return pHdr->mapAddr + startOffset + HEADER_EXTRA;
}

/*
 * Helper function, replaces strdup().
 */
char* dvmLinearStrdup(Object* classLoader, const char* str)
{
#ifdef DISABLE_LINEAR_ALLOC
    return strdup(str);
#endif
    int len = strlen(str);
    void* mem = dvmLinearAlloc(classLoader, len+1);
    memcpy(mem, str, len+1);
    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(classLoader, mem);
    return (char*) mem;
}

/*
 * "Reallocate" a piece of memory.
 *
 * If the new size is <= the old size, we return the original pointer
 * without doing anything.
 *
 * If the new size is > the old size, we allocate new storage, copy the
 * old stuff over, and mark the old storage as free.
 */
void* dvmLinearRealloc(Object* classLoader, void* mem, size_t newSize)
{
#ifdef DISABLE_LINEAR_ALLOC
    return realloc(mem, newSize);
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    /* make sure we have the right region (and mem != NULL) */
    assert(mem != NULL);
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    const u4* pLen = getBlockHeader(mem);
    u4 oldSize = *pLen & LENGTHFLAG_MASK;   /* strip the status flags */
    LOGV("--- LinearRealloc(%d) old=%d\n", newSize, oldSize);

    /* handle size reduction case */
    if (oldSize >= newSize) {
        if (ENFORCE_READ_ONLY)
            dvmLinearSetReadWrite(classLoader, mem);
        return mem;
    }

    void* newMem;

    newMem = dvmLinearAlloc(classLoader, newSize);
    assert(newMem != NULL);
    memcpy(newMem, mem, oldSize);
    dvmLinearFree(classLoader, mem);

    return newMem;
}


/*
 * Update the read/write status of one or more pages.
 */
static void updatePages(Object* classLoader, void* mem, int direction)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    dvmLockMutex(&pHdr->lock);

    /* make sure we have the right region */
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    u4* pLen = getBlockHeader(mem);
    u4 len = *pLen & LENGTHFLAG_MASK;
    int firstPage, lastPage;

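    /*
     * The affected range runs from the page holding the length word
     * (which can be the page before the user data, since the header sits
     * 4 bytes below "mem") through the page holding the last data byte.
     */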
    firstPage = ((u1*)pLen - (u1*)pHdr->mapAddr) / PAGESIZE;
    lastPage = ((u1*)mem - (u1*)pHdr->mapAddr + (len-1)) / PAGESIZE;
    LOGVV("--- updating pages %d-%d (%d)\n", firstPage, lastPage, direction);

    int i, cc;

    /*
     * Update individual pages. We could do some sort of "lazy update" to
     * combine mprotect calls, but that's almost certainly more trouble
     * than it's worth.
     */
    for (i = firstPage; i <= lastPage; i++) {
        if (direction < 0) {
            /*
             * Trying to mark read-only.
             */
            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) == 0) {
                    LOGW("Double RO on %p\n", mem);
                    dvmAbort();
                } else
                    *pLen &= ~LENGTHFLAG_RW;
            }

            if (pHdr->writeRefCount[i] == 0) {
                LOGE("Can't make page %d any less writable\n", i);
                dvmAbort();
            }
            pHdr->writeRefCount[i]--;
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("--- prot page %d RO\n", i);
                cc = mprotect(pHdr->mapAddr + PAGESIZE * i, PAGESIZE, PROT_READ);
                assert(cc == 0);
            }
        } else {
            /*
             * Trying to mark writable.
             */
            if (pHdr->writeRefCount[i] >= 32767) {
                LOGE("Can't make page %d any more writable\n", i);
                dvmAbort();
            }
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("--- prot page %d RW\n", i);
                cc = mprotect(pHdr->mapAddr + PAGESIZE * i, PAGESIZE,
                        PROT_READ | PROT_WRITE);
                assert(cc == 0);
            }
            pHdr->writeRefCount[i]++;

            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) != 0) {
                    LOGW("Double RW on %p\n", mem);
                    dvmAbort();
                } else
                    *pLen |= LENGTHFLAG_RW;
            }
        }
    }

    dvmUnlockMutex(&pHdr->lock);
}

/*
 * Try to mark the pages in which a chunk of memory lives as read-only.
 * Whether or not the pages actually change state depends on how many
 * others are trying to access the same pages.
 *
 * Only call here if ENFORCE_READ_ONLY is true.
 */
void dvmLinearSetReadOnly(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    updatePages(classLoader, mem, -1);
}

/*
 * Make the pages on which "mem" sits read-write.
 *
 * This covers the header as well as the data itself. (We could add a
 * "header-only" mode for dvmLinearFree.)
 *
 * Only call here if ENFORCE_READ_ONLY is true.
 */
void dvmLinearSetReadWrite(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    updatePages(classLoader, mem, 1);
}

/*
 * Mark an allocation as free.
 */
void dvmLinearFree(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    free(mem);
    return;
#endif
    if (mem == NULL)
        return;

    LinearAllocHdr* pHdr = getHeader(classLoader);

    /* make sure we have the right region */
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadWrite(classLoader, mem);

    u4* pLen = getBlockHeader(mem);
    *pLen |= LENGTHFLAG_FREE;

    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(classLoader, mem);
}

/*
 * For debugging, dump the contents of a linear alloc area.
 *
 * We grab the lock so that the header contents and list output are
 * consistent.
 */
void dvmLinearAllocDump(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    dvmLockMutex(&pHdr->lock);

    LOGI("LinearAlloc classLoader=%p\n", classLoader);
    LOGI(" mapAddr=%p mapLength=%d firstOffset=%d\n",
        pHdr->mapAddr, pHdr->mapLength, pHdr->firstOffset);
    LOGI(" curOffset=%d\n", pHdr->curOffset);

    int off = pHdr->firstOffset;
    u4 rawLen, fullLen;

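    /*
     * Walk the chunk list. The stored length already includes the pad
     * bytes up to the next header, so adding both headers' worth and
     * rounding down to BLOCK_ALIGN recovers the distance to the next
     * block (e.g. a stored len of 12 gives fullLen = (8+12) & ~7 = 16).
     */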
    while (off < pHdr->curOffset) {
        rawLen = *(u4*) (pHdr->mapAddr + off);
        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
                    & ~(BLOCK_ALIGN-1));

        LOGI(" %p (%3d): %clen=%d%s\n", pHdr->mapAddr + off + HEADER_EXTRA,
            (int) ((off + HEADER_EXTRA) / PAGESIZE),
            (rawLen & LENGTHFLAG_FREE) != 0 ? '*' : ' ',
            rawLen & LENGTHFLAG_MASK,
            (rawLen & LENGTHFLAG_RW) != 0 ? " [RW]" : "");

        off += fullLen;
    }

    if (ENFORCE_READ_ONLY) {
        LOGI("writeRefCount map:\n");

        int numPages = (pHdr->mapLength+PAGESIZE-1) / PAGESIZE;
        int zstart = 0;
        int i;

        for (i = 0; i < numPages; i++) {
            int count = pHdr->writeRefCount[i];

            if (count != 0) {
                if (zstart < i-1)
                    printf(" %d-%d: zero\n", zstart, i-1);
                else if (zstart == i-1)
                    printf(" %d: zero\n", zstart);
                zstart = i+1;
                printf(" %d: %d\n", i, count);
            }
        }
        if (zstart < i)
            printf(" %d-%d: zero\n", zstart, i-1);
    }

    LOGD("LinearAlloc %p using %d of %d (%d%%)\n",
        classLoader, pHdr->curOffset, pHdr->mapLength,
        (pHdr->curOffset * 100) / pHdr->mapLength);

    dvmUnlockMutex(&pHdr->lock);
}

/*
 * Verify that all blocks are freed.
 *
 * This should only be done as we're shutting down, but there could be a
 * daemon thread that's still trying to do something, so we grab the locks.
 */
static void checkAllFree(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    dvmLockMutex(&pHdr->lock);

    int off = pHdr->firstOffset;
    u4 rawLen, fullLen;

    while (off < pHdr->curOffset) {
        rawLen = *(u4*) (pHdr->mapAddr + off);
        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
                    & ~(BLOCK_ALIGN-1));

        if ((rawLen & LENGTHFLAG_FREE) == 0) {
            LOGW("LinearAlloc %p not freed: %p len=%d\n", classLoader,
                pHdr->mapAddr + off + HEADER_EXTRA, rawLen & LENGTHFLAG_MASK);
        }

        off += fullLen;
    }

    dvmUnlockMutex(&pHdr->lock);
}