blob: a5a417b00946d4e7546d3dae457a593e048a1d6a [file] [log] [blame]
Carl Shapirod28668c2010-04-15 16:10:00 -07001/*
2 * Copyright (C) 2009 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <errno.h>
18#include <limits.h>
19#include <sys/mman.h>
20
21#include "Dalvik.h"
22#include "alloc/Heap.h"
23#include "alloc/HeapBitmap.h"
24#include "alloc/HeapInternal.h"
25#include "alloc/HeapSource.h"
26#include "alloc/Verify.h"
27#include "alloc/clz.h"
28
29/*
30 * A "mostly copying", generational, garbage collector.
31 *
32 * TODO: we allocate our own contiguous tract of page frames to back
33 * object allocations. To cooperate with other heaps active in the
34 * virtual machine we need to move the responsibility of allocating
35 * pages someplace outside of this code.
36 *
37 * The other major data structures that maintain the state of the heap
38 * are the block space table and the block queue.
39 *
40 * The block space table records the state of a block. We must track
41 * whether a block is:
42 *
43 * - Free or allocated in some space.
44 *
45 * - If the block holds part of a large object allocation, whether the
46 * block is the initial or a continued block of the allocation.
47 *
48 * - Whether the block is pinned, that is to say whether at least one
49 * object in the block must remain stationary. Only needed during a
50 * GC.
51 *
52 * - Which space the object belongs to. At present this means
53 * from-space or to-space.
54 *
55 * The block queue is used during garbage collection. Unlike Cheney's
56 * algorithm, from-space and to-space are not contiguous. Therefore,
57 * one cannot maintain the state of the copy with just two pointers.
58 * The block queue exists to thread lists of blocks from the various
59 * spaces together.
60 *
61 * Additionally, we record the free space frontier of the heap, as
62 * well as the address of the first object within a block, which is
63 * required to copy objects following a large object (not currently
64 * implemented). This is stored in the heap source structure. This
65 * should be moved elsewhere to support in-line allocations from Java
66 * threads.
67 *
68 * Allocation requests are satisfied by reserving storage from one or
69 * more contiguous blocks. Objects that are small enough to fit
70 * inside a block are packed together within a block. Objects that
71 * are larger than a block are allocated from contiguous sequences of
72 * blocks. When half the available blocks are filled, a garbage
73 * collection occurs. We "flip" spaces (exchange from- and to-space),
74 * copy live objects into to space, and perform pointer adjustment.
75 *
76 * Copying is made more complicated by the requirement that some
77 * objects must not be moved. This property is known as "pinning".
78 * These objects must be dealt with specially. We use Bartlett's
79 * scheme; blocks containing such objects are grayed (promoted) at the
Carl Shapiro952e84a2010-05-06 14:35:29 -070080 * start of a garbage collection. By virtue of this trick, tracing
Carl Shapirod28668c2010-04-15 16:10:00 -070081 * from the roots proceeds as usual but all objects on those pages are
82 * considered promoted and therefore not moved.
83 *
84 * TODO: there is sufficient information within the garbage collector
85 * to implement Attardi's scheme for evacuating unpinned objects from
86 * a page that is otherwise pinned. This would eliminate false
87 * retention caused by the large pinning granularity.
88 *
89 * We need a scheme for medium and large objects. Ignore that for
90 * now, we can return to this later.
91 *
92 * Eventually we need to worry about promoting objects out of the
93 * copy-collected heap (tenuring) into a less volatile space. Copying
94 * may not always be the best policy for such spaces. We should
95 * consider a variant of mark, sweep, compact.
96 *
97 * The block scheme allows us to use VM page faults to maintain a
98 * write barrier. Consider having a special leaf state for a page.
99 *
100 * Bibliography:
101 *
102 * C. J. Cheney. 1970. A non-recursive list compacting
103 * algorithm. CACM. 13-11 pp677--678.
104 *
105 * Joel F. Bartlett. 1988. Compacting Garbage Collection with
106 * Ambiguous Roots. Digital Equipment Corporation.
107 *
108 * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up
109 * Generations and C++. Digital Equipment Corporation.
110 *
111 * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage
112 * Collection in Uncooperative Environments. Digital Equipment
113 * Corporation.
114 *
115 * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory
116 * Management Framework. TR-94-010
117 *
118 * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable
119 * memory management framework for C++. Software -- Practice and
120 * Experience. 28(11), 1143-1183.
121 *
122 */
123
124#define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0]))
125
126#if 1
127#define LOG_ALLOC LOGI
128#define LOG_SCAVENGE LOGI
129#define LOG_TRANSPORT LOGI
130#define LOG_PROMOTE LOGI
131#define LOG_VERIFY LOGI
Carl Shapiro952e84a2010-05-06 14:35:29 -0700132#define LOG_REFERENCE LOGI
Carl Shapirod28668c2010-04-15 16:10:00 -0700133#else
134#define LOG_ALLOC(...) ((void *)0)
135#define LOG_SCAVENGE(...) ((void *)0)
136#define LOG_TRANSPORT(...) ((void *)0)
137#define LOG_PROMOTE(...) ((void *)0)
138#define LOG_VERIFY(...) ((void *)0)
Carl Shapiro952e84a2010-05-06 14:35:29 -0700139#define LOG_REFERENCE(...) ((void *)0)
Carl Shapirod28668c2010-04-15 16:10:00 -0700140#endif
141
142static void enqueueBlock(HeapSource *heapSource, size_t block);
Carl Shapiro2396fda2010-05-03 20:14:14 -0700143static void scavengeReference(Object **obj);
Carl Shapirod28668c2010-04-15 16:10:00 -0700144static void verifyReference(const void *obj);
145static void printHeapBitmap(const HeapBitmap *bitmap);
146static void printHeapBitmapSxS(const HeapBitmap *b1, const HeapBitmap *b2);
Carl Shapiro952e84a2010-05-06 14:35:29 -0700147static bool toSpaceContains(const void *addr);
148static bool fromSpaceContains(const void *addr);
Carl Shapirod28668c2010-04-15 16:10:00 -0700149static size_t sumHeapBitmap(const HeapBitmap *bitmap);
Carl Shapiro2396fda2010-05-03 20:14:14 -0700150static size_t objectSize(const Object *obj);
Carl Shapiro952e84a2010-05-06 14:35:29 -0700151static void scavengeDataObject(Object *obj);
152static void scavengeBlockQueue(void);
Carl Shapirod28668c2010-04-15 16:10:00 -0700153
154/*
155 * We use 512-byte blocks.
156 */
157enum { BLOCK_SHIFT = 9 };
158enum { BLOCK_SIZE = 1 << BLOCK_SHIFT };
159
160/*
161 * Space identifiers, stored into the blockSpace array.
162 */
163enum {
164 BLOCK_FREE = 0,
165 BLOCK_FROM_SPACE = 1,
166 BLOCK_TO_SPACE = 2,
167 BLOCK_CONTINUED = 7
168};
169
170/*
171 * Alignment for all allocations, in bytes.
172 */
173enum { ALLOC_ALIGNMENT = 8 };
174
175/*
176 * Sentinel value for the queue end.
177 */
178#define QUEUE_TAIL (~(size_t)0)
179
180struct HeapSource {
181
182 /* The base address of backing store. */
183 u1 *blockBase;
184
185 /* Total number of blocks available for allocation. */
186 size_t totalBlocks;
187 size_t allocBlocks;
188
189 /*
190 * The scavenger work queue. Implemented as an array of index
191 * values into the queue.
192 */
193 size_t *blockQueue;
194
195 /*
196 * Base and limit blocks. Basically the shifted start address of
197 * the block. We convert blocks to a relative number when
198 * indexing in the block queue. TODO: make the block queue base
199 * relative rather than the index into the block queue.
200 */
201 size_t baseBlock, limitBlock;
202
203 size_t queueHead;
204 size_t queueTail;
205 size_t queueSize;
206
207 /* The space of the current block 0 (free), 1 or 2. */
208 char *blockSpace;
209
210 /* Start of free space in the current block. */
211 u1 *allocPtr;
212 /* Exclusive limit of free space in the current block. */
213 u1 *allocLimit;
214
215 HeapBitmap allocBits;
216
217 /*
Carl Shapirod28668c2010-04-15 16:10:00 -0700218 * The starting size of the heap. This value is the same as the
219 * value provided to the -Xms flag.
220 */
221 size_t minimumSize;
222
223 /*
224 * The maximum size of the heap. This value is the same as the
225 * -Xmx flag.
226 */
227 size_t maximumSize;
228
229 /*
230 * The current, committed size of the heap. At present, this is
231 * equivalent to the maximumSize.
232 */
233 size_t currentSize;
234
235 size_t bytesAllocated;
236};
237
238static unsigned long alignDown(unsigned long x, unsigned long n)
239{
240 return x & -n;
241}
242
243static unsigned long alignUp(unsigned long x, unsigned long n)
244{
245 return alignDown(x + (n - 1), n);
246}
247
248static void describeBlocks(const HeapSource *heapSource)
249{
250 size_t i;
251
252 for (i = 0; i < heapSource->totalBlocks; ++i) {
253 if ((i % 32) == 0) putchar('\n');
254 printf("%d ", heapSource->blockSpace[i]);
255 }
256 putchar('\n');
257}
258
259/*
260 * Virtual memory interface.
261 */
262
263static void *virtualAlloc(size_t length)
264{
265 void *addr;
266 int flags, prot;
267
268 flags = MAP_PRIVATE | MAP_ANONYMOUS;
269 prot = PROT_READ | PROT_WRITE;
270 addr = mmap(NULL, length, prot, flags, -1, 0);
271 if (addr == MAP_FAILED) {
272 LOGE_HEAP("mmap: %s", strerror(errno));
273 addr = NULL;
274 }
275 return addr;
276}
277
278static void virtualFree(void *addr, size_t length)
279{
280 int res;
281
282 assert(addr != NULL);
283 assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0);
284 res = munmap(addr, length);
285 if (res == -1) {
286 LOGE_HEAP("munmap: %s", strerror(errno));
287 }
288}
289
290static int isValidAddress(const HeapSource *heapSource, const u1 *addr)
291{
292 size_t block;
293
294 block = (uintptr_t)addr >> BLOCK_SHIFT;
295 return heapSource->baseBlock <= block &&
296 heapSource->limitBlock > block;
297}
298
299/*
300 * Iterate over the block map looking for a contiguous run of free
301 * blocks.
302 */
303static void *allocateBlocks(HeapSource *heapSource, size_t blocks)
304{
305 void *addr;
306 size_t allocBlocks, totalBlocks;
307 size_t i, j;
308
309 allocBlocks = heapSource->allocBlocks;
310 totalBlocks = heapSource->totalBlocks;
311 /* Check underflow. */
312 assert(blocks != 0);
313 /* Check overflow. */
314 if (allocBlocks + blocks > totalBlocks / 2) {
315 return NULL;
316 }
317 /* Scan block map. */
318 for (i = 0; i < totalBlocks; ++i) {
319 /* Check fit. */
320 for (j = 0; j < blocks; ++j) { /* runs over totalBlocks */
321 if (heapSource->blockSpace[i+j] != BLOCK_FREE) {
322 break;
323 }
324 }
325 /* No fit? */
326 if (j != blocks) {
327 i += j;
328 continue;
329 }
330 /* Fit, allocate. */
331 heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */
332 for (j = 1; j < blocks; ++j) {
333 heapSource->blockSpace[i+j] = BLOCK_CONTINUED;
334 }
335 heapSource->allocBlocks += blocks;
336 addr = &heapSource->blockBase[i*BLOCK_SIZE];
337 memset(addr, 0, blocks*BLOCK_SIZE);
338 /* Collecting? */
339 if (heapSource->queueHead != QUEUE_TAIL) {
340 LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
341 /*
342 * This allocated was on behalf of the transporter when it
343 * shaded a white object gray. We enqueue the block so
344 * the scavenger can further shade the gray objects black.
345 */
346 enqueueBlock(heapSource, i);
347 }
348
349 return addr;
350 }
351 /* Insufficient space, fail. */
352 LOGE("Insufficient space, %zu blocks, %zu blocks allocated and %zu bytes allocated",
353 heapSource->totalBlocks,
354 heapSource->allocBlocks,
355 heapSource->bytesAllocated);
356 return NULL;
357}
358
359/* Converts an absolute address to a relative block number. */
360static size_t addressToBlock(const HeapSource *heapSource, const void *addr)
361{
362 assert(heapSource != NULL);
363 assert(isValidAddress(heapSource, addr));
364 return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock;
365}
366
367/* Converts a relative block number to an absolute address. */
368static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
369{
370 u1 *addr;
371
372 addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
373 assert(isValidAddress(heapSource, addr));
374 return addr;
375}
376
377static void clearBlock(HeapSource *heapSource, size_t block)
378{
379 u1 *addr;
380 size_t i;
381
382 assert(heapSource != NULL);
383 assert(block < heapSource->totalBlocks);
384 addr = heapSource->blockBase + block*BLOCK_SIZE;
385 memset(addr, 0xCC, BLOCK_SIZE);
386 for (i = 0; i < BLOCK_SIZE; i += 8) {
387 dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i);
388 }
389}
390
391static void clearFromSpace(HeapSource *heapSource)
392{
393 size_t i, count;
394
395 assert(heapSource != NULL);
396 i = count = 0;
397 while (i < heapSource->totalBlocks) {
398 if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) {
399 ++i;
400 continue;
401 }
402 heapSource->blockSpace[i] = BLOCK_FREE;
403 clearBlock(heapSource, i);
404 ++i;
405 ++count;
406 while (i < heapSource->totalBlocks &&
407 heapSource->blockSpace[i] == BLOCK_CONTINUED) {
408 heapSource->blockSpace[i] = BLOCK_FREE;
409 clearBlock(heapSource, i);
410 ++i;
411 ++count;
412 }
413 }
414 LOGI("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE);
415}
416
417/*
418 * Appends the given block to the block queue. The block queue is
419 * processed in-order by the scavenger.
420 */
421static void enqueueBlock(HeapSource *heapSource, size_t block)
422{
423 assert(heapSource != NULL);
424 assert(block < heapSource->totalBlocks);
425 if (heapSource->queueHead != QUEUE_TAIL) {
426 heapSource->blockQueue[heapSource->queueTail] = block;
427 } else {
428 heapSource->queueHead = block;
429 }
430 heapSource->blockQueue[block] = QUEUE_TAIL;
431 heapSource->queueTail = block;
432 ++heapSource->queueSize;
433}
434
435/*
436 * Grays all objects within the block corresponding to the given
437 * address.
438 */
439static void promoteBlockByAddr(HeapSource *heapSource, const void *addr)
440{
441 size_t block;
442
443 block = addressToBlock(heapSource, (const u1 *)addr);
444 if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
445 // LOGI("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
446 heapSource->blockSpace[block] = BLOCK_TO_SPACE;
447 enqueueBlock(heapSource, block);
448 /* TODO(cshapiro): count continued blocks?*/
449 heapSource->allocBlocks += 1;
450 } else {
451 // LOGI("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
452 }
453}
454
455GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
456{
457 GcHeap* gcHeap;
458 HeapSource *heapSource;
459
460 assert(startSize <= absoluteMaxSize);
461
462 heapSource = malloc(sizeof(*heapSource));
463 assert(heapSource != NULL);
464 memset(heapSource, 0, sizeof(*heapSource));
465
466 heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE);
467 heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE);
468
469 heapSource->currentSize = heapSource->maximumSize;
470
471 /* Allocate underlying storage for blocks. */
472 heapSource->blockBase = virtualAlloc(heapSource->maximumSize);
473 assert(heapSource->blockBase != NULL);
474 heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT;
475 heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT;
476
477 heapSource->allocBlocks = 0;
478 heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock);
479
480 assert(heapSource->totalBlocks = heapSource->maximumSize / BLOCK_SIZE);
481
482 {
483 size_t size = sizeof(heapSource->blockQueue[0]);
484 heapSource->blockQueue = malloc(heapSource->totalBlocks*size);
485 assert(heapSource->blockQueue != NULL);
486 memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size);
487 heapSource->queueHead = QUEUE_TAIL;
488 }
489
490 /* Byte indicating space residence or free status of block. */
491 {
492 size_t size = sizeof(heapSource->blockSpace[0]);
493 heapSource->blockSpace = malloc(heapSource->totalBlocks*size);
494 assert(heapSource->blockSpace != NULL);
495 memset(heapSource->blockSpace, 0, heapSource->totalBlocks*size);
496 }
497
498 dvmHeapBitmapInit(&heapSource->allocBits,
499 heapSource->blockBase,
500 heapSource->maximumSize,
501 "blockBase");
502
503 /* Initialize allocation pointers. */
504 heapSource->allocPtr = allocateBlocks(heapSource, 1);
505 heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;
506
507 gcHeap = malloc(sizeof(*gcHeap));
508 assert(gcHeap != NULL);
509 memset(gcHeap, 0, sizeof(*gcHeap));
510 gcHeap->heapSource = heapSource;
511
512 return gcHeap;
513}
514
515/*
516 * Perform any required heap initializations after forking from the
517 * zygote process. This is a no-op for the time being. Eventually
518 * this will demarcate the shared region of the heap.
519 */
520bool dvmHeapSourceStartupAfterZygote(void)
521{
522 return true;
523}
524
525bool dvmHeapSourceStartupBeforeFork(void)
526{
527 assert(!"implemented");
528 return false;
529}
530
531void dvmHeapSourceShutdown(GcHeap **gcHeap)
532{
533 if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL)
534 return;
535 virtualFree((*gcHeap)->heapSource->blockBase,
536 (*gcHeap)->heapSource->maximumSize);
537 free((*gcHeap)->heapSource);
538 (*gcHeap)->heapSource = NULL;
539 free(*gcHeap);
540 *gcHeap = NULL;
541}
542
543size_t dvmHeapSourceGetValue(enum HeapSourceValueSpec spec,
544 size_t perHeapStats[],
545 size_t arrayLen)
546{
547 HeapSource *heapSource;
548 size_t value;
549
550 heapSource = gDvm.gcHeap->heapSource;
551 switch (spec) {
552 case HS_EXTERNAL_BYTES_ALLOCATED:
553 value = 0;
554 break;
555 case HS_EXTERNAL_LIMIT:
556 value = 0;
557 break;
558 case HS_FOOTPRINT:
559 value = heapSource->maximumSize;
560 break;
561 case HS_ALLOWED_FOOTPRINT:
562 value = heapSource->maximumSize;
563 break;
564 case HS_BYTES_ALLOCATED:
565 value = heapSource->bytesAllocated;
566 break;
567 case HS_OBJECTS_ALLOCATED:
568 value = sumHeapBitmap(&heapSource->allocBits);
569 break;
570 default:
571 assert(!"implemented");
572 value = 0;
573 }
574 if (perHeapStats) {
575 *perHeapStats = value;
576 }
577 return value;
578}
579
580/*
581 * Performs a shallow copy of the allocation bitmap into the given
582 * vector of heap bitmaps.
583 */
584void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
585 size_t numHeaps)
586{
587 assert(!"implemented");
588}
589
590HeapBitmap *dvmHeapSourceGetLiveBits(void)
591{
592 assert(!"implemented");
593 return NULL;
594}
595
596/*
597 * Allocate the specified number of bytes from the heap. The
598 * allocation cursor points into a block of free storage. If the
599 * given allocation fits in the remaining space of the block, we
600 * advance the cursor and return a pointer to the free storage. If
601 * the allocation cannot fit in the current block but is smaller than
602 * a block we request a new block and allocate from it instead. If
603 * the allocation is larger than a block we must allocate from a span
604 * of contiguous blocks.
605 */
606void *dvmHeapSourceAlloc(size_t length)
607{
608 HeapSource *heapSource;
609 unsigned char *addr;
610 size_t aligned, available, blocks;
611
612 heapSource = gDvm.gcHeap->heapSource;
613 assert(heapSource != NULL);
614 assert(heapSource->allocPtr != NULL);
615 assert(heapSource->allocLimit != NULL);
616
617 aligned = alignUp(length, ALLOC_ALIGNMENT);
618 available = heapSource->allocLimit - heapSource->allocPtr;
619
620 /* Try allocating inside the current block. */
621 if (aligned <= available) {
622 addr = heapSource->allocPtr;
623 heapSource->allocPtr += aligned;
624 heapSource->bytesAllocated += aligned;
625 dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
626 return addr;
627 }
628
629 /* Try allocating in a new block. */
630 if (aligned <= BLOCK_SIZE) {
631 addr = allocateBlocks(heapSource, 1);
632 if (addr != NULL) {
633 heapSource->allocLimit = addr + BLOCK_SIZE;
634 heapSource->allocPtr = addr + aligned;
635 heapSource->bytesAllocated += aligned;
636 dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
637 /* TODO(cshapiro): pad out the current block. */
638 }
639 return addr;
640 }
641
642 /* Try allocating in a span of blocks. */
643 blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE;
644
645 addr = allocateBlocks(heapSource, blocks);
646 /* Propagate failure upward. */
647 if (addr != NULL) {
648 heapSource->bytesAllocated += aligned;
649 dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
650 /* TODO(cshapiro): pad out free space in the last block. */
651 }
652 return addr;
653}
654
655void *dvmHeapSourceAllocAndGrow(size_t size)
656{
657 return dvmHeapSourceAlloc(size);
658}
659
660/* TODO: refactor along with dvmHeapSourceAlloc */
661void *allocateGray(size_t size)
662{
Carl Shapiro952e84a2010-05-06 14:35:29 -0700663 /* TODO: add a check that we are in a GC. */
664 /* assert(gDvm.gcHeap->heapSource->queueHead != QUEUE_TAIL); */
Carl Shapirod28668c2010-04-15 16:10:00 -0700665 return dvmHeapSourceAlloc(size);
666}
667
668/*
669 * Returns true if the given address is within the heap and points to
670 * the header of a live object.
671 */
672bool dvmHeapSourceContains(const void *addr)
673{
674 HeapSource *heapSource;
675 HeapBitmap *bitmap;
676
677 heapSource = gDvm.gcHeap->heapSource;
678 bitmap = &heapSource->allocBits;
Carl Shapirodc1e4f12010-05-01 22:27:56 -0700679 if (!dvmHeapBitmapCoversAddress(bitmap, addr)) {
680 return false;
681 } else {
682 return dvmHeapBitmapIsObjectBitSet(bitmap, addr);
683 }
Carl Shapirod28668c2010-04-15 16:10:00 -0700684}
685
686bool dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
687{
688 assert(!"implemented");
689 return false;
690}
691
692size_t dvmHeapSourceChunkSize(const void *ptr)
693{
694 assert(!"implemented");
695 return 0;
696}
697
698size_t dvmHeapSourceFootprint(void)
699{
700 assert(!"implemented");
701 return 0;
702}
703
704/*
705 * Returns the "ideal footprint" which appears to be the number of
706 * bytes currently committed to the heap. This starts out at the
707 * start size of the heap and grows toward the maximum size.
708 */
709size_t dvmHeapSourceGetIdealFootprint(void)
710{
711 return gDvm.gcHeap->heapSource->currentSize;
712}
713
714float dvmGetTargetHeapUtilization(void)
715{
716 assert(!"implemented");
717 return 0.0f;
718}
719
720void dvmSetTargetHeapUtilization(float newTarget)
721{
722 assert(!"implemented");
723}
724
725size_t dvmMinimumHeapSize(size_t size, bool set)
726{
727 return gDvm.gcHeap->heapSource->minimumSize;
728}
729
730/*
731 * Expands the size of the heap after a collection. At present we
732 * commit the pages for maximum size of the heap so this routine is
733 * just a no-op. Eventually, we will either allocate or commit pages
734 * on an as-need basis.
735 */
736void dvmHeapSourceGrowForUtilization(void)
737{
738 /* do nothing */
739}
740
741void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
742{
743 /* do nothing */
744}
745
746void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen,
747 const void *userptr, size_t userlen,
748 void *arg),
749 void *arg)
750{
751 assert(!"implemented");
752}
753
754size_t dvmHeapSourceGetNumHeaps(void)
755{
756 return 1;
757}
758
759bool dvmTrackExternalAllocation(size_t n)
760{
761 assert(!"implemented");
762 return false;
763}
764
765void dvmTrackExternalFree(size_t n)
766{
767 assert(!"implemented");
768}
769
770size_t dvmGetExternalBytesAllocated(void)
771{
772 assert(!"implemented");
773 return 0;
774}
775
776void dvmHeapSourceFlip(void)
777{
778 HeapSource *heapSource;
779 size_t i;
780
781 heapSource = gDvm.gcHeap->heapSource;
782
783 /* Reset the block queue. */
784 heapSource->allocBlocks = 0;
785 heapSource->queueSize = 0;
786 heapSource->queueHead = QUEUE_TAIL;
787
Carl Shapirod28668c2010-04-15 16:10:00 -0700788 /* TODO(cshapiro): pad the current (prev) block. */
789
790 heapSource->allocPtr = NULL;
791 heapSource->allocLimit = NULL;
792
793 /* Whiten all allocated blocks. */
794 for (i = 0; i < heapSource->totalBlocks; ++i) {
795 if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) {
796 heapSource->blockSpace[i] = BLOCK_FROM_SPACE;
797 }
798 }
799}
800
801static void room(size_t *alloc, size_t *avail, size_t *total)
802{
803 HeapSource *heapSource;
804 size_t i;
805
806 heapSource = gDvm.gcHeap->heapSource;
807 *total = heapSource->totalBlocks*BLOCK_SIZE;
808 *alloc = heapSource->allocBlocks*BLOCK_SIZE;
809 *avail = *total - *alloc;
810}
811
812static bool isSpaceInternal(u1 *addr, int space)
813{
814 HeapSource *heapSource;
815 u1 *base, *limit;
816 size_t offset;
817 char space2;
818
819 heapSource = gDvm.gcHeap->heapSource;
820 base = heapSource->blockBase;
821 assert(addr >= base);
822 limit = heapSource->blockBase + heapSource->maximumSize;
823 assert(addr < limit);
824 offset = addr - base;
825 space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT];
826 return space == space2;
827}
828
Carl Shapiro952e84a2010-05-06 14:35:29 -0700829static bool fromSpaceContains(const void *addr)
Carl Shapirod28668c2010-04-15 16:10:00 -0700830{
831 return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE);
832}
833
Carl Shapiro952e84a2010-05-06 14:35:29 -0700834static bool toSpaceContains(const void *addr)
Carl Shapirod28668c2010-04-15 16:10:00 -0700835{
836 return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE);
837}
838
839/*
840 * Notifies the collector that the object at the given address must
841 * remain stationary during the current collection.
842 */
843static void pinObject(const Object *obj)
844{
845 promoteBlockByAddr(gDvm.gcHeap->heapSource, obj);
846}
847
848static void printHeapBitmap(const HeapBitmap *bitmap)
849{
850 const char *cp;
851 size_t i, length;
852
853 length = bitmap->bitsLen >> 2;
854 fprintf(stderr, "%p", bitmap->bits);
855 for (i = 0; i < length; ++i) {
856 fprintf(stderr, " %lx", bitmap->bits[i]);
857 fputc('\n', stderr);
858 }
859}
860
861static void printHeapBitmapSxS(const HeapBitmap *b1, const HeapBitmap *b2)
862{
863 uintptr_t addr;
864 size_t i, length;
865
866 assert(b1->base == b2->base);
867 assert(b1->bitsLen == b2->bitsLen);
868 addr = b1->base;
869 length = b1->bitsLen >> 2;
870 for (i = 0; i < length; ++i) {
871 int diff = b1->bits[i] == b2->bits[i];
872 fprintf(stderr, "%08x %08lx %08lx %d\n",
873 addr, b1->bits[i], b2->bits[i], diff);
874 addr += sizeof(*b1->bits)*CHAR_BIT;
875 }
876}
877
878static size_t sumHeapBitmap(const HeapBitmap *bitmap)
879{
880 const char *cp;
881 size_t i, sum;
882
883 sum = 0;
884 for (i = 0; i < bitmap->bitsLen >> 2; ++i) {
885 sum += dvmClzImpl(bitmap->bits[i]);
886 }
887 return sum;
888}
889
890/*
891 * Miscellaneous functionality.
892 */
893
894static int isForward(const void *addr)
895{
896 return (uintptr_t)addr & 0x1;
897}
898
899static void setForward(const void *toObj, void *fromObj)
900{
901 *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1;
902}
903
904static void* getForward(const void *fromObj)
905{
906 return (void *)((uintptr_t)fromObj & ~0x1);
907}
908
909/* Beware, uses the same encoding as a forwarding pointers! */
910static int isPermanentString(const StringObject *obj) {
911 return (uintptr_t)obj & 0x1;
912}
913
914static void* getPermanentString(const StringObject *obj)
915{
916 return (void *)((uintptr_t)obj & ~0x1);
917}
918
919
920/*
921 * Scavenging and transporting routines follow. A transporter grays
922 * an object. A scavenger blackens an object. We define these
923 * routines for each fundamental object type. Dispatch is performed
924 * in scavengeObject.
925 */
926
927/*
Carl Shapiro2396fda2010-05-03 20:14:14 -0700928 * Class object scavenging.
Carl Shapirod28668c2010-04-15 16:10:00 -0700929 */
Carl Shapiro2396fda2010-05-03 20:14:14 -0700930static void scavengeClassObject(ClassObject *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -0700931{
Carl Shapirod28668c2010-04-15 16:10:00 -0700932 int i;
933
Carl Shapirod28668c2010-04-15 16:10:00 -0700934 LOG_SCAVENGE("scavengeClassObject(obj=%p)", obj);
Carl Shapiro2396fda2010-05-03 20:14:14 -0700935 assert(obj != NULL);
Carl Shapirod28668c2010-04-15 16:10:00 -0700936 assert(obj->obj.clazz != NULL);
937 assert(obj->obj.clazz->descriptor != NULL);
938 assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;"));
939 assert(obj->descriptor != NULL);
940 LOG_SCAVENGE("scavengeClassObject: descriptor='%s',vtableCount=%zu",
941 obj->descriptor, obj->vtableCount);
Carl Shapiro2396fda2010-05-03 20:14:14 -0700942 /* Scavenge our class object. */
Carl Shapirod28668c2010-04-15 16:10:00 -0700943 scavengeReference((Object **) obj);
944 /* Scavenge the array element class object. */
945 if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) {
946 scavengeReference((Object **)(void *)&obj->elementClass);
947 }
948 /* Scavenge the superclass. */
949 scavengeReference((Object **)(void *)&obj->super);
950 /* Scavenge the class loader. */
951 scavengeReference(&obj->classLoader);
952 /* Scavenge static fields. */
953 for (i = 0; i < obj->sfieldCount; ++i) {
954 char ch = obj->sfields[i].field.signature[0];
955 if (ch == '[' || ch == 'L') {
956 scavengeReference((Object **)(void *)&obj->sfields[i].value.l);
957 }
958 }
959 /* Scavenge interface class objects. */
960 for (i = 0; i < obj->interfaceCount; ++i) {
961 scavengeReference((Object **) &obj->interfaces[i]);
962 }
Carl Shapirod28668c2010-04-15 16:10:00 -0700963}
964
965/*
966 * Array object scavenging.
967 */
Carl Shapirod28668c2010-04-15 16:10:00 -0700968static size_t scavengeArrayObject(ArrayObject *array)
969{
970 size_t i, length;
971
972 LOG_SCAVENGE("scavengeArrayObject(array=%p)", array);
973 /* Scavenge the class object. */
Carl Shapiro952e84a2010-05-06 14:35:29 -0700974 assert(toSpaceContains(array));
Carl Shapirod28668c2010-04-15 16:10:00 -0700975 assert(array != NULL);
976 assert(array->obj.clazz != NULL);
977 scavengeReference((Object **) array);
978 length = dvmArrayObjectSize(array);
979 /* Scavenge the array contents. */
980 if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) {
981 Object **contents = (Object **)array->contents;
982 for (i = 0; i < array->length; ++i) {
983 scavengeReference(&contents[i]);
984 }
985 }
986 return length;
987}
988
989/*
990 * Reference object scavenging.
991 */
992
Carl Shapiro952e84a2010-05-06 14:35:29 -0700993static int getReferenceFlags(const Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -0700994{
995 int flags;
996
997 flags = CLASS_ISREFERENCE |
998 CLASS_ISWEAKREFERENCE |
999 CLASS_ISPHANTOMREFERENCE;
Carl Shapiro952e84a2010-05-06 14:35:29 -07001000 return GET_CLASS_FLAG_GROUP(obj->clazz, flags);
Carl Shapirod28668c2010-04-15 16:10:00 -07001001}
1002
Carl Shapiro952e84a2010-05-06 14:35:29 -07001003static int isReference(const Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07001004{
1005 return getReferenceFlags(obj) != 0;
1006}
1007
Carl Shapiro952e84a2010-05-06 14:35:29 -07001008static int isSoftReference(const Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07001009{
1010 return getReferenceFlags(obj) == CLASS_ISREFERENCE;
1011}
1012
Carl Shapiro952e84a2010-05-06 14:35:29 -07001013static int isWeakReference(const Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07001014{
1015 return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE;
1016}
1017
Carl Shapiro952e84a2010-05-06 14:35:29 -07001018static bool isPhantomReference(const Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07001019{
1020 return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE;
1021}
1022
Carl Shapiro952e84a2010-05-06 14:35:29 -07001023/*
1024 * Returns true if the reference was registered with a reference queue
1025 * but has not yet been appended to it.
1026 */
1027static bool isReferenceEnqueuable(const Object *ref)
Carl Shapirod28668c2010-04-15 16:10:00 -07001028{
Carl Shapiro952e84a2010-05-06 14:35:29 -07001029 Object *queue, *queueNext;
Carl Shapirod28668c2010-04-15 16:10:00 -07001030
Carl Shapiro952e84a2010-05-06 14:35:29 -07001031 queue = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue);
1032 queueNext = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext);
1033 if (queue == NULL || queueNext != NULL) {
1034 /*
1035 * There is no queue, or the reference has already
1036 * been enqueued. The Reference.enqueue() method
1037 * will do nothing even if we call it.
1038 */
1039 return false;
Carl Shapirod28668c2010-04-15 16:10:00 -07001040 }
Carl Shapiro952e84a2010-05-06 14:35:29 -07001041
1042 /*
1043 * We need to call enqueue(), but if we called it from
1044 * here we'd probably deadlock. Schedule a call.
1045 */
1046 return true;
1047}
1048
1049/*
1050 * Schedules a reference to be appended to its reference queue.
1051 */
1052static void enqueueReference(const Object *ref)
1053{
1054 LargeHeapRefTable **table;
1055 Object *op;
1056
1057 assert(((uintptr_t)ref & 3) == 0);
1058 assert((WORKER_ENQUEUE & ~3) == 0);
1059 assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue) != NULL);
1060 assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext) == NULL);
1061 /*
1062 * Set the enqueue bit in the bottom of the pointer. Assumes that
1063 * objects are 8-byte aligned.
1064 *
1065 * Note that we are adding the *Reference* (which is by definition
1066 * already black at this point) to this list; we're not adding the
1067 * referent (which has already been cleared).
1068 */
1069 table = &gDvm.gcHeap->referenceOperations;
1070 op = (Object *)((uintptr_t)ref | WORKER_ENQUEUE);
1071 if (!dvmHeapAddRefToLargeTable(table, op)) {
1072 LOGE("enqueueReference(): "
1073 "no room for any more reference operations");
1074 dvmAbort();
1075 }
1076}
1077
1078/*
1079 * Sets the referent field of a reference object to NULL.
1080 */
1081static void clearReference(Object *obj)
1082{
1083 dvmSetFieldObject(obj, gDvm.offJavaLangRefReference_referent, NULL);
1084}
1085
1086/*
1087 * Clears reference objects with white referents.
1088 */
1089void clearWhiteReferences(Object **list)
1090{
1091 size_t referentOffset, vmDataOffset;
1092 bool doSignal;
1093
1094 vmDataOffset = gDvm.offJavaLangRefReference_vmData;
1095 referentOffset = gDvm.offJavaLangRefReference_referent;
1096 doSignal = false;
1097 while (*list != NULL) {
1098 Object *ref = *list;
1099 JValue *field = dvmFieldPtr(ref, referentOffset);
1100 Object *referent = field->l;
1101 *list = dvmGetFieldObject(ref, vmDataOffset);
1102 assert(referent != NULL);
1103 if (isForward(referent->clazz)) {
1104 field->l = referent = getForward(referent->clazz);
1105 continue;
1106 }
1107 if (fromSpaceContains(referent)) {
1108 /* Referent is white, clear it. */
1109 clearReference(ref);
1110 if (isReferenceEnqueuable(ref)) {
1111 enqueueReference(ref);
1112 doSignal = true;
1113 }
1114 }
1115 }
1116 /*
1117 * If we cleared a reference with a reference queue we must notify
1118 * the heap worker to append the reference.
1119 */
1120 if (doSignal) {
1121 dvmSignalHeapWorker(false);
1122 }
1123 assert(*list == NULL);
1124}
1125
1126/*
1127 * Blackens referents subject to the soft reference preservation
1128 * policy.
1129 */
1130void preserveSoftReferences(Object **list)
1131{
1132 Object *ref;
1133 Object *prev, *next;
1134 size_t referentOffset, vmDataOffset;
1135 unsigned counter;
1136 bool white;
1137
1138 vmDataOffset = gDvm.offJavaLangRefReference_vmData;
1139 referentOffset = gDvm.offJavaLangRefReference_referent;
1140 counter = 0;
1141 prev = next = NULL;
1142 ref = *list;
1143 while (ref != NULL) {
1144 JValue *field = dvmFieldPtr(ref, referentOffset);
1145 Object *referent = field->l;
1146 next = dvmGetFieldObject(ref, vmDataOffset);
1147 assert(referent != NULL);
1148 if (isForward(referent->clazz)) {
1149 /* Referent is black. */
1150 field->l = referent = getForward(referent->clazz);
1151 white = false;
1152 } else {
1153 white = fromSpaceContains(referent);
1154 }
1155 if (!white && ((++counter) & 1)) {
1156 /* Referent is white and biased toward saving, gray it. */
1157 scavengeReference((Object **)(void *)&field->l);
1158 white = true;
1159 }
1160 if (white) {
1161 /* Referent is black, unlink it. */
1162 if (prev != NULL) {
1163 dvmSetFieldObject(ref, vmDataOffset, NULL);
1164 dvmSetFieldObject(prev, vmDataOffset, next);
1165 }
1166 } else {
1167 /* Referent is white, skip over it. */
1168 prev = ref;
1169 }
1170 ref = next;
1171 }
1172 /*
1173 * Restart the trace with the newly gray references added to the
1174 * root set.
1175 */
1176 scavengeBlockQueue();
1177}
1178
1179void processFinalizableReferences(void)
1180{
1181 HeapRefTable newPendingRefs;
1182 LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
1183 Object **ref;
1184 Object **lastRef;
1185 size_t totalPendCount;
1186
1187 /*
1188 * All strongly, reachable objects are black.
1189 * Any white finalizable objects need to be finalized.
1190 */
1191
1192 /* Create a table that the new pending refs will
1193 * be added to.
1194 */
1195 if (!dvmHeapInitHeapRefTable(&newPendingRefs, 128)) {
1196 //TODO: mark all finalizable refs and hope that
1197 // we can schedule them next time. Watch out,
1198 // because we may be expecting to free up space
1199 // by calling finalizers.
1200 LOG_REFERENCE("dvmHeapScheduleFinalizations(): no room for "
1201 "pending finalizations\n");
1202 dvmAbort();
1203 }
1204
1205 /*
1206 * Walk through finalizableRefs and move any white references to
1207 * the list of new pending refs.
1208 */
1209 totalPendCount = 0;
1210 while (finRefs != NULL) {
1211 Object **gapRef;
1212 size_t newPendCount = 0;
1213
1214 gapRef = ref = finRefs->refs.table;
1215 lastRef = finRefs->refs.nextEntry;
1216 while (ref < lastRef) {
1217 if (fromSpaceContains(*ref)) {
1218 if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
1219 //TODO: add the current table and allocate
1220 // a new, smaller one.
1221 LOG_REFERENCE("dvmHeapScheduleFinalizations(): "
1222 "no room for any more pending finalizations: %zd\n",
1223 dvmHeapNumHeapRefTableEntries(&newPendingRefs));
1224 dvmAbort();
1225 }
1226 newPendCount++;
1227 } else {
1228 /* This ref is black, so will remain on finalizableRefs.
1229 */
1230 if (newPendCount > 0) {
1231 /* Copy it up to fill the holes.
1232 */
1233 *gapRef++ = *ref;
1234 } else {
1235 /* No holes yet; don't bother copying.
1236 */
1237 gapRef++;
1238 }
1239 }
1240 ref++;
1241 }
1242 finRefs->refs.nextEntry = gapRef;
1243 //TODO: if the table is empty when we're done, free it.
1244 totalPendCount += newPendCount;
1245 finRefs = finRefs->next;
1246 }
1247 LOG_REFERENCE("processFinalizableReferences(): %zd finalizers triggered.\n",
1248 totalPendCount);
1249 if (totalPendCount == 0) {
1250 /* No objects required finalization.
1251 * Free the empty temporary table.
1252 */
1253 dvmClearReferenceTable(&newPendingRefs);
1254 return;
1255 }
1256
1257 /* Add the new pending refs to the main list.
1258 */
1259 if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
1260 &newPendingRefs))
1261 {
1262 LOG_REFERENCE("dvmHeapScheduleFinalizations(): can't insert new "
1263 "pending finalizations\n");
1264 dvmAbort();
1265 }
1266
1267 //TODO: try compacting the main list with a memcpy loop
1268
1269 /* Blacken the refs we just moved; we don't want them or their
1270 * children to get swept yet.
1271 */
1272 ref = newPendingRefs.table;
1273 lastRef = newPendingRefs.nextEntry;
1274 assert(ref < lastRef);
1275 HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
1276 while (ref < lastRef) {
1277 scavengeReference(ref);
1278 ref++;
1279 }
1280 HPROF_CLEAR_GC_SCAN_STATE();
1281 scavengeBlockQueue();
1282 dvmSignalHeapWorker(false);
Carl Shapirod28668c2010-04-15 16:10:00 -07001283}
1284
Carl Shapirod28668c2010-04-15 16:10:00 -07001285/*
1286 * If a reference points to from-space and has been forwarded, we snap
1287 * the pointer to its new to-space address. If the reference points
1288 * to an unforwarded from-space address we must enqueue the reference
1289 * for later processing. TODO: implement proper reference processing
1290 * and move the referent scavenging elsewhere.
1291 */
Carl Shapiro952e84a2010-05-06 14:35:29 -07001292static void scavengeReferenceObject(Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07001293{
Carl Shapiro952e84a2010-05-06 14:35:29 -07001294 Object *referent;
1295 Object **queue;
1296 size_t referentOffset, vmDataOffset;
1297
Carl Shapirod28668c2010-04-15 16:10:00 -07001298 assert(obj != NULL);
Carl Shapiro952e84a2010-05-06 14:35:29 -07001299 LOG_SCAVENGE("scavengeReferenceObject(obj=%p),'%s'", obj, obj->clazz->descriptor);
Carl Shapiro2396fda2010-05-03 20:14:14 -07001300 scavengeDataObject(obj);
Carl Shapiro952e84a2010-05-06 14:35:29 -07001301 referentOffset = gDvm.offJavaLangRefReference_referent;
1302 referent = dvmGetFieldObject(obj, referentOffset);
1303 if (referent == NULL || toSpaceContains(referent)) {
1304 return;
Carl Shapirod28668c2010-04-15 16:10:00 -07001305 }
Carl Shapiro952e84a2010-05-06 14:35:29 -07001306 if (isSoftReference(obj)) {
1307 queue = &gDvm.gcHeap->softReferences;
1308 } else if (isWeakReference(obj)) {
1309 queue = &gDvm.gcHeap->weakReferences;
1310 } else {
1311 assert(isPhantomReference(obj));
1312 queue = &gDvm.gcHeap->phantomReferences;
1313 }
1314 vmDataOffset = gDvm.offJavaLangRefReference_vmData;
1315 dvmSetFieldObject(obj, vmDataOffset, (Object *)*queue);
1316 *queue = obj;
1317 LOG_SCAVENGE("scavengeReferenceObject: enqueueing %p", obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001318}
1319
1320/*
1321 * Data object scavenging.
1322 */
Carl Shapiro952e84a2010-05-06 14:35:29 -07001323static void scavengeDataObject(Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07001324{
1325 ClassObject *clazz;
Carl Shapirod28668c2010-04-15 16:10:00 -07001326 int i;
1327
1328 // LOGI("scavengeDataObject(obj=%p)", obj);
1329 assert(obj != NULL);
Carl Shapiro952e84a2010-05-06 14:35:29 -07001330 assert(obj->clazz != NULL);
1331 assert(obj->clazz->objectSize != 0);
1332 assert(toSpaceContains(obj));
Carl Shapirod28668c2010-04-15 16:10:00 -07001333 /* Scavenge the class object. */
Carl Shapiro952e84a2010-05-06 14:35:29 -07001334 clazz = obj->clazz;
Carl Shapirod28668c2010-04-15 16:10:00 -07001335 scavengeReference((Object **) obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001336 /* Scavenge instance fields. */
1337 if (clazz->refOffsets != CLASS_WALK_SUPER) {
1338 size_t refOffsets = clazz->refOffsets;
1339 while (refOffsets != 0) {
1340 size_t rshift = CLZ(refOffsets);
1341 size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
1342 Object **ref = (Object **)((u1 *)obj + offset);
1343 scavengeReference(ref);
1344 refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
1345 }
1346 } else {
1347 for (; clazz != NULL; clazz = clazz->super) {
1348 InstField *field = clazz->ifields;
1349 for (i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
1350 size_t offset = field->byteOffset;
1351 Object **ref = (Object **)((u1 *)obj + offset);
1352 scavengeReference(ref);
1353 }
1354 }
1355 }
Carl Shapiro2396fda2010-05-03 20:14:14 -07001356}
1357
1358static Object *transportObject(const Object *fromObj)
1359{
1360 Object *toObj;
1361 size_t allocSize, copySize;
1362
1363 LOG_TRANSPORT("transportObject(fromObj=%p) allocBlocks=%zu",
1364 fromObj,
1365 gDvm.gcHeap->heapSource->allocBlocks);
1366 assert(fromObj != NULL);
Carl Shapiro952e84a2010-05-06 14:35:29 -07001367 assert(fromSpaceContains(fromObj));
Carl Shapiro2396fda2010-05-03 20:14:14 -07001368 allocSize = copySize = objectSize(fromObj);
1369 if (LW_HASH_STATE(fromObj->lock) != LW_HASH_STATE_UNHASHED) {
1370 /*
1371 * The object has been hashed or hashed and moved. We must
1372 * reserve an additional word for a hash code.
1373 */
1374 allocSize += sizeof(u4);
1375 }
1376 if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
1377 /*
1378 * The object has its hash code allocated. Ensure the hash
1379 * code is copied along with the instance data.
1380 */
1381 copySize += sizeof(u4);
1382 }
1383 /* TODO(cshapiro): don't copy, re-map large data objects. */
1384 assert(copySize <= allocSize);
1385 toObj = allocateGray(allocSize);
1386 assert(toObj != NULL);
Carl Shapiro952e84a2010-05-06 14:35:29 -07001387 assert(toSpaceContains(toObj));
Carl Shapiro2396fda2010-05-03 20:14:14 -07001388 memcpy(toObj, fromObj, copySize);
1389 if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED) {
1390 /*
1391 * The object has had its hash code exposed. Append it to the
1392 * instance and set a bit so we know to look for it there.
1393 */
1394 *(u4 *)(((char *)toObj) + copySize) = (u4)fromObj >> 3;
1395 toObj->lock |= LW_HASH_STATE_HASHED_AND_MOVED << LW_HASH_STATE_SHIFT;
1396 }
1397 LOG_TRANSPORT("transportObject: from %p/%zu to %p/%zu (%zu,%zu) %s",
1398 fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj),
1399 toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj),
1400 copySize, allocSize, copySize < allocSize ? "DIFFERENT" : "");
1401 return toObj;
Carl Shapirod28668c2010-04-15 16:10:00 -07001402}
1403
1404/*
1405 * Generic reference scavenging.
1406 */
1407
1408/*
1409 * Given a reference to an object, the scavenge routine will gray the
1410 * reference. Any objects pointed to by the scavenger object will be
1411 * transported to new space and a forwarding pointer will be installed
1412 * in the header of the object.
1413 */
1414
1415/*
1416 * Blacken the given pointer. If the pointer is in from space, it is
1417 * transported to new space. If the object has a forwarding pointer
1418 * installed it has already been transported and the referent is
1419 * snapped to the new address.
1420 */
Carl Shapiro2396fda2010-05-03 20:14:14 -07001421static void scavengeReference(Object **obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07001422{
1423 ClassObject *clazz;
Carl Shapiro2396fda2010-05-03 20:14:14 -07001424 Object *fromObj, *toObj;
Carl Shapirod28668c2010-04-15 16:10:00 -07001425 uintptr_t word;
1426
1427 assert(obj);
1428
Carl Shapiro2396fda2010-05-03 20:14:14 -07001429 if (*obj == NULL) return;
Carl Shapirod28668c2010-04-15 16:10:00 -07001430
1431 assert(dvmIsValidObject(*obj));
1432
1433 /* The entire block is black. */
Carl Shapiro952e84a2010-05-06 14:35:29 -07001434 if (toSpaceContains(*obj)) {
Carl Shapirod28668c2010-04-15 16:10:00 -07001435 LOG_SCAVENGE("scavengeReference skipping pinned object @ %p", *obj);
Carl Shapiro2396fda2010-05-03 20:14:14 -07001436 return;
Carl Shapirod28668c2010-04-15 16:10:00 -07001437 }
1438 LOG_SCAVENGE("scavengeReference(*obj=%p)", *obj);
1439
Carl Shapiro952e84a2010-05-06 14:35:29 -07001440 assert(fromSpaceContains(*obj));
Carl Shapirod28668c2010-04-15 16:10:00 -07001441
1442 clazz = (*obj)->clazz;
1443
1444 if (isForward(clazz)) {
1445 // LOGI("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1));
1446 *obj = (Object *)getForward(clazz);
Carl Shapiro2396fda2010-05-03 20:14:14 -07001447 return;
Carl Shapirod28668c2010-04-15 16:10:00 -07001448 }
Carl Shapiro2396fda2010-05-03 20:14:14 -07001449 fromObj = *obj;
1450 if (clazz == NULL) {
1451 // LOGI("scavangeReference %p has a NULL class object", fromObj);
1452 assert(!"implemented");
1453 toObj = NULL;
1454 } else if (clazz == gDvm.unlinkedJavaLangClass) {
1455 // LOGI("scavangeReference %p is an unlinked class object", fromObj);
1456 assert(!"implemented");
1457 toObj = NULL;
1458 } else {
1459 toObj = transportObject(fromObj);
1460 }
1461 setForward(toObj, fromObj);
1462 *obj = (Object *)toObj;
Carl Shapirod28668c2010-04-15 16:10:00 -07001463}
1464
1465static void verifyReference(const void *obj)
1466{
1467 HeapSource *heapSource;
1468 size_t block;
1469 char space;
1470
1471 if (obj == NULL) {
1472 LOG_VERIFY("verifyReference(obj=%p)", obj);
1473 return;
1474 }
1475 heapSource = gDvm.gcHeap->heapSource;
1476 block = addressToBlock(heapSource, obj);
1477 space = heapSource->blockSpace[block];
1478 LOG_VERIFY("verifyReference(obj=%p),block=%zu,space=%d", obj, block, space);
1479 assert(!((uintptr_t)obj & 7));
Carl Shapiro952e84a2010-05-06 14:35:29 -07001480 assert(toSpaceContains(obj));
Carl Shapirod28668c2010-04-15 16:10:00 -07001481 assert(dvmIsValidObject(obj));
1482}
1483
1484/*
1485 * Generic object scavenging.
1486 */
Carl Shapiro2396fda2010-05-03 20:14:14 -07001487static void scavengeObject(Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07001488{
1489 ClassObject *clazz;
Carl Shapirod28668c2010-04-15 16:10:00 -07001490
1491 assert(obj != NULL);
Carl Shapiro952e84a2010-05-06 14:35:29 -07001492 assert(obj->clazz != NULL);
1493 assert(!((uintptr_t)obj->clazz & 0x1));
1494 assert(obj->clazz != gDvm.unlinkedJavaLangClass);
Carl Shapirod28668c2010-04-15 16:10:00 -07001495 clazz = obj->clazz;
Carl Shapirod28668c2010-04-15 16:10:00 -07001496 if (clazz == gDvm.classJavaLangClass) {
Carl Shapiro2396fda2010-05-03 20:14:14 -07001497 scavengeClassObject((ClassObject *)obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001498 } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
Carl Shapiro2396fda2010-05-03 20:14:14 -07001499 scavengeArrayObject((ArrayObject *)obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001500 } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
Carl Shapiro952e84a2010-05-06 14:35:29 -07001501 scavengeReferenceObject(obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001502 } else {
Carl Shapiro952e84a2010-05-06 14:35:29 -07001503 scavengeDataObject(obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001504 }
Carl Shapirod28668c2010-04-15 16:10:00 -07001505}
1506
1507/*
1508 * External root scavenging routines.
1509 */
1510
1511static void scavengeHashTable(HashTable *table)
1512{
1513 HashEntry *entry;
1514 void *obj;
1515 int i;
1516
1517 if (table == NULL) {
1518 return;
1519 }
1520 dvmHashTableLock(table);
1521 for (i = 0; i < table->tableSize; ++i) {
1522 entry = &table->pEntries[i];
1523 obj = entry->data;
1524 if (obj == NULL || obj == HASH_TOMBSTONE) {
1525 continue;
1526 }
1527 scavengeReference((Object **)(void *)&entry->data);
1528 }
1529 dvmHashTableUnlock(table);
1530}
1531
1532static void pinHashTableEntries(HashTable *table)
1533{
1534 HashEntry *entry;
1535 void *obj;
1536 int i;
1537
1538 LOGI(">>> pinHashTableEntries(table=%p)", table);
1539 if (table == NULL) {
1540 return;
1541 }
1542 dvmHashTableLock(table);
1543 for (i = 0; i < table->tableSize; ++i) {
1544 entry = &table->pEntries[i];
1545 obj = entry->data;
1546 if (obj == NULL || obj == HASH_TOMBSTONE) {
1547 continue;
1548 }
1549 pinObject(entry->data);
1550 }
1551 dvmHashTableUnlock(table);
1552 LOGI("<<< pinHashTableEntries(table=%p)", table);
1553}
1554
1555static void pinPrimitiveClasses(void)
1556{
1557 size_t length;
1558 size_t i;
1559
1560 length = ARRAYSIZE(gDvm.primitiveClass);
1561 for (i = 0; i < length; i++) {
1562 if (gDvm.primitiveClass[i] != NULL) {
1563 pinObject((Object *)gDvm.primitiveClass[i]);
1564 }
1565 }
1566}
1567
1568/*
1569 * Scavenge interned strings. Permanent interned strings will have
1570 * been pinned and are therefore ignored. Non-permanent strings that
1571 * have been forwarded are snapped. All other entries are removed.
1572 */
1573static void scavengeInternedStrings(void)
1574{
1575 HashTable *table;
1576 HashEntry *entry;
1577 Object *obj;
1578 int i;
1579
1580 table = gDvm.internedStrings;
1581 if (table == NULL) {
1582 return;
1583 }
1584 dvmHashTableLock(table);
1585 for (i = 0; i < table->tableSize; ++i) {
1586 entry = &table->pEntries[i];
1587 obj = (Object *)entry->data;
1588 if (obj == NULL || obj == HASH_TOMBSTONE) {
1589 continue;
1590 } else if (!isPermanentString((StringObject *)obj)) {
1591 // LOGI("entry->data=%p", entry->data);
1592 LOG_SCAVENGE(">>> string obj=%p", entry->data);
1593 /* TODO(cshapiro): detach white string objects */
1594 scavengeReference((Object **)(void *)&entry->data);
1595 LOG_SCAVENGE("<<< string obj=%p", entry->data);
1596 }
1597 }
1598 dvmHashTableUnlock(table);
1599}
1600
1601static void pinInternedStrings(void)
1602{
1603 HashTable *table;
1604 HashEntry *entry;
1605 Object *obj;
1606 int i;
1607
1608 table = gDvm.internedStrings;
1609 if (table == NULL) {
1610 return;
1611 }
1612 dvmHashTableLock(table);
1613 for (i = 0; i < table->tableSize; ++i) {
1614 entry = &table->pEntries[i];
1615 obj = (Object *)entry->data;
1616 if (obj == NULL || obj == HASH_TOMBSTONE) {
1617 continue;
1618 } else if (isPermanentString((StringObject *)obj)) {
1619 obj = (Object *)getPermanentString((StringObject*)obj);
1620 LOG_PROMOTE(">>> pin string obj=%p", obj);
1621 pinObject(obj);
1622 LOG_PROMOTE("<<< pin string obj=%p", obj);
1623 }
1624 }
1625 dvmHashTableUnlock(table);
1626}
1627
1628static void verifyInternedStrings(void)
1629{
1630 HashTable *table;
1631 HashEntry *entry;
1632 Object *fwd, *obj;
1633 int i;
1634
1635 table = gDvm.internedStrings;
1636 if (table == NULL) {
1637 return;
1638 }
1639 dvmHashTableLock(table);
1640 for (i = 0; i < table->tableSize; ++i) {
1641 entry = &table->pEntries[i];
1642 obj = (Object *)entry->data;
1643 if (obj == NULL || obj == HASH_TOMBSTONE) {
1644 continue;
1645 } else if (isPermanentString((StringObject *)obj)) {
1646 fwd = (Object *)getForward(obj);
1647 LOG_VERIFY(">>> verify string fwd=%p obj=%p", fwd, obj);
1648 verifyReference(fwd);
1649 LOG_VERIFY(">>> verify string fwd=%p obj=%p", fwd, obj);
1650 } else {
1651 LOG_SCAVENGE(">>> verify string obj=%p %p", obj, entry->data);
1652 verifyReference(obj);
1653 LOG_SCAVENGE("<<< verify string obj=%p %p", obj, entry->data);
1654 }
1655 }
1656 dvmHashTableUnlock(table);
1657}
1658
1659/*
1660 * At present, reference tables contain references that must not be
1661 * moved by the collector. Instead of scavenging each reference in
1662 * the table we pin each referenced object.
1663 */
Carl Shapiro583d64c2010-05-04 10:44:47 -07001664static void pinReferenceTable(const ReferenceTable *table)
Carl Shapirod28668c2010-04-15 16:10:00 -07001665{
1666 Object **entry;
1667 int i;
1668
1669 assert(table != NULL);
1670 assert(table->table != NULL);
1671 assert(table->nextEntry != NULL);
1672 for (entry = table->table; entry < table->nextEntry; ++entry) {
1673 assert(entry != NULL);
1674 assert(!isForward(*entry));
1675 pinObject(*entry);
1676 }
1677}
1678
1679static void verifyReferenceTable(const ReferenceTable *table)
1680{
1681 Object **entry;
1682 int i;
1683
1684 LOGI(">>> verifyReferenceTable(table=%p)", table);
1685 for (entry = table->table; entry < table->nextEntry; ++entry) {
1686 assert(entry != NULL);
1687 assert(!isForward(*entry));
1688 verifyReference(*entry);
1689 }
1690 LOGI("<<< verifyReferenceTable(table=%p)", table);
1691}
1692
1693static void scavengeLargeHeapRefTable(LargeHeapRefTable *table)
1694{
1695 Object **entry;
1696
1697 for (; table != NULL; table = table->next) {
1698 for (entry = table->refs.table; entry < table->refs.nextEntry; ++entry) {
1699 if ((uintptr_t)*entry & ~0x3) {
1700 /* It's a pending reference operation. */
1701 assert(!"implemented");
1702 }
1703 scavengeReference(entry);
1704 }
1705 }
1706}
1707
1708/* This code was copied from Thread.c */
1709static void scavengeThreadStack(Thread *thread)
1710{
1711 const u4 *framePtr;
1712#if WITH_EXTRA_GC_CHECKS > 1
1713 bool first = true;
1714#endif
1715
1716 framePtr = (const u4 *)thread->curFrame;
1717 while (framePtr != NULL) {
1718 const StackSaveArea *saveArea;
1719 const Method *method;
1720
1721 saveArea = SAVEAREA_FROM_FP(framePtr);
1722 method = saveArea->method;
Carl Shapiro583d64c2010-05-04 10:44:47 -07001723 if (method != NULL && !dvmIsNativeMethod(method)) {
Carl Shapirod28668c2010-04-15 16:10:00 -07001724#ifdef COUNT_PRECISE_METHODS
1725 /* the GC is running, so no lock required */
1726 if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
1727 LOGI("PGC: added %s.%s %p\n",
1728 method->clazz->descriptor, method->name, method);
1729#endif
1730#if WITH_EXTRA_GC_CHECKS > 1
1731 /*
1732 * May also want to enable the memset() in the "invokeMethod"
1733 * goto target in the portable interpreter. That sets the stack
1734 * to a pattern that makes referring to uninitialized data
1735 * very obvious.
1736 */
1737
1738 if (first) {
1739 /*
1740 * First frame, isn't native, check the "alternate" saved PC
1741 * as a sanity check.
1742 *
1743 * It seems like we could check the second frame if the first
1744 * is native, since the PCs should be the same. It turns out
1745 * this doesn't always work. The problem is that we could
1746 * have calls in the sequence:
1747 * interp method #2
1748 * native method
1749 * interp method #1
1750 *
1751 * and then GC while in the native method after returning
1752 * from interp method #2. The currentPc on the stack is
1753 * for interp method #1, but thread->currentPc2 is still
1754 * set for the last thing interp method #2 did.
1755 *
1756 * This can also happen in normal execution:
1757 * - sget-object on not-yet-loaded class
1758 * - class init updates currentPc2
1759 * - static field init is handled by parsing annotations;
1760 * static String init requires creation of a String object,
1761 * which can cause a GC
1762 *
1763 * Essentially, any pattern that involves executing
1764 * interpreted code and then causes an allocation without
1765 * executing instructions in the original method will hit
1766 * this. These are rare enough that the test still has
1767 * some value.
1768 */
1769 if (saveArea->xtra.currentPc != thread->currentPc2) {
1770 LOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p\n",
1771 saveArea->xtra.currentPc, thread->currentPc2,
1772 method->clazz->descriptor, method->name, method->insns);
1773 if (saveArea->xtra.currentPc != NULL)
1774 LOGE(" pc inst = 0x%04x\n", *saveArea->xtra.currentPc);
1775 if (thread->currentPc2 != NULL)
1776 LOGE(" pc2 inst = 0x%04x\n", *thread->currentPc2);
1777 dvmDumpThread(thread, false);
1778 }
1779 } else {
1780 /*
1781 * It's unusual, but not impossible, for a non-first frame
1782 * to be at something other than a method invocation. For
1783 * example, if we do a new-instance on a nonexistent class,
1784 * we'll have a lot of class loader activity on the stack
1785 * above the frame with the "new" operation. Could also
1786 * happen while we initialize a Throwable when an instruction
1787 * fails.
1788 *
1789 * So there's not much we can do here to verify the PC,
1790 * except to verify that it's a GC point.
1791 */
1792 }
1793 assert(saveArea->xtra.currentPc != NULL);
1794#endif
1795
1796 const RegisterMap* pMap;
1797 const u1* regVector;
1798 int i;
1799
1800 Method* nonConstMethod = (Method*) method; // quiet gcc
1801 pMap = dvmGetExpandedRegisterMap(nonConstMethod);
1802
1803 /* assert(pMap != NULL); */
1804
1805 //LOGI("PGC: %s.%s\n", method->clazz->descriptor, method->name);
1806
1807 if (pMap != NULL) {
1808 /* found map, get registers for this address */
1809 int addr = saveArea->xtra.currentPc - method->insns;
1810 regVector = dvmRegisterMapGetLine(pMap, addr);
1811 /*
1812 if (regVector == NULL) {
1813 LOGI("PGC: map but no entry for %s.%s addr=0x%04x\n",
1814 method->clazz->descriptor, method->name, addr);
1815 } else {
1816 LOGI("PGC: found map for %s.%s 0x%04x (t=%d)\n",
1817 method->clazz->descriptor, method->name, addr,
1818 thread->threadId);
1819 }
1820 */
1821 } else {
1822 /*
1823 * No map found. If precise GC is disabled this is
1824 * expected -- we don't create pointers to the map data even
1825 * if it's present -- but if it's enabled it means we're
1826 * unexpectedly falling back on a conservative scan, so it's
1827 * worth yelling a little.
1828 */
1829 if (gDvm.preciseGc) {
1830 LOGI("PGC: no map for %s.%s\n", method->clazz->descriptor, method->name);
1831 }
1832 regVector = NULL;
1833 }
1834
1835 /* assert(regVector != NULL); */
1836
1837 if (regVector == NULL) {
1838 /* conservative scan */
1839 for (i = method->registersSize - 1; i >= 0; i--) {
1840 u4 rval = *framePtr++;
1841 if (rval != 0 && (rval & 0x3) == 0) {
1842 abort();
1843 /* dvmMarkIfObject((Object *)rval); */
1844 }
1845 }
1846 } else {
1847 /*
1848 * Precise scan. v0 is at the lowest address on the
1849 * interpreted stack, and is the first bit in the register
1850 * vector, so we can walk through the register map and
1851 * memory in the same direction.
1852 *
1853 * A '1' bit indicates a live reference.
1854 */
1855 u2 bits = 1 << 1;
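                /*
                 * Illustrative walk-through (hypothetical values, not taken
                 * from any real register map): suppose registersSize is 3 and
                 * the first register map byte is 0x05 (binary 101).  The
                 * first pass reloads bits to 0x0105; bit 0 is set, so v0 is
                 * treated as a live reference and scavenged.  The next shift
                 * leaves bit 0 clear, so v1 is skipped.  The shift after that
                 * sets bit 0 again, so v2 is scavenged.  The 0x0100 marker
                 * only tells the loop when the current byte is exhausted and
                 * the next one must be loaded.
                 */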
1856 for (i = method->registersSize - 1; i >= 0; i--) {
1857 /* u4 rval = *framePtr++; */
1858 u4 rval = *framePtr;
1859
1860 bits >>= 1;
1861 if (bits == 1) {
1862 /* set bit 9 so we can tell when we're empty */
1863 bits = *regVector++ | 0x0100;
1864 LOGVV("loaded bits: 0x%02x\n", bits & 0xff);
1865 }
1866
1867 if (rval != 0 && (bits & 0x01) != 0) {
1868 /*
1869 * Non-null, register marked as live reference. This
1870 * should always be a valid object.
1871 */
1872#if WITH_EXTRA_GC_CHECKS > 0
1873 if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) {
1874 /* this is very bad */
1875 LOGE("PGC: invalid ref in reg %d: 0x%08x\n",
1876 method->registersSize-1 - i, rval);
1877 } else
1878#endif
1879 {
1880
1881 // LOGI("stack reference %u@%p", *framePtr, framePtr);
1882 /* dvmMarkObjectNonNull((Object *)rval); */
1883 scavengeReference((Object **) framePtr);
1884 }
1885 } else {
1886 /*
1887 * Null or non-reference, do nothing at all.
1888 */
1889#if WITH_EXTRA_GC_CHECKS > 1
1890 if (dvmIsValidObject((Object*) rval)) {
1891 /* this is normal, but we feel chatty */
1892 LOGD("PGC: ignoring valid ref in reg %d: 0x%08x\n",
1893 method->registersSize-1 - i, rval);
1894 }
1895#endif
1896 }
1897 ++framePtr;
1898 }
1899 dvmReleaseRegisterMapLine(pMap, regVector);
1900 }
1901 }
Carl Shapiro952e84a2010-05-06 14:35:29 -07001902 /* else this is a break frame and there is nothing to gray, or
Carl Shapirod28668c2010-04-15 16:10:00 -07001903 * this is a native method and the registers are just the "ins",
1904 * copied from various registers in the caller's set.
1905 */
1906
1907#if WITH_EXTRA_GC_CHECKS > 1
1908 first = false;
1909#endif
1910
1911 /* Don't fall into an infinite loop if things get corrupted.
1912 */
1913 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1914 saveArea->prevFrame == NULL);
1915 framePtr = saveArea->prevFrame;
1916 }
1917}
1918
1919static void scavengeThread(Thread *thread)
1920{
1921 assert(thread->status != THREAD_RUNNING ||
1922 thread->isSuspended ||
1923 thread == dvmThreadSelf());
1924
1925 // LOGI("scavengeThread(thread=%p)", thread);
1926
1927 // LOGI("Scavenging threadObj=%p", thread->threadObj);
1928 scavengeReference(&thread->threadObj);
1929
1930 // LOGI("Scavenging exception=%p", thread->exception);
1931 scavengeReference(&thread->exception);
1932
1933 scavengeThreadStack(thread);
1934}
1935
1936static void scavengeThreadList(void)
1937{
1938 Thread *thread;
1939
1940 dvmLockThreadList(dvmThreadSelf());
1941 thread = gDvm.threadList;
1942 while (thread) {
1943 scavengeThread(thread);
1944 thread = thread->next;
1945 }
1946 dvmUnlockThreadList();
1947}
1948
1949static void verifyThreadStack(const Thread *thread)
1950{
1951 const u4 *framePtr;
1952
1953 assert(thread != NULL);
1954 framePtr = (const u4 *)thread->curFrame;
1955 while (framePtr != NULL) {
1956 const StackSaveArea *saveArea;
1957 const Method *method;
1958
1959 saveArea = SAVEAREA_FROM_FP(framePtr);
1960 method = saveArea->method;
1961 if (method != NULL && !dvmIsNativeMethod(method)) {
1962 const RegisterMap* pMap;
1963 const u1* regVector;
1964 int i;
1965
1966 Method* nonConstMethod = (Method*) method; // quiet gcc
1967 pMap = dvmGetExpandedRegisterMap(nonConstMethod);
1968
1969 /* assert(pMap != NULL); */
1970
1971 // LOGI("PGC: %s.%s\n", method->clazz->descriptor, method->name);
1972
1973 if (pMap != NULL) {
1974 /* found map, get registers for this address */
1975 int addr = saveArea->xtra.currentPc - method->insns;
1976 regVector = dvmRegisterMapGetLine(pMap, addr);
1977 if (regVector == NULL) {
1978 LOGI("PGC: map but no entry for %s.%s addr=0x%04x\n",
1979 method->clazz->descriptor, method->name, addr);
1980 } else {
1981 //LOGI("PGC: found map for %s.%s 0x%04x (t=%d)\n", method->clazz->descriptor, method->name, addr, thread->threadId);
1982 }
1983 } else {
1984 /*
1985 * No map found. If precise GC is disabled this is
1986 * expected -- we don't create pointers to the map data even
1987 * if it's present -- but if it's enabled it means we're
1988 * unexpectedly falling back on a conservative scan, so it's
1989 * worth yelling a little.
1990 */
1991 if (gDvm.preciseGc) {
1992 LOGI("PGC: no map for %s.%s\n",
1993 method->clazz->descriptor, method->name);
1994 }
1995 regVector = NULL;
1996 }
1997
1998 /* assert(regVector != NULL); */
1999
2000 if (regVector == NULL) {
2001 /* conservative scan */
2002 for (i = method->registersSize - 1; i >= 0; i--) {
2003 u4 rval = *framePtr++;
2004 if (rval != 0 && (rval & 0x3) == 0) {
2005 abort();
2006 /* dvmMarkIfObject((Object *)rval); */
2007 }
2008 }
2009 } else {
2010 /*
2011 * Precise scan. v0 is at the lowest address on the
2012 * interpreted stack, and is the first bit in the register
2013 * vector, so we can walk through the register map and
2014 * memory in the same direction.
2015 *
2016 * A '1' bit indicates a live reference.
2017 */
2018 u2 bits = 1 << 1;
2019 for (i = method->registersSize - 1; i >= 0; i--) {
2020 u4 rval = *framePtr;
2021
2022 bits >>= 1;
2023 if (bits == 1) {
2024 /* set bit 9 so we can tell when we're empty */
2025 bits = *regVector++ | 0x0100;
2026 LOGVV("loaded bits: 0x%02x\n", bits & 0xff);
2027 }
2028
2029 if (rval != 0 && (bits & 0x01) != 0) {
2030 /*
2031 * Non-null, register marked as live reference. This
2032 * should always be a valid object.
2033 */
2034 //LOGI("verify stack reference %p", (Object *)*framePtr);
2035 verifyReference((Object *)*framePtr);
2036 } else {
2037 /*
2038 * Null or non-reference, do nothing at all.
2039 */
2040 }
2041 ++framePtr;
2042 }
2043 dvmReleaseRegisterMapLine(pMap, regVector);
2044 }
2045 }
Carl Shapiro952e84a2010-05-06 14:35:29 -07002046 /* else this is a break frame and there is nothing to gray, or
Carl Shapirod28668c2010-04-15 16:10:00 -07002047 * this is a native method and the registers are just the "ins",
2048 * copied from various registers in the caller's set.
2049 */
2050
2051 /* Don't fall into an infinite loop if things get corrupted.
2052 */
2053 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
2054 saveArea->prevFrame == NULL);
2055 framePtr = saveArea->prevFrame;
2056 }
2057}
2058
2059static void verifyThread(const Thread *thread)
2060{
2061 assert(thread->status != THREAD_RUNNING ||
2062 thread->isSuspended ||
2063 thread == dvmThreadSelf());
2064
2065 LOGI("verifyThread(thread=%p)", thread);
2066
2067 LOGI("verify threadObj=%p", thread->threadObj);
2068 verifyReference(thread->threadObj);
2069
2070 LOGI("verify exception=%p", thread->exception);
2071 verifyReference(thread->exception);
2072
2073 LOGI("verify thread->internalLocalRefTable");
2074 verifyReferenceTable(&thread->internalLocalRefTable);
2075
2076 LOGI("verify thread->jniLocalRefTable");
2077 verifyReferenceTable(&thread->jniLocalRefTable);
2078
2079 /* Can the check be pushed into the promote routine? */
2080 if (thread->jniMonitorRefTable.table) {
2081 LOGI("verify thread->jniMonitorRefTable");
2082 verifyReferenceTable(&thread->jniMonitorRefTable);
2083 }
2084
2085 verifyThreadStack(thread);
2086}
2087
2088static void verifyThreadList(void)
2089{
2090 Thread *thread;
2091
2092 dvmLockThreadList(dvmThreadSelf());
2093 thread = gDvm.threadList;
2094 while (thread) {
2095 verifyThread(thread);
2096 thread = thread->next;
2097 }
2098 dvmUnlockThreadList();
2099}
2100
Carl Shapiro583d64c2010-05-04 10:44:47 -07002101static void pinNativeMethodArguments(const Thread *thread)
2102{
2103 const u4 *framePtr;
2104 const StackSaveArea *saveArea;
2105 const Method *method;
2106 const char *shorty;
2107 Object *obj;
2108 int i;
2109
2110 saveArea = NULL;
2111 framePtr = (const u4 *)thread->curFrame;
2112 for (; framePtr != NULL; framePtr = saveArea->prevFrame) {
2113 saveArea = SAVEAREA_FROM_FP(framePtr);
2114 method = saveArea->method;
2115 if (method != NULL && dvmIsNativeMethod(method)) {
2116 /*
Carl Shapiro952e84a2010-05-06 14:35:29 -07002117 * For purposes of graying references, we don't need to do
Carl Shapiro583d64c2010-05-04 10:44:47 -07002118 * anything here, because all of the native "ins" were copied
2119 * from registers in the caller's stack frame and won't be
2120 * changed (an interpreted method can freely use registers
2121 * with parameters like any other register, but natives don't
2122 * work that way).
2123 *
2124 * However, we need to ensure that references visible to
2125 * native methods don't move around. We can do a precise scan
2126 * of the arguments by examining the method signature.
2127 */
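            /*
             * Worked example (hypothetical signature, for illustration only):
             * for a non-static native method with shorty "VILJ", the code
             * below first pins the "this" reference, then walks the argument
             * words using the shorty: 'I' is skipped as a 32-bit scalar, 'L'
             * pins the referenced object (if non-null), and 'J' advances an
             * extra word to step over the 64-bit long.
             */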
2128 LOGI("+++ native scan %s.%s\n",
2129 method->clazz->descriptor, method->name);
2130 assert(method->registersSize == method->insSize);
2131 if (!dvmIsStaticMethod(method)) {
2132 /* grab the "this" pointer */
2133 obj = (Object *)*framePtr++;
2134 if (obj == NULL) {
2135 /*
2136 * This can happen for the "fake" entry frame inserted
2137 * for threads created outside the VM. There's no actual
2138 * call so there's no object. If we changed the fake
2139 * entry method to be declared "static" then this
2140 * situation should never occur.
2141 */
2142 } else {
2143 assert(dvmIsValidObject(obj));
2144 pinObject(obj);
2145 }
2146 }
2147 shorty = method->shorty+1; // skip return value
2148 for (i = method->registersSize - 1; i >= 0; i--, framePtr++) {
2149 switch (*shorty++) {
2150 case 'L':
2151 obj = (Object *)*framePtr;
2152 if (obj != NULL) {
2153 assert(dvmIsValidObject(obj));
2154 pinObject(obj);
2155 }
2156 break;
2157 case 'D':
2158 case 'J':
2159 framePtr++;
2160 break;
2161 default:
2162 /* 32-bit non-reference value */
2163 obj = (Object *)*framePtr; // debug, remove
2164 if (dvmIsValidObject(obj)) { // debug, remove
2165 /* if we see a lot of these, our scan might be off */
2166 LOGI("+++ did NOT pin obj %p\n", obj);
2167 }
2168 break;
2169 }
2170 }
2171 }
2172 /*
2173 * Don't fall into an infinite loop if things get corrupted.
2174 */
2175 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
2176 saveArea->prevFrame == NULL);
2177 }
2178}
2179
2180static void pinThread(const Thread *thread)
Carl Shapirod28668c2010-04-15 16:10:00 -07002181{
2182 assert(thread != NULL);
2183 assert(thread->status != THREAD_RUNNING ||
2184 thread->isSuspended ||
2185 thread == dvmThreadSelf());
2186 LOGI("pinThread(thread=%p)", thread);
2187
Carl Shapiro583d64c2010-05-04 10:44:47 -07002188 LOGI("Pin native method arguments");
2189 pinNativeMethodArguments(thread);
2190
Carl Shapirod28668c2010-04-15 16:10:00 -07002191 LOGI("Pin internalLocalRefTable");
2192 pinReferenceTable(&thread->internalLocalRefTable);
2193
2194 LOGI("Pin jniLocalRefTable");
2195 pinReferenceTable(&thread->jniLocalRefTable);
2196
2197 /* Can the check be pushed into the promote routine? */
2198 if (thread->jniMonitorRefTable.table) {
2199 LOGI("Pin jniMonitorRefTable");
2200 pinReferenceTable(&thread->jniMonitorRefTable);
2201 }
2202}
2203
2204static void pinThreadList(void)
2205{
2206 Thread *thread;
2207
2208 dvmLockThreadList(dvmThreadSelf());
2209 thread = gDvm.threadList;
2210 while (thread) {
2211 pinThread(thread);
2212 thread = thread->next;
2213 }
2214 dvmUnlockThreadList();
2215}
2216
2217/*
2218 * Heap block scavenging.
2219 */
2220
2221/*
2222 * Scavenge objects in the current block. Scavenging terminates when
2223 * the cursor reaches the highest address in the block or when it
2224 * reaches a run of zero words that extends to the end of the block.
2225 */
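/*
 * A sketch of the layout the parser below assumes (illustrative sizes
 * only): a block holding a 16-byte object followed by a 24-byte object
 * and then zero words to the end of the block is walked as two objects,
 * because each object begins with a non-zero class pointer word; the
 * trailing run of zeroes is treated as padding and ends the scan.
 */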
2226static void scavengeBlock(HeapSource *heapSource, size_t block)
2227{
2228 u1 *cursor;
2229 u1 *end;
2230 size_t size;
2231
2232 LOG_SCAVENGE("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);
2233
2234 assert(heapSource != NULL);
2235 assert(block < heapSource->totalBlocks);
2236 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
2237
2238 cursor = blockToAddress(heapSource, block);
2239 end = cursor + BLOCK_SIZE;
2240 LOG_SCAVENGE("scavengeBlock start=%p, end=%p", cursor, end);
2241
2242 /* Parse and scavenge the current block. */
2243 size = 0;
2244 while (cursor < end) {
2245 u4 word = *(u4 *)cursor;
2246 if (word != 0) {
Carl Shapiro2396fda2010-05-03 20:14:14 -07002247 scavengeObject((Object *)cursor);
2248 size = objectSize((Object *)cursor);
Carl Shapirod28668c2010-04-15 16:10:00 -07002249 size = alignUp(size, ALLOC_ALIGNMENT);
2250 cursor += size;
2251 } else if (word == 0 && cursor == (u1 *)gDvm.unlinkedJavaLangClass) {
2252 size = sizeof(ClassObject);
2253 cursor += size;
2254 } else {
2255 /* Check for padding. */
2256 while (*(u4 *)cursor == 0) {
2257 cursor += 4;
2258 if (cursor == end) break;
2259 }
2260 /* Punt if something went wrong. */
2261 assert(cursor == end);
2262 }
2263 }
2264}
2265
Carl Shapiro2396fda2010-05-03 20:14:14 -07002266static size_t objectSize(const Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07002267{
2268 size_t size;
2269
Carl Shapiro2396fda2010-05-03 20:14:14 -07002270 assert(obj != NULL);
2271 assert(obj->clazz != NULL);
Carl Shapirod28668c2010-04-15 16:10:00 -07002272 if (obj->clazz == gDvm.classJavaLangClass ||
2273 obj->clazz == gDvm.unlinkedJavaLangClass) {
2274 size = dvmClassObjectSize((ClassObject *)obj);
2275 } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
2276 size = dvmArrayObjectSize((ArrayObject *)obj);
2277 } else {
Carl Shapiro2396fda2010-05-03 20:14:14 -07002278 assert(obj->clazz->objectSize != 0);
Carl Shapirod28668c2010-04-15 16:10:00 -07002279 size = obj->clazz->objectSize;
2280 }
Carl Shapiro2396fda2010-05-03 20:14:14 -07002281 if (LW_HASH_STATE(obj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
2282 size += sizeof(u4);
2283 }
Carl Shapirod28668c2010-04-15 16:10:00 -07002284 return size;
2285}
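/*
 * For example (illustrative only): a plain, non-array, non-class instance
 * simply reports its class's objectSize, while an instance whose lock word
 * says it was hashed and then moved reports one extra u4 so the relocated
 * copy can carry its original identity hash along with it.
 */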
2286
2287static void verifyBlock(HeapSource *heapSource, size_t block)
2288{
2289 u1 *cursor;
2290 u1 *end;
2291 size_t size;
2292
2293 // LOGI("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);
2294
2295 assert(heapSource != NULL);
2296 assert(block < heapSource->totalBlocks);
2297 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
2298
2299 cursor = blockToAddress(heapSource, block);
2300 end = cursor + BLOCK_SIZE;
2301 // LOGI("verifyBlock start=%p, end=%p", cursor, end);
2302
2303 /* Parse and scavenge the current block. */
2304 size = 0;
2305 while (cursor < end) {
2306 u4 word = *(u4 *)cursor;
2307 if (word != 0) {
2308 dvmVerifyObject((Object *)cursor);
2309 size = objectSize((Object *)cursor);
2310 size = alignUp(size, ALLOC_ALIGNMENT);
2311 cursor += size;
2312 } else if (word == 0 && cursor == (u1 *)gDvm.unlinkedJavaLangClass) {
2313 size = sizeof(ClassObject);
2314 cursor += size;
2315 } else {
2316 /* Check for padding. */
2317            while (*(u4 *)cursor == 0) {
2318 cursor += 4;
2319 if (cursor == end) break;
2320 }
2321 /* Punt if something went wrong. */
2322 assert(cursor == end);
2323 }
2324 }
2325}
2326
2327static void describeBlockQueue(const HeapSource *heapSource)
2328{
2329 size_t block, count;
2330 char space;
2331
2332 block = heapSource->queueHead;
2333 count = 0;
2334 LOG_SCAVENGE(">>> describeBlockQueue(heapSource=%p)", heapSource);
2335 /* Count the number of blocks enqueued. */
2336 while (block != QUEUE_TAIL) {
2337 block = heapSource->blockQueue[block];
2338 ++count;
2339 }
2340 LOG_SCAVENGE("blockQueue %zu elements, enqueued %zu",
2341 count, heapSource->queueSize);
2342 block = heapSource->queueHead;
2343 while (block != QUEUE_TAIL) {
2344 space = heapSource->blockSpace[block];
2345        LOG_SCAVENGE("block=%zu@%p,space=%d", block, blockToAddress(heapSource,block), space);
2346 block = heapSource->blockQueue[block];
2347 }
2348
2349 LOG_SCAVENGE("<<< describeBlockQueue(heapSource=%p)", heapSource);
2350}
2351
2352/*
2353 * Blackens promoted objects.
2354 */
2355static void scavengeBlockQueue(void)
2356{
2357 HeapSource *heapSource;
2358 size_t block;
2359
2360 LOG_SCAVENGE(">>> scavengeBlockQueue()");
2361 heapSource = gDvm.gcHeap->heapSource;
2362 describeBlockQueue(heapSource);
2363 while (heapSource->queueHead != QUEUE_TAIL) {
2364 block = heapSource->queueHead;
2365 LOG_SCAVENGE("Dequeueing block %zu\n", block);
2366 scavengeBlock(heapSource, block);
2367 heapSource->queueHead = heapSource->blockQueue[block];
2368        LOG_SCAVENGE("New queue head is %zu\n", heapSource->queueHead);
2369 }
2370 LOG_SCAVENGE("<<< scavengeBlockQueue()");
2371}
2372
2373/*
2374 * Scan the block list and verify all blocks that are marked as being
2375 * in new space. This should be parametrized so we can invoke this
2376 * routine outside of the context of a collection.
2377 */
2378static void verifyNewSpace(void)
2379{
2380 HeapSource *heapSource;
2381 size_t i;
2382 size_t c0, c1, c2, c7;
2383
2384 c0 = c1 = c2 = c7 = 0;
2385 heapSource = gDvm.gcHeap->heapSource;
2386 for (i = 0; i < heapSource->totalBlocks; ++i) {
2387 switch (heapSource->blockSpace[i]) {
2388 case BLOCK_FREE: ++c0; break;
2389 case BLOCK_TO_SPACE: ++c1; break;
2390 case BLOCK_FROM_SPACE: ++c2; break;
2391 case BLOCK_CONTINUED: ++c7; break;
2392 default: assert(!"reached");
2393 }
2394 }
2395 LOG_VERIFY("Block Demographics: "
2396 "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu",
2397 c0, c1, c2, c7);
2398 for (i = 0; i < heapSource->totalBlocks; ++i) {
2399 if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) {
2400 continue;
2401 }
2402 verifyBlock(heapSource, i);
2403 }
2404}
2405
2406static void scavengeGlobals(void)
2407{
2408 scavengeReference((Object **)(void *)&gDvm.classJavaLangClass);
2409 scavengeReference((Object **)(void *)&gDvm.classJavaLangClassArray);
2410 scavengeReference((Object **)(void *)&gDvm.classJavaLangError);
2411 scavengeReference((Object **)(void *)&gDvm.classJavaLangObject);
2412 scavengeReference((Object **)(void *)&gDvm.classJavaLangObjectArray);
2413 scavengeReference((Object **)(void *)&gDvm.classJavaLangRuntimeException);
2414 scavengeReference((Object **)(void *)&gDvm.classJavaLangString);
2415 scavengeReference((Object **)(void *)&gDvm.classJavaLangThread);
2416 scavengeReference((Object **)(void *)&gDvm.classJavaLangVMThread);
2417 scavengeReference((Object **)(void *)&gDvm.classJavaLangThreadGroup);
2418 scavengeReference((Object **)(void *)&gDvm.classJavaLangThrowable);
2419 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElement);
2420 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElementArray);
2421 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArray);
2422 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArrayArray);
2423 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectAccessibleObject);
2424 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructor);
2425 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructorArray);
2426 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectField);
2427 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectFieldArray);
2428 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethod);
2429 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethodArray);
2430 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectProxy);
2431 scavengeReference((Object **)(void *)&gDvm.classJavaLangExceptionInInitializerError);
2432 scavengeReference((Object **)(void *)&gDvm.classJavaLangRefReference);
2433 scavengeReference((Object **)(void *)&gDvm.classJavaNioReadWriteDirectByteBuffer);
2434 scavengeReference((Object **)(void *)&gDvm.classJavaSecurityAccessController);
2435 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
2436 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
2437 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray);
2438 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyNioInternalDirectBuffer);
2439 scavengeReference((Object **)(void *)&gDvm.classArrayBoolean);
2440 scavengeReference((Object **)(void *)&gDvm.classArrayChar);
2441 scavengeReference((Object **)(void *)&gDvm.classArrayFloat);
2442 scavengeReference((Object **)(void *)&gDvm.classArrayDouble);
2443 scavengeReference((Object **)(void *)&gDvm.classArrayByte);
2444 scavengeReference((Object **)(void *)&gDvm.classArrayShort);
2445 scavengeReference((Object **)(void *)&gDvm.classArrayInt);
2446 scavengeReference((Object **)(void *)&gDvm.classArrayLong);
2447}
2448
2449void describeHeap(void)
2450{
2451 HeapSource *heapSource;
2452
2453 heapSource = gDvm.gcHeap->heapSource;
2454 describeBlocks(heapSource);
2455}
2456
2457/*
2458 * The collection interface. Collection has a few distinct phases.
2459 * The first is flipping, also known as condemning or whitening, the
2460 * heap.  The second is promoting all objects that are pointed to by
2461 * pinned or ambiguous references.  The third is tracing from the
2462 * stacks, registers, and various globals.  Lastly, the heap is
2463 * verified; this final phase should be optional.
2464 */
2465void dvmScavengeRoots(void) /* Needs a new name badly */
2466{
2468 GcHeap *gcHeap;
2469
2470 {
2471 size_t alloc, unused, total;
2472
2473 room(&alloc, &unused, &total);
2474 LOGI("BEFORE GC: %zu alloc, %zu free, %zu total.",
2475 alloc, unused, total);
2476 }
2477
2478 gcHeap = gDvm.gcHeap;
2479 dvmHeapSourceFlip();
2480
2481 /*
2482 * Promote blocks with stationary objects.
2483 */
2484
2485 // LOGI("Pinning gDvm.threadList");
2486 pinThreadList();
2487
2488 // LOGI("Pinning gDvm.jniGlobalRefTable");
2489 pinReferenceTable(&gDvm.jniGlobalRefTable);
2490
2491 // LOGI("Pinning gDvm.jniPinRefTable");
2492 pinReferenceTable(&gDvm.jniPinRefTable);
2493
2494 // LOGI("Pinning gDvm.gcHeap->nonCollectableRefs");
2495 pinReferenceTable(&gcHeap->nonCollectableRefs);
2496
2497 // LOGI("Pinning gDvm.loadedClasses");
2498 pinHashTableEntries(gDvm.loadedClasses);
2499
2500 // LOGI("Pinning gDvm.primitiveClass");
2501 pinPrimitiveClasses();
2502
2503 // LOGI("Pinning gDvm.internedStrings");
2504 pinInternedStrings();
2505
2506 // describeBlocks(gcHeap->heapSource);
2507
2508 /*
2509     * Create the first open new-space page right here.
2510 */
2511
2512 /* Reset allocation to an unallocated block. */
2513 gDvm.gcHeap->heapSource->allocPtr = allocateBlocks(gDvm.gcHeap->heapSource, 1);
2514 gDvm.gcHeap->heapSource->allocLimit = gDvm.gcHeap->heapSource->allocPtr + BLOCK_SIZE;
2515 /*
2516 * Hack: promote the empty block allocated above. If the
2517 * promotions that occurred above did not actually gray any
2518 * objects, the block queue may be empty. We must force a
2519 * promotion to be safe.
2520 */
2521 promoteBlockByAddr(gDvm.gcHeap->heapSource, gDvm.gcHeap->heapSource->allocPtr);
2522
2523 /*
2524 * Scavenge blocks and relocate movable objects.
2525 */
2526
2527 LOGI("Scavenging gDvm.threadList");
2528 scavengeThreadList();
2529
2530 LOGI("Scavenging gDvm.gcHeap->referenceOperations");
2531 scavengeLargeHeapRefTable(gcHeap->referenceOperations);
2532
2533 LOGI("Scavenging gDvm.gcHeap->pendingFinalizationRefs");
2534 scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs);
2535
2536 LOGI("Scavenging random global stuff");
2537 scavengeReference(&gDvm.outOfMemoryObj);
2538 scavengeReference(&gDvm.internalErrorObj);
2539 scavengeReference(&gDvm.noClassDefFoundErrorObj);
2540
2541 LOGI("Scavenging gDvm.dbgRegistry");
2542 scavengeHashTable(gDvm.dbgRegistry);
2543
2544 // LOGI("Scavenging gDvm.internedString");
2545 scavengeInternedStrings();
2546
2547 LOGI("Root scavenge has completed.");
2548
2549 scavengeBlockQueue();
2550
2551 LOGI("Re-snap global class pointers.");
2552 scavengeGlobals();
2553
2554 LOGI("New space scavenge has completed.");
2555
2556 /*
Carl Shapiro952e84a2010-05-06 14:35:29 -07002557 * Process reference objects in strength order.
2558 */
2559
2560 LOG_REFERENCE("Processing soft references...");
2561 preserveSoftReferences(&gDvm.gcHeap->softReferences);
2562 clearWhiteReferences(&gDvm.gcHeap->softReferences);
2563
2564 LOG_REFERENCE("Processing weak references...");
2565 clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2566
2567 LOG_REFERENCE("Finding finalizations...");
2568 processFinalizableReferences();
2569
2570 LOG_REFERENCE("Processing f-reachable soft references...");
2571 clearWhiteReferences(&gDvm.gcHeap->softReferences);
2572
2573 LOG_REFERENCE("Processing f-reachable weak references...");
2574 clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2575
2576 LOG_REFERENCE("Processing phantom references...");
2577 clearWhiteReferences(&gDvm.gcHeap->phantomReferences);
2578
2579 /*
Carl Shapirod28668c2010-04-15 16:10:00 -07002580 * Verify the stack and heap.
2581 */
2582
2583 // LOGI("Validating new space.");
2584
2585 verifyInternedStrings();
2586
2587 verifyThreadList();
2588
2589 verifyNewSpace();
2590
2591 // LOGI("New space verify has completed.");
2592
2593 //describeBlocks(gcHeap->heapSource);
2594
2595 clearFromSpace(gcHeap->heapSource);
2596
2597 {
2598 size_t alloc, rem, total;
2599
2600 room(&alloc, &rem, &total);
2601 LOGI("AFTER GC: %zu alloc, %zu free, %zu total.", alloc, rem, total);
2602 }
2603}
2604
2605/*
2606 * Interface compatibility routines.
2607 */
2608
2609void dvmClearWhiteRefs(Object **list)
2610{
Carl Shapiro952e84a2010-05-06 14:35:29 -07002611 /* do nothing */
Carl Shapirod28668c2010-04-15 16:10:00 -07002612 assert(*list == NULL);
2613}
2614
2615void dvmHandleSoftRefs(Object **list)
2616{
Carl Shapiro952e84a2010-05-06 14:35:29 -07002617 /* do nothing */
Carl Shapirod28668c2010-04-15 16:10:00 -07002618 assert(*list == NULL);
2619}
2620
2621bool dvmHeapBeginMarkStep(GcMode mode)
2622{
2623 /* do nothing */
2624 return true;
2625}
2626
2627void dvmHeapFinishMarkStep(void)
2628{
2629 /* do nothing */
2630}
2631
2632void dvmHeapMarkRootSet(void)
2633{
2634 /* do nothing */
2635}
2636
2637void dvmHeapScanMarkedObjects(void)
2638{
2639 dvmScavengeRoots();
2640}
2641
2642void dvmHeapScheduleFinalizations(void)
2643{
2644 /* do nothing */
2645}
2646
2647void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
2648{
2649 /* do nothing */
2650}
2651
2652void dvmMarkObjectNonNull(const Object *obj)
2653{
2654 assert(!"implemented");
2655}