/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <limits.h>
#include <sys/mman.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/Verify.h"
#include "alloc/clz.h"

/*
 * A "mostly copying", generational, garbage collector.
 *
 * TODO: we allocate our own contiguous tract of page frames to back
 * object allocations. To cooperate with other heaps active in the
 * virtual machine we need to move the responsibility of allocating
 * pages someplace outside of this code.
 *
 * The other major data structures that maintain the state of the heap
 * are the block space table and the block queue.
 *
 * The block space table records the state of a block. We must track
 * whether a block is:
 *
 * - Free or allocated in some space.
 *
 * - If the block holds part of a large object allocation, whether the
 *   block is the initial or a continued block of the allocation.
 *
 * - Whether the block is pinned, that is to say whether at least one
 *   object in the block must remain stationary. Only needed during a
 *   GC.
 *
 * - Which space the block belongs to. At present this means
 *   from-space or to-space.
 *
 * The block queue is used during garbage collection. Unlike Cheney's
 * algorithm, from-space and to-space are not contiguous. Therefore,
 * one cannot maintain the state of the copy with just two pointers.
 * The block queue exists to thread lists of blocks from the various
 * spaces together.
 *
 * Additionally, we record the free space frontier of the heap, as
 * well as the address of the first object within a block, which is
 * required to copy objects following a large object (not currently
 * implemented). This is stored in the heap source structure. This
 * should be moved elsewhere to support in-line allocations from Java
 * threads.
 *
 * Allocation requests are satisfied by reserving storage from one or
 * more contiguous blocks. Objects that are small enough to fit
 * inside a block are packed together within a block. Objects that
 * are larger than a block are allocated from contiguous sequences of
 * blocks. When half the available blocks are filled, a garbage
 * collection occurs. We "flip" spaces (exchange from- and to-space),
 * copy live objects into to-space, and perform pointer adjustment.
 *
 * Copying is made more complicated by the requirement that some
 * objects must not be moved. This property is known as "pinning".
 * These objects must be dealt with specially. We use Bartlett's
 * scheme; blocks containing such objects are grayed (promoted) at the
 * start of a garbage collection. By virtue of this trick, tracing
 * from the roots proceeds as usual but all objects on those pages are
 * considered promoted and therefore not moved.
 *
 * TODO: there is sufficient information within the garbage collector
 * to implement Attardi's scheme for evacuating unpinned objects from
 * a page that is otherwise pinned. This would eliminate false
 * retention caused by the large pinning granularity.
 *
 * We need a scheme for medium and large objects. Ignore that for
 * now; we can return to this later.
 *
 * Eventually we need to worry about promoting objects out of the
 * copy-collected heap (tenuring) into a less volatile space. Copying
 * may not always be the best policy for such spaces. We should
 * consider a variant of mark, sweep, compact.
 *
 * The block scheme allows us to use VM page faults to maintain a
 * write barrier. Consider having a special leaf state for a page.
 *
 * Bibliography:
 *
 * C. J. Cheney. 1970. A non-recursive list compacting
 * algorithm. CACM 13(11), 677--678.
 *
 * Joel F. Bartlett. 1988. Compacting Garbage Collection with
 * Ambiguous Roots. Digital Equipment Corporation.
 *
 * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up
 * Generations and C++. Digital Equipment Corporation.
 *
 * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage
 * Collection in Uncooperative Environments. Digital Equipment
 * Corporation.
 *
 * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory
 * Management Framework. TR-94-010.
 *
 * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable
 * memory management framework for C++. Software -- Practice and
 * Experience. 28(11), 1143-1183.
 *
 */
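
/*
 * Illustrative note (added for clarity, not part of the original design
 * comment): with 512-byte blocks, a single 1300-byte allocation is carved
 * out of three contiguous blocks; the block space table marks the first
 * block with the owning space and the following two as BLOCK_CONTINUED.
 * Objects smaller than a block are packed together within one block.
 */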

#define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0]))

#if 0
#define LOG_ALLOC LOGI
#define LOG_PIN LOGI
#define LOG_PROM LOGI
#define LOG_REF LOGI
#define LOG_SCAV LOGI
#define LOG_TRAN LOGI
#define LOG_VER LOGI
#else
#define LOG_ALLOC(...) ((void)0)
#define LOG_PIN(...) ((void)0)
#define LOG_PROM(...) ((void)0)
#define LOG_REF(...) ((void)0)
#define LOG_SCAV(...) ((void)0)
#define LOG_TRAN(...) ((void)0)
#define LOG_VER(...) ((void)0)
#endif

static void enqueueBlock(HeapSource *heapSource, size_t block);
static void scavengeReference(Object **obj);
static void verifyReference(const void *obj);
static void printHeapBitmap(const HeapBitmap *bitmap);
static void printHeapBitmapSxS(const HeapBitmap *b1, const HeapBitmap *b2);
static bool toSpaceContains(const void *addr);
static bool fromSpaceContains(const void *addr);
static size_t sumHeapBitmap(const HeapBitmap *bitmap);
static size_t objectSize(const Object *obj);
static void scavengeDataObject(Object *obj);
static void scavengeBlockQueue(void);

/*
 * We use 512-byte blocks.
 */
enum { BLOCK_SHIFT = 9 };
enum { BLOCK_SIZE = 1 << BLOCK_SHIFT };

/*
 * Space identifiers, stored into the blockSpace array.
 */
enum {
    BLOCK_FREE = 0,
    BLOCK_FROM_SPACE = 1,
    BLOCK_TO_SPACE = 2,
    BLOCK_CONTINUED = 7
};

/*
 * Alignment for all allocations, in bytes.
 */
enum { ALLOC_ALIGNMENT = 8 };

/*
 * Sentinel value for the queue end.
 */
#define QUEUE_TAIL (~(size_t)0)

struct HeapSource {

    /* The base address of backing store. */
    u1 *blockBase;

    /* Total number of blocks available for allocation. */
    size_t totalBlocks;
    size_t allocBlocks;

    /*
     * The scavenger work queue. Implemented as an array of index
     * values into the queue.
     */
    size_t *blockQueue;

    /*
     * Base and limit blocks. Basically the shifted start address of
     * the block. We convert blocks to a relative number when
     * indexing in the block queue. TODO: make the block queue base
     * relative rather than the index into the block queue.
     */
    size_t baseBlock, limitBlock;

    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* The space of the current block 0 (free), 1 or 2. */
    char *blockSpace;

    /* Start of free space in the current block. */
    u1 *allocPtr;
    /* Exclusive limit of free space in the current block. */
    u1 *allocLimit;

    HeapBitmap allocBits;

    /*
     * The starting size of the heap. This value is the same as the
     * value provided to the -Xms flag.
     */
    size_t minimumSize;

    /*
     * The maximum size of the heap. This value is the same as the
     * -Xmx flag.
     */
    size_t maximumSize;

    /*
     * The current, committed size of the heap. At present, this is
     * equivalent to the maximumSize.
     */
    size_t currentSize;

    size_t bytesAllocated;
};

static unsigned long alignDown(unsigned long x, unsigned long n)
{
    return x & -n;
}

static unsigned long alignUp(unsigned long x, unsigned long n)
{
    return alignDown(x + (n - 1), n);
}
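
/*
 * Illustrative example (added for clarity; both helpers assume n is a
 * power of two, as it is everywhere in this file): alignDown(0x213, 0x200)
 * yields 0x200 and alignUp(0x213, 0x200) yields 0x400, i.e. rounding an
 * address down or up to a BLOCK_SIZE boundary.
 */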

static void describeBlocks(const HeapSource *heapSource)
{
    size_t i;

    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if ((i % 32) == 0) putchar('\n');
        printf("%d ", heapSource->blockSpace[i]);
    }
    putchar('\n');
}

/*
 * Virtual memory interface.
 */

static void *virtualAlloc(size_t length)
{
    void *addr;
    int flags, prot;

    flags = MAP_PRIVATE | MAP_ANONYMOUS;
    prot = PROT_READ | PROT_WRITE;
    addr = mmap(NULL, length, prot, flags, -1, 0);
    if (addr == MAP_FAILED) {
        LOGE_HEAP("mmap: %s", strerror(errno));
        addr = NULL;
    }
    return addr;
}

static void virtualFree(void *addr, size_t length)
{
    int res;

    assert(addr != NULL);
    assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0);
    res = munmap(addr, length);
    if (res == -1) {
        LOGE_HEAP("munmap: %s", strerror(errno));
    }
}

static int isValidAddress(const HeapSource *heapSource, const u1 *addr)
{
    size_t block;

    block = (uintptr_t)addr >> BLOCK_SHIFT;
    return heapSource->baseBlock <= block &&
           heapSource->limitBlock > block;
}

/*
 * Iterate over the block map looking for a contiguous run of free
 * blocks.
 */
static void *allocateBlocks(HeapSource *heapSource, size_t blocks)
{
    void *addr;
    size_t allocBlocks, totalBlocks;
    size_t i, j;

    allocBlocks = heapSource->allocBlocks;
    totalBlocks = heapSource->totalBlocks;
    /* Check underflow. */
    assert(blocks != 0);
    /* Check overflow. */
    if (allocBlocks + blocks > totalBlocks / 2) {
        return NULL;
    }
    /* Scan block map. */
    for (i = 0; i < totalBlocks; ++i) {
        /* Check fit. */
        for (j = 0; j < blocks; ++j) { /* runs over totalBlocks */
            if (heapSource->blockSpace[i+j] != BLOCK_FREE) {
                break;
            }
        }
        /* No fit? */
        if (j != blocks) {
            i += j;
            continue;
        }
        /* Fit, allocate. */
        heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */
        for (j = 1; j < blocks; ++j) {
            heapSource->blockSpace[i+j] = BLOCK_CONTINUED;
        }
        heapSource->allocBlocks += blocks;
        addr = &heapSource->blockBase[i*BLOCK_SIZE];
        memset(addr, 0, blocks*BLOCK_SIZE);
        /* Collecting? */
        if (heapSource->queueHead != QUEUE_TAIL) {
            LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
            /*
             * This allocation was on behalf of the transporter when it
             * shaded a white object gray. We enqueue the block so
             * the scavenger can further shade the gray objects black.
             */
            enqueueBlock(heapSource, i);
        }

        return addr;
    }
    /* Insufficient space, fail. */
    LOGE("Insufficient space, %zu blocks, %zu blocks allocated and %zu bytes allocated",
         heapSource->totalBlocks,
         heapSource->allocBlocks,
         heapSource->bytesAllocated);
    return NULL;
}

/* Converts an absolute address to a relative block number. */
static size_t addressToBlock(const HeapSource *heapSource, const void *addr)
{
    assert(heapSource != NULL);
    assert(isValidAddress(heapSource, addr));
    return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock;
}

/* Converts a relative block number to an absolute address. */
static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
{
    u1 *addr;

    addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
    assert(isValidAddress(heapSource, addr));
    return addr;
}

static void clearBlock(HeapSource *heapSource, size_t block)
{
    u1 *addr;
    size_t i;

    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    addr = heapSource->blockBase + block*BLOCK_SIZE;
    memset(addr, 0xCC, BLOCK_SIZE);
    for (i = 0; i < BLOCK_SIZE; i += 8) {
        dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i);
    }
}

static void clearFromSpace(HeapSource *heapSource)
{
    size_t i, count;

    assert(heapSource != NULL);
    i = count = 0;
    while (i < heapSource->totalBlocks) {
        if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) {
            ++i;
            continue;
        }
        heapSource->blockSpace[i] = BLOCK_FREE;
        clearBlock(heapSource, i);
        ++i;
        ++count;
        while (i < heapSource->totalBlocks &&
               heapSource->blockSpace[i] == BLOCK_CONTINUED) {
            heapSource->blockSpace[i] = BLOCK_FREE;
            clearBlock(heapSource, i);
            ++i;
            ++count;
        }
    }
    LOG_SCAV("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE);
}

/*
 * Appends the given block to the block queue. The block queue is
 * processed in-order by the scavenger.
 */
static void enqueueBlock(HeapSource *heapSource, size_t block)
{
    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    if (heapSource->queueHead != QUEUE_TAIL) {
        heapSource->blockQueue[heapSource->queueTail] = block;
    } else {
        heapSource->queueHead = block;
    }
    heapSource->blockQueue[block] = QUEUE_TAIL;
    heapSource->queueTail = block;
    ++heapSource->queueSize;
}
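
/*
 * Note (added for clarity): the queue is threaded through the blockQueue
 * array itself: blockQueue[b] holds the index of the block that follows
 * block b in the work list, or QUEUE_TAIL if b is currently the last
 * entry. An empty queue is denoted by queueHead == QUEUE_TAIL.
 */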

/*
 * Grays all objects within the block corresponding to the given
 * address.
 */
static void promoteBlockByAddr(HeapSource *heapSource, const void *addr)
{
    size_t block;

    block = addressToBlock(heapSource, (const u1 *)addr);
    if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
        // LOG_PROM("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
        heapSource->blockSpace[block] = BLOCK_TO_SPACE;
        enqueueBlock(heapSource, block);
        /* TODO(cshapiro): count continued blocks?*/
        heapSource->allocBlocks += 1;
    } else {
        // LOG_PROM("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
    }
}

GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
{
    GcHeap* gcHeap;
    HeapSource *heapSource;

    assert(startSize <= absoluteMaxSize);

    heapSource = malloc(sizeof(*heapSource));
    assert(heapSource != NULL);
    memset(heapSource, 0, sizeof(*heapSource));

    heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE);
    heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE);

    heapSource->currentSize = heapSource->maximumSize;

    /* Allocate underlying storage for blocks. */
    heapSource->blockBase = virtualAlloc(heapSource->maximumSize);
    assert(heapSource->blockBase != NULL);
    heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT;
    heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT;

    heapSource->allocBlocks = 0;
    heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock);

    assert(heapSource->totalBlocks == heapSource->maximumSize / BLOCK_SIZE);

    {
        size_t size = sizeof(heapSource->blockQueue[0]);
        heapSource->blockQueue = malloc(heapSource->totalBlocks*size);
        assert(heapSource->blockQueue != NULL);
        memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size);
        heapSource->queueHead = QUEUE_TAIL;
    }

    /* Byte indicating space residence or free status of block. */
    {
        size_t size = sizeof(heapSource->blockSpace[0]);
        heapSource->blockSpace = malloc(heapSource->totalBlocks*size);
        assert(heapSource->blockSpace != NULL);
        memset(heapSource->blockSpace, 0, heapSource->totalBlocks*size);
    }

    dvmHeapBitmapInit(&heapSource->allocBits,
                      heapSource->blockBase,
                      heapSource->maximumSize,
                      "blockBase");

    /* Initialize allocation pointers. */
    heapSource->allocPtr = allocateBlocks(heapSource, 1);
    heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;

    gcHeap = malloc(sizeof(*gcHeap));
    assert(gcHeap != NULL);
    memset(gcHeap, 0, sizeof(*gcHeap));
    gcHeap->heapSource = heapSource;

    return gcHeap;
}

/*
 * Perform any required heap initializations after forking from the
 * zygote process. This is a no-op for the time being. Eventually
 * this will demarcate the shared region of the heap.
 */
bool dvmHeapSourceStartupAfterZygote(void)
{
    return true;
}

bool dvmHeapSourceStartupBeforeFork(void)
{
    assert(!"implemented");
    return false;
}

void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL)
        return;
    free((*gcHeap)->heapSource->blockQueue);
    free((*gcHeap)->heapSource->blockSpace);
    virtualFree((*gcHeap)->heapSource->blockBase,
                (*gcHeap)->heapSource->maximumSize);
    free((*gcHeap)->heapSource);
    (*gcHeap)->heapSource = NULL;
    free(*gcHeap);
    *gcHeap = NULL;
}

size_t dvmHeapSourceGetValue(enum HeapSourceValueSpec spec,
                             size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *heapSource;
    size_t value;

    heapSource = gDvm.gcHeap->heapSource;
    switch (spec) {
    case HS_EXTERNAL_BYTES_ALLOCATED:
        value = 0;
        break;
    case HS_EXTERNAL_LIMIT:
        value = 0;
        break;
    case HS_FOOTPRINT:
        value = heapSource->maximumSize;
        break;
    case HS_ALLOWED_FOOTPRINT:
        value = heapSource->maximumSize;
        break;
    case HS_BYTES_ALLOCATED:
        value = heapSource->bytesAllocated;
        break;
    case HS_OBJECTS_ALLOCATED:
        value = sumHeapBitmap(&heapSource->allocBits);
        break;
    default:
        assert(!"implemented");
        value = 0;
    }
    if (perHeapStats) {
        *perHeapStats = value;
    }
    return value;
}

/*
 * Performs a shallow copy of the allocation bitmap into the given
 * vector of heap bitmaps.
 */
void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
                                   size_t numHeaps)
{
    assert(!"implemented");
}

HeapBitmap *dvmHeapSourceGetLiveBits(void)
{
    assert(!"implemented");
    return NULL;
}

/*
 * Allocate the specified number of bytes from the heap. The
 * allocation cursor points into a block of free storage. If the
 * given allocation fits in the remaining space of the block, we
 * advance the cursor and return a pointer to the free storage. If
 * the allocation cannot fit in the current block but is smaller than
 * a block we request a new block and allocate from it instead. If
 * the allocation is larger than a block we must allocate from a span
 * of contiguous blocks.
 */
void *dvmHeapSourceAlloc(size_t length)
{
    HeapSource *heapSource;
    unsigned char *addr;
    size_t aligned, available, blocks;

    heapSource = gDvm.gcHeap->heapSource;
    assert(heapSource != NULL);
    assert(heapSource->allocPtr != NULL);
    assert(heapSource->allocLimit != NULL);

    aligned = alignUp(length, ALLOC_ALIGNMENT);
    available = heapSource->allocLimit - heapSource->allocPtr;

    /* Try allocating inside the current block. */
    if (aligned <= available) {
        addr = heapSource->allocPtr;
        heapSource->allocPtr += aligned;
        heapSource->bytesAllocated += aligned;
        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
        return addr;
    }

    /* Try allocating in a new block. */
    if (aligned <= BLOCK_SIZE) {
        addr = allocateBlocks(heapSource, 1);
        if (addr != NULL) {
            heapSource->allocLimit = addr + BLOCK_SIZE;
            heapSource->allocPtr = addr + aligned;
            heapSource->bytesAllocated += aligned;
            dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
            /* TODO(cshapiro): pad out the current block. */
        }
        return addr;
    }

    /* Try allocating in a span of blocks. */
    blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE;

    addr = allocateBlocks(heapSource, blocks);
    /* Propagate failure upward. */
    if (addr != NULL) {
        heapSource->bytesAllocated += aligned;
        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
        /* TODO(cshapiro): pad out free space in the last block. */
    }
    return addr;
}

void *dvmHeapSourceAllocAndGrow(size_t size)
{
    return dvmHeapSourceAlloc(size);
}

/* TODO: refactor along with dvmHeapSourceAlloc */
void *allocateGray(size_t size)
{
    HeapSource *heapSource;
    void *addr;
    size_t block;

    /* TODO: add a check that we are in a GC. */
    heapSource = gDvm.gcHeap->heapSource;
    addr = dvmHeapSourceAlloc(size);
    assert(addr != NULL);
    block = addressToBlock(heapSource, (const u1 *)addr);
    if (heapSource->queueHead == QUEUE_TAIL) {
        /*
         * Forcibly append the underlying block to the queue. This
         * condition occurs when referents are transported following
         * the initial trace.
         */
        enqueueBlock(heapSource, block);
        LOG_PROM("forced promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
    }
    return addr;
}

/*
 * Returns true if the given address is within the heap and points to
 * the header of a live object.
 */
bool dvmHeapSourceContains(const void *addr)
{
    HeapSource *heapSource;
    HeapBitmap *bitmap;

    heapSource = gDvm.gcHeap->heapSource;
    bitmap = &heapSource->allocBits;
    if (!dvmHeapBitmapCoversAddress(bitmap, addr)) {
        return false;
    } else {
        return dvmHeapBitmapIsObjectBitSet(bitmap, addr);
    }
}

bool dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
{
    assert(!"implemented");
    return false;
}

size_t dvmHeapSourceChunkSize(const void *ptr)
{
    assert(!"implemented");
    return 0;
}

size_t dvmHeapSourceFootprint(void)
{
    assert(!"implemented");
    return 0;
}

/*
 * Returns the "ideal footprint" which appears to be the number of
 * bytes currently committed to the heap. This starts out at the
 * start size of the heap and grows toward the maximum size.
 */
size_t dvmHeapSourceGetIdealFootprint(void)
{
    return gDvm.gcHeap->heapSource->currentSize;
}

float dvmGetTargetHeapUtilization(void)
{
    assert(!"implemented");
    return 0.0f;
}

void dvmSetTargetHeapUtilization(float newTarget)
{
    assert(!"implemented");
}

size_t dvmMinimumHeapSize(size_t size, bool set)
{
    return gDvm.gcHeap->heapSource->minimumSize;
}

/*
 * Expands the size of the heap after a collection. At present we
 * commit the pages for maximum size of the heap so this routine is
 * just a no-op. Eventually, we will either allocate or commit pages
 * on an as-need basis.
 */
void dvmHeapSourceGrowForUtilization(void)
{
    /* do nothing */
}

void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    /* do nothing */
}

void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen,
                                        const void *userptr, size_t userlen,
                                        void *arg),
                       void *arg)
{
    assert(!"implemented");
}

size_t dvmHeapSourceGetNumHeaps(void)
{
    return 1;
}

bool dvmTrackExternalAllocation(size_t n)
{
    /* do nothing */
    return true;
}

void dvmTrackExternalFree(size_t n)
{
    /* do nothing */
}

size_t dvmGetExternalBytesAllocated(void)
{
    assert(!"implemented");
    return 0;
}

void dvmHeapSourceFlip(void)
{
    HeapSource *heapSource;
    size_t i;

    heapSource = gDvm.gcHeap->heapSource;

    /* Reset the block queue. */
    heapSource->allocBlocks = 0;
    heapSource->queueSize = 0;
    heapSource->queueHead = QUEUE_TAIL;

    /* TODO(cshapiro): pad the current (prev) block. */

    heapSource->allocPtr = NULL;
    heapSource->allocLimit = NULL;

    /* Whiten all allocated blocks. */
    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) {
            heapSource->blockSpace[i] = BLOCK_FROM_SPACE;
        }
    }
}

static void room(size_t *alloc, size_t *avail, size_t *total)
{
    HeapSource *heapSource;
    size_t i;

    heapSource = gDvm.gcHeap->heapSource;
    *total = heapSource->totalBlocks*BLOCK_SIZE;
    *alloc = heapSource->allocBlocks*BLOCK_SIZE;
    *avail = *total - *alloc;
}

static bool isSpaceInternal(u1 *addr, int space)
{
    HeapSource *heapSource;
    u1 *base, *limit;
    size_t offset;
    char space2;

    heapSource = gDvm.gcHeap->heapSource;
    base = heapSource->blockBase;
    assert(addr >= base);
    limit = heapSource->blockBase + heapSource->maximumSize;
    assert(addr < limit);
    offset = addr - base;
    space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT];
    return space == space2;
}

static bool fromSpaceContains(const void *addr)
{
    return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE);
}

static bool toSpaceContains(const void *addr)
{
    return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE);
}

/*
 * Notifies the collector that the object at the given address must
 * remain stationary during the current collection.
 */
static void pinObject(const Object *obj)
{
    promoteBlockByAddr(gDvm.gcHeap->heapSource, obj);
}

static void printHeapBitmap(const HeapBitmap *bitmap)
{
    const char *cp;
    size_t i, length;

    length = bitmap->bitsLen >> 2;
    fprintf(stderr, "%p", bitmap->bits);
    for (i = 0; i < length; ++i) {
        fprintf(stderr, " %lx", bitmap->bits[i]);
        fputc('\n', stderr);
    }
}

static void printHeapBitmapSxS(const HeapBitmap *b1, const HeapBitmap *b2)
{
    uintptr_t addr;
    size_t i, length;

    assert(b1->base == b2->base);
    assert(b1->bitsLen == b2->bitsLen);
    addr = b1->base;
    length = b1->bitsLen >> 2;
    for (i = 0; i < length; ++i) {
        int diff = b1->bits[i] == b2->bits[i];
        fprintf(stderr, "%08x %08lx %08lx %d\n",
                addr, b1->bits[i], b2->bits[i], diff);
        addr += sizeof(*b1->bits)*CHAR_BIT;
    }
}

static size_t sumHeapBitmap(const HeapBitmap *bitmap)
{
    const char *cp;
    size_t i, sum;

    sum = 0;
    for (i = 0; i < bitmap->bitsLen >> 2; ++i) {
        sum += dvmClzImpl(bitmap->bits[i]);
    }
    return sum;
}

/*
 * Miscellaneous functionality.
 */

static int isForward(const void *addr)
{
    return (uintptr_t)addr & 0x1;
}

static void setForward(const void *toObj, void *fromObj)
{
    *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1;
}

static void* getForward(const void *fromObj)
{
    return (void *)((uintptr_t)fromObj & ~0x1);
}

/* Beware, uses the same encoding as a forwarding pointer! */
static int isPermanentString(const StringObject *obj) {
    return (uintptr_t)obj & 0x1;
}

static void* getPermanentString(const StringObject *obj)
{
    return (void *)((uintptr_t)obj & ~0x1);
}
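
/*
 * Illustrative example (added for clarity): after an object at fromObj has
 * been transported to toObj, setForward(toObj, fromObj) stores
 * (uintptr_t)toObj | 0x1 in the first word of fromObj. isForward() on that
 * word then reports true and getForward() recovers toObj. The low bit is
 * free to act as a tag because allocations are at least 8-byte aligned
 * (ALLOC_ALIGNMENT).
 */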

/*
 * Scavenging and transporting routines follow. A transporter grays
 * an object. A scavenger blackens an object. We define these
 * routines for each fundamental object type. Dispatch is performed
 * in scavengeObject.
 */

/*
 * Class object scavenging.
 */
static void scavengeClassObject(ClassObject *obj)
{
    int i;

    LOG_SCAV("scavengeClassObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->obj.clazz != NULL);
    assert(obj->obj.clazz->descriptor != NULL);
    assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;"));
    assert(obj->descriptor != NULL);
    LOG_SCAV("scavengeClassObject: descriptor='%s',vtableCount=%zu",
             obj->descriptor, obj->vtableCount);
    /* Delegate class object and instance field scavenging. */
    scavengeDataObject((Object *)obj);
    /* Scavenge the array element class object. */
    if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) {
        scavengeReference((Object **)(void *)&obj->elementClass);
    }
    /* Scavenge the superclass. */
    scavengeReference((Object **)(void *)&obj->super);
    /* Scavenge the class loader. */
    scavengeReference(&obj->classLoader);
    /* Scavenge static fields. */
    for (i = 0; i < obj->sfieldCount; ++i) {
        char ch = obj->sfields[i].field.signature[0];
        if (ch == '[' || ch == 'L') {
            scavengeReference((Object **)(void *)&obj->sfields[i].value.l);
        }
    }
    /* Scavenge interface class objects. */
    for (i = 0; i < obj->interfaceCount; ++i) {
        scavengeReference((Object **) &obj->interfaces[i]);
    }
}

/*
 * Array object scavenging.
 */
static size_t scavengeArrayObject(ArrayObject *array)
{
    size_t i, length;

    LOG_SCAV("scavengeArrayObject(array=%p)", array);
    /* Scavenge the class object. */
    assert(toSpaceContains(array));
    assert(array != NULL);
    assert(array->obj.clazz != NULL);
    scavengeReference((Object **) array);
    length = dvmArrayObjectSize(array);
    /* Scavenge the array contents. */
    if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) {
        Object **contents = (Object **)array->contents;
        for (i = 0; i < array->length; ++i) {
            scavengeReference(&contents[i]);
        }
    }
    return length;
}

/*
 * Reference object scavenging.
 */

static int getReferenceFlags(const Object *obj)
{
    int flags;

    flags = CLASS_ISREFERENCE |
            CLASS_ISWEAKREFERENCE |
            CLASS_ISPHANTOMREFERENCE;
    return GET_CLASS_FLAG_GROUP(obj->clazz, flags);
}

static int isReference(const Object *obj)
{
    return getReferenceFlags(obj) != 0;
}

static int isSoftReference(const Object *obj)
{
    return getReferenceFlags(obj) == CLASS_ISREFERENCE;
}

static int isWeakReference(const Object *obj)
{
    return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE;
}

static bool isPhantomReference(const Object *obj)
{
    return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE;
}

/*
 * Returns true if the reference was registered with a reference queue
 * but has not yet been appended to it.
 */
static bool isReferenceEnqueuable(const Object *ref)
{
    Object *queue, *queueNext;

    queue = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue);
    queueNext = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext);
    if (queue == NULL || queueNext != NULL) {
        /*
         * There is no queue, or the reference has already
         * been enqueued. The Reference.enqueue() method
         * will do nothing even if we call it.
         */
        return false;
    }

    /*
     * We need to call enqueue(), but if we called it from
     * here we'd probably deadlock. Schedule a call.
     */
    return true;
}

/*
 * Schedules a reference to be appended to its reference queue.
 */
static void enqueueReference(const Object *ref)
{
    LargeHeapRefTable **table;
    Object *op;

    assert(((uintptr_t)ref & 3) == 0);
    assert((WORKER_ENQUEUE & ~3) == 0);
    assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue) != NULL);
    assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext) == NULL);
    /*
     * Set the enqueue bit in the bottom of the pointer. Assumes that
     * objects are 8-byte aligned.
     *
     * Note that we are adding the *Reference* (which is by definition
     * already black at this point) to this list; we're not adding the
     * referent (which has already been cleared).
     */
    table = &gDvm.gcHeap->referenceOperations;
    op = (Object *)((uintptr_t)ref | WORKER_ENQUEUE);
    if (!dvmHeapAddRefToLargeTable(table, op)) {
        LOGE("no room for any more reference operations");
        dvmAbort();
    }
}

/*
 * Sets the referent field of a reference object to NULL.
 */
static void clearReference(Object *obj)
{
    dvmSetFieldObject(obj, gDvm.offJavaLangRefReference_referent, NULL);
}

/*
 * Clears reference objects with white referents.
 */
void clearWhiteReferences(Object **list)
{
    size_t referentOffset, vmDataOffset;
    bool doSignal;

    vmDataOffset = gDvm.offJavaLangRefReference_vmData;
    referentOffset = gDvm.offJavaLangRefReference_referent;
    doSignal = false;
    while (*list != NULL) {
        Object *ref = *list;
        JValue *field = dvmFieldPtr(ref, referentOffset);
        Object *referent = field->l;
        *list = dvmGetFieldObject(ref, vmDataOffset);
        assert(referent != NULL);
        if (isForward(referent->clazz)) {
            field->l = referent = getForward(referent->clazz);
            continue;
        }
        if (fromSpaceContains(referent)) {
            /* Referent is white, clear it. */
            clearReference(ref);
            if (isReferenceEnqueuable(ref)) {
                enqueueReference(ref);
                doSignal = true;
            }
        }
    }
    /*
     * If we cleared a reference with a reference queue we must notify
     * the heap worker to append the reference.
     */
    if (doSignal) {
        dvmSignalHeapWorker(false);
    }
    assert(*list == NULL);
}

/*
 * Blackens referents subject to the soft reference preservation
 * policy.
 */
void preserveSoftReferences(Object **list)
{
    Object *ref;
    Object *prev, *next;
    size_t referentOffset, vmDataOffset;
    unsigned counter;
    bool white;

    vmDataOffset = gDvm.offJavaLangRefReference_vmData;
    referentOffset = gDvm.offJavaLangRefReference_referent;
    counter = 0;
    prev = next = NULL;
    ref = *list;
    while (ref != NULL) {
        JValue *field = dvmFieldPtr(ref, referentOffset);
        Object *referent = field->l;
        next = dvmGetFieldObject(ref, vmDataOffset);
        assert(referent != NULL);
        if (isForward(referent->clazz)) {
            /* Referent is black. */
            field->l = referent = getForward(referent->clazz);
            white = false;
        } else {
            white = fromSpaceContains(referent);
        }
        if (white && ((++counter) & 1)) {
            /* Referent is white and biased toward saving, gray it. */
            scavengeReference((Object **)(void *)&field->l);
            white = false;
        }
        if (!white) {
            /* Referent is black, unlink it. */
            if (prev != NULL) {
                dvmSetFieldObject(ref, vmDataOffset, NULL);
                dvmSetFieldObject(prev, vmDataOffset, next);
            }
        } else {
            /* Referent is white, skip over it. */
            prev = ref;
        }
        ref = next;
    }
    /*
     * Restart the trace with the newly gray references added to the
     * root set.
     */
    scavengeBlockQueue();
}

void processFinalizableReferences(void)
{
    HeapRefTable newPendingRefs;
    LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
    Object **ref;
    Object **lastRef;
    size_t totalPendCount;

    /*
     * All strongly reachable objects are black.
     * Any white finalizable objects need to be finalized.
     */

    /* Create a table that the new pending refs will
     * be added to.
     */
    if (!dvmHeapInitHeapRefTable(&newPendingRefs, 128)) {
        //TODO: mark all finalizable refs and hope that
        //      we can schedule them next time.  Watch out,
        //      because we may be expecting to free up space
        //      by calling finalizers.
        LOG_REF("no room for pending finalizations\n");
        dvmAbort();
    }

    /*
     * Walk through finalizableRefs and move any white references to
     * the list of new pending refs.
     */
    totalPendCount = 0;
    while (finRefs != NULL) {
        Object **gapRef;
        size_t newPendCount = 0;

        gapRef = ref = finRefs->refs.table;
        lastRef = finRefs->refs.nextEntry;
        while (ref < lastRef) {
            if (fromSpaceContains(*ref)) {
                if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
                    //TODO: add the current table and allocate
                    //      a new, smaller one.
                    LOG_REF("no room for any more pending finalizations: %zd\n",
                            dvmHeapNumHeapRefTableEntries(&newPendingRefs));
                    dvmAbort();
                }
                newPendCount++;
            } else {
                /* This ref is black, so will remain on finalizableRefs.
                 */
                if (newPendCount > 0) {
                    /* Copy it up to fill the holes.
                     */
                    *gapRef++ = *ref;
                } else {
                    /* No holes yet; don't bother copying.
                     */
                    gapRef++;
                }
            }
            ref++;
        }
        finRefs->refs.nextEntry = gapRef;
        //TODO: if the table is empty when we're done, free it.
        totalPendCount += newPendCount;
        finRefs = finRefs->next;
    }
    LOG_REF("%zd finalizers triggered.\n", totalPendCount);
    if (totalPendCount == 0) {
        /* No objects required finalization.
         * Free the empty temporary table.
         */
        dvmClearReferenceTable(&newPendingRefs);
        return;
    }

    /* Add the new pending refs to the main list.
     */
    if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
                                     &newPendingRefs))
    {
        LOG_REF("can't insert new pending finalizations\n");
        dvmAbort();
    }

    //TODO: try compacting the main list with a memcpy loop

    /* Blacken the refs we just moved; we don't want them or their
     * children to get swept yet.
     */
    ref = newPendingRefs.table;
    lastRef = newPendingRefs.nextEntry;
    assert(ref < lastRef);
    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
    while (ref < lastRef) {
        scavengeReference(ref);
        ref++;
    }
    HPROF_CLEAR_GC_SCAN_STATE();
    scavengeBlockQueue();
    dvmSignalHeapWorker(false);
}

/*
 * If a reference points to from-space and has been forwarded, we snap
 * the pointer to its new to-space address. If the reference points
 * to an unforwarded from-space address we must enqueue the reference
 * for later processing. TODO: implement proper reference processing
 * and move the referent scavenging elsewhere.
 */
static void scavengeReferenceObject(Object *obj)
{
    Object *referent;
    Object **queue;
    size_t referentOffset, vmDataOffset;

    assert(obj != NULL);
    LOG_SCAV("scavengeReferenceObject(obj=%p),'%s'", obj, obj->clazz->descriptor);
    scavengeDataObject(obj);
    referentOffset = gDvm.offJavaLangRefReference_referent;
    referent = dvmGetFieldObject(obj, referentOffset);
    if (referent == NULL || toSpaceContains(referent)) {
        return;
    }
    if (isSoftReference(obj)) {
        queue = &gDvm.gcHeap->softReferences;
    } else if (isWeakReference(obj)) {
        queue = &gDvm.gcHeap->weakReferences;
    } else {
        assert(isPhantomReference(obj));
        queue = &gDvm.gcHeap->phantomReferences;
    }
    vmDataOffset = gDvm.offJavaLangRefReference_vmData;
    dvmSetFieldObject(obj, vmDataOffset, *queue);
    *queue = obj;
    LOG_SCAV("scavengeReferenceObject: enqueueing %p", obj);
}

/*
 * Data object scavenging.
 */
static void scavengeDataObject(Object *obj)
{
    ClassObject *clazz;
    int i;

    // LOG_SCAV("scavengeDataObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->clazz != NULL);
    assert(obj->clazz->objectSize != 0);
    assert(toSpaceContains(obj));
    /* Scavenge the class object. */
    clazz = obj->clazz;
    scavengeReference((Object **) obj);
    /* Scavenge instance fields. */
    if (clazz->refOffsets != CLASS_WALK_SUPER) {
        size_t refOffsets = clazz->refOffsets;
        while (refOffsets != 0) {
            size_t rshift = CLZ(refOffsets);
            size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
            Object **ref = (Object **)((u1 *)obj + offset);
            scavengeReference(ref);
            refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
        }
    } else {
        for (; clazz != NULL; clazz = clazz->super) {
            InstField *field = clazz->ifields;
            for (i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
                size_t offset = field->byteOffset;
                Object **ref = (Object **)((u1 *)obj + offset);
                scavengeReference(ref);
            }
        }
    }
}
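
/*
 * Note (added for clarity; based on how refOffsets is used here and
 * elsewhere in the VM): clazz->refOffsets is a bitmap encoding the byte
 * offsets of reference-bearing instance fields when they can all be
 * represented; the sentinel CLASS_WALK_SUPER indicates they cannot, in
 * which case the ifields lists of the class and its superclasses are
 * walked instead, which is the slower path taken in the else branch above.
 */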

static Object *transportObject(const Object *fromObj)
{
    Object *toObj;
    size_t allocSize, copySize;

    LOG_TRAN("transportObject(fromObj=%p) allocBlocks=%zu",
             fromObj,
             gDvm.gcHeap->heapSource->allocBlocks);
    assert(fromObj != NULL);
    assert(fromSpaceContains(fromObj));
    allocSize = copySize = objectSize(fromObj);
    if (LW_HASH_STATE(fromObj->lock) != LW_HASH_STATE_UNHASHED) {
        /*
         * The object has been hashed or hashed and moved. We must
         * reserve an additional word for a hash code.
         */
        allocSize += sizeof(u4);
    }
    if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
        /*
         * The object has its hash code allocated. Ensure the hash
         * code is copied along with the instance data.
         */
        copySize += sizeof(u4);
    }
    /* TODO(cshapiro): don't copy, re-map large data objects. */
    assert(copySize <= allocSize);
    toObj = allocateGray(allocSize);
    assert(toObj != NULL);
    assert(toSpaceContains(toObj));
    memcpy(toObj, fromObj, copySize);
    if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED) {
        /*
         * The object has had its hash code exposed. Append it to the
         * instance and set a bit so we know to look for it there.
         */
        *(u4 *)(((char *)toObj) + copySize) = (u4)fromObj >> 3;
        toObj->lock |= LW_HASH_STATE_HASHED_AND_MOVED << LW_HASH_STATE_SHIFT;
    }
    LOG_TRAN("transportObject: from %p/%zu to %p/%zu (%zu,%zu) %s",
             fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj),
             toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj),
             copySize, allocSize, copySize < allocSize ? "DIFFERENT" : "");
    return toObj;
}
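
/*
 * Note on the hash code handling above (added for clarity): an object whose
 * identity hash code has been observed carries LW_HASH_STATE_HASHED in its
 * lock word; the first time such an object is moved, its original address
 * (shifted right by 3) is appended to the copy and the state becomes
 * LW_HASH_STATE_HASHED_AND_MOVED, so any later move must also copy that
 * extra word.
 */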
1418
1419/*
1420 * Generic reference scavenging.
1421 */
1422
1423/*
1424 * Given a reference to an object, the scavenge routine will gray the
1425 * reference. Any objects pointed to by the scavenger object will be
1426 * transported to new space and a forwarding pointer will be installed
1427 * in the header of the object.
1428 */
1429
1430/*
1431 * Blacken the given pointer. If the pointer is in from space, it is
1432 * transported to new space. If the object has a forwarding pointer
1433 * installed it has already been transported and the referent is
1434 * snapped to the new address.
1435 */
Carl Shapiro2396fda2010-05-03 20:14:14 -07001436static void scavengeReference(Object **obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07001437{
1438 ClassObject *clazz;
Carl Shapiro2396fda2010-05-03 20:14:14 -07001439 Object *fromObj, *toObj;
Carl Shapirod28668c2010-04-15 16:10:00 -07001440 uintptr_t word;
1441
1442 assert(obj);
1443
Carl Shapiro2396fda2010-05-03 20:14:14 -07001444 if (*obj == NULL) return;
Carl Shapirod28668c2010-04-15 16:10:00 -07001445
1446 assert(dvmIsValidObject(*obj));
1447
1448 /* The entire block is black. */
Carl Shapiro952e84a2010-05-06 14:35:29 -07001449 if (toSpaceContains(*obj)) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001450 LOG_SCAV("scavengeReference skipping pinned object @ %p", *obj);
Carl Shapiro2396fda2010-05-03 20:14:14 -07001451 return;
Carl Shapirod28668c2010-04-15 16:10:00 -07001452 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001453 LOG_SCAV("scavengeReference(*obj=%p)", *obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001454
Carl Shapiro952e84a2010-05-06 14:35:29 -07001455 assert(fromSpaceContains(*obj));
Carl Shapirod28668c2010-04-15 16:10:00 -07001456
1457 clazz = (*obj)->clazz;
1458
1459 if (isForward(clazz)) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001460 // LOG_SCAV("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1));
Carl Shapirod28668c2010-04-15 16:10:00 -07001461 *obj = (Object *)getForward(clazz);
Carl Shapiro2396fda2010-05-03 20:14:14 -07001462 return;
Carl Shapirod28668c2010-04-15 16:10:00 -07001463 }
Carl Shapiro2396fda2010-05-03 20:14:14 -07001464 fromObj = *obj;
1465 if (clazz == NULL) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001466 // LOG_SCAV("scavangeReference %p has a NULL class object", fromObj);
Carl Shapiro2396fda2010-05-03 20:14:14 -07001467 assert(!"implemented");
1468 toObj = NULL;
1469 } else if (clazz == gDvm.unlinkedJavaLangClass) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001470 // LOG_SCAV("scavangeReference %p is an unlinked class object", fromObj);
Carl Shapiro2396fda2010-05-03 20:14:14 -07001471 assert(!"implemented");
1472 toObj = NULL;
1473 } else {
1474 toObj = transportObject(fromObj);
1475 }
1476 setForward(toObj, fromObj);
1477 *obj = (Object *)toObj;
Carl Shapirod28668c2010-04-15 16:10:00 -07001478}
1479
1480static void verifyReference(const void *obj)
1481{
1482 HeapSource *heapSource;
1483 size_t block;
1484 char space;
1485
1486 if (obj == NULL) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001487 LOG_VER("verifyReference(obj=%p)", obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001488 return;
1489 }
1490 heapSource = gDvm.gcHeap->heapSource;
1491 block = addressToBlock(heapSource, obj);
1492 space = heapSource->blockSpace[block];
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001493 LOG_VER("verifyReference(obj=%p),block=%zu,space=%d", obj, block, space);
Carl Shapirod28668c2010-04-15 16:10:00 -07001494 assert(!((uintptr_t)obj & 7));
Carl Shapiro952e84a2010-05-06 14:35:29 -07001495 assert(toSpaceContains(obj));
Carl Shapirod28668c2010-04-15 16:10:00 -07001496 assert(dvmIsValidObject(obj));
1497}
1498
1499/*
1500 * Generic object scavenging.
1501 */
Carl Shapiro2396fda2010-05-03 20:14:14 -07001502static void scavengeObject(Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07001503{
1504 ClassObject *clazz;
Carl Shapirod28668c2010-04-15 16:10:00 -07001505
1506 assert(obj != NULL);
Carl Shapiro952e84a2010-05-06 14:35:29 -07001507 assert(obj->clazz != NULL);
1508 assert(!((uintptr_t)obj->clazz & 0x1));
1509 assert(obj->clazz != gDvm.unlinkedJavaLangClass);
Carl Shapirod28668c2010-04-15 16:10:00 -07001510 clazz = obj->clazz;
Carl Shapirod28668c2010-04-15 16:10:00 -07001511 if (clazz == gDvm.classJavaLangClass) {
Carl Shapiro2396fda2010-05-03 20:14:14 -07001512 scavengeClassObject((ClassObject *)obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001513 } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
Carl Shapiro2396fda2010-05-03 20:14:14 -07001514 scavengeArrayObject((ArrayObject *)obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001515 } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
Carl Shapiro952e84a2010-05-06 14:35:29 -07001516 scavengeReferenceObject(obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001517 } else {
Carl Shapiro952e84a2010-05-06 14:35:29 -07001518 scavengeDataObject(obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001519 }
Carl Shapirod28668c2010-04-15 16:10:00 -07001520}
1521
1522/*
1523 * External root scavenging routines.
1524 */
1525
1526static void scavengeHashTable(HashTable *table)
1527{
1528 HashEntry *entry;
1529 void *obj;
1530 int i;
1531
1532 if (table == NULL) {
1533 return;
1534 }
1535 dvmHashTableLock(table);
1536 for (i = 0; i < table->tableSize; ++i) {
1537 entry = &table->pEntries[i];
1538 obj = entry->data;
1539 if (obj == NULL || obj == HASH_TOMBSTONE) {
1540 continue;
1541 }
1542 scavengeReference((Object **)(void *)&entry->data);
1543 }
1544 dvmHashTableUnlock(table);
1545}
1546
1547static void pinHashTableEntries(HashTable *table)
1548{
1549 HashEntry *entry;
1550 void *obj;
1551 int i;
1552
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001553 LOG_PIN(">>> pinHashTableEntries(table=%p)", table);
Carl Shapirod28668c2010-04-15 16:10:00 -07001554 if (table == NULL) {
1555 return;
1556 }
1557 dvmHashTableLock(table);
1558 for (i = 0; i < table->tableSize; ++i) {
1559 entry = &table->pEntries[i];
1560 obj = entry->data;
1561 if (obj == NULL || obj == HASH_TOMBSTONE) {
1562 continue;
1563 }
1564 pinObject(entry->data);
1565 }
1566 dvmHashTableUnlock(table);
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001567 LOG_PIN("<<< pinHashTableEntries(table=%p)", table);
Carl Shapirod28668c2010-04-15 16:10:00 -07001568}
1569
1570static void pinPrimitiveClasses(void)
1571{
1572 size_t length;
1573 size_t i;
1574
1575 length = ARRAYSIZE(gDvm.primitiveClass);
1576 for (i = 0; i < length; i++) {
1577 if (gDvm.primitiveClass[i] != NULL) {
1578 pinObject((Object *)gDvm.primitiveClass[i]);
1579 }
1580 }
1581}
1582
1583/*
1584 * Scavenge interned strings. Permanent interned strings will have
1585 * been pinned and are therefore ignored. Non-permanent strings that
1586 * have been forwarded are snapped. All other entries are removed.
1587 */
1588static void scavengeInternedStrings(void)
1589{
1590 HashTable *table;
1591 HashEntry *entry;
1592 Object *obj;
1593 int i;
1594
1595 table = gDvm.internedStrings;
1596 if (table == NULL) {
1597 return;
1598 }
1599 dvmHashTableLock(table);
1600 for (i = 0; i < table->tableSize; ++i) {
1601 entry = &table->pEntries[i];
1602 obj = (Object *)entry->data;
1603 if (obj == NULL || obj == HASH_TOMBSTONE) {
1604 continue;
1605 } else if (!isPermanentString((StringObject *)obj)) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001606 // LOG_SCAV("entry->data=%p", entry->data);
1607 LOG_SCAV(">>> string obj=%p", entry->data);
Carl Shapirod28668c2010-04-15 16:10:00 -07001608 /* TODO(cshapiro): detach white string objects */
1609 scavengeReference((Object **)(void *)&entry->data);
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001610 LOG_SCAV("<<< string obj=%p", entry->data);
Carl Shapirod28668c2010-04-15 16:10:00 -07001611 }
1612 }
1613 dvmHashTableUnlock(table);
1614}
1615
1616static void pinInternedStrings(void)
1617{
1618 HashTable *table;
1619 HashEntry *entry;
1620 Object *obj;
1621 int i;
1622
1623 table = gDvm.internedStrings;
1624 if (table == NULL) {
1625 return;
1626 }
1627 dvmHashTableLock(table);
1628 for (i = 0; i < table->tableSize; ++i) {
1629 entry = &table->pEntries[i];
1630 obj = (Object *)entry->data;
1631 if (obj == NULL || obj == HASH_TOMBSTONE) {
1632 continue;
1633 } else if (isPermanentString((StringObject *)obj)) {
1634 obj = (Object *)getPermanentString((StringObject*)obj);
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001635 LOG_PROM(">>> pin string obj=%p", obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001636 pinObject(obj);
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001637 LOG_PROM("<<< pin string obj=%p", obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001638 }
1639 }
1640 dvmHashTableUnlock(table);
1641}
1642
Carl Shapirod28668c2010-04-15 16:10:00 -07001643/*
1644 * At present, reference tables contain references that must not be
1645 * moved by the collector. Instead of scavenging each reference in
1646 * the table we pin each referenced object.
1647 */
Carl Shapiro583d64c2010-05-04 10:44:47 -07001648static void pinReferenceTable(const ReferenceTable *table)
Carl Shapirod28668c2010-04-15 16:10:00 -07001649{
1650 Object **entry;
1651 int i;
1652
1653 assert(table != NULL);
1654 assert(table->table != NULL);
1655 assert(table->nextEntry != NULL);
1656 for (entry = table->table; entry < table->nextEntry; ++entry) {
1657 assert(entry != NULL);
1658 assert(!isForward(*entry));
1659 pinObject(*entry);
1660 }
1661}
1662
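/*
 * Verifies each entry of a reference table.  Entries are pinned and
 * must never contain a forwarding address.
 */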
1663static void verifyReferenceTable(const ReferenceTable *table)
1664{
1665 Object **entry;
1667
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001668 LOG_VER(">>> verifyReferenceTable(table=%p)", table);
Carl Shapirod28668c2010-04-15 16:10:00 -07001669 for (entry = table->table; entry < table->nextEntry; ++entry) {
1670 assert(entry != NULL);
1671 assert(!isForward(*entry));
1672 verifyReference(*entry);
1673 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001674 LOG_VER("<<< verifyReferenceTable(table=%p)", table);
Carl Shapirod28668c2010-04-15 16:10:00 -07001675}
1676
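/*
 * Scavenges each entry of a large heap reference table.  When
 * stripLowBits is true the low two bits of an entry carry state and
 * are preserved across the update of the object pointer.
 */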
Carl Shapiro7800c092010-05-11 13:46:29 -07001677static void scavengeLargeHeapRefTable(LargeHeapRefTable *table, bool stripLowBits)
Carl Shapirod28668c2010-04-15 16:10:00 -07001678{
Carl Shapirod28668c2010-04-15 16:10:00 -07001679 for (; table != NULL; table = table->next) {
Carl Shapiro7800c092010-05-11 13:46:29 -07001680 Object **ref = table->refs.table;
1681 for (; ref < table->refs.nextEntry; ++ref) {
1682 if (stripLowBits) {
1683 Object *obj = (Object *)((uintptr_t)*ref & ~3);
1684 scavengeReference(&obj);
1685 *ref = (Object *)((uintptr_t)obj | ((uintptr_t)*ref & 3));
1686 } else {
1687 scavengeReference(ref);
Carl Shapirod28668c2010-04-15 16:10:00 -07001688 }
Carl Shapiro7800c092010-05-11 13:46:29 -07001689 }
1690 }
1691}
1692
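/*
 * Verifies each entry of a large heap reference table, masking off
 * the low state bits when stripLowBits is true.
 */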
1693static void verifyLargeHeapRefTable(LargeHeapRefTable *table, bool stripLowBits)
1694{
1695 for (; table != NULL; table = table->next) {
1696 Object **ref = table->refs.table;
1697 for (; ref < table->refs.nextEntry; ++ref) {
1698 if (stripLowBits) {
1699 dvmVerifyObject((Object *)((uintptr_t)*ref & ~3));
1700 } else {
1701 dvmVerifyObject(*ref);
1702 }
Carl Shapirod28668c2010-04-15 16:10:00 -07001703 }
1704 }
1705}
1706
1707/* This code was copied from Thread.c */
1708static void scavengeThreadStack(Thread *thread)
1709{
1710 const u4 *framePtr;
1711#if WITH_EXTRA_GC_CHECKS > 1
1712 bool first = true;
1713#endif
1714
1715 framePtr = (const u4 *)thread->curFrame;
1716 while (framePtr != NULL) {
1717 const StackSaveArea *saveArea;
1718 const Method *method;
1719
1720 saveArea = SAVEAREA_FROM_FP(framePtr);
1721 method = saveArea->method;
Carl Shapiro583d64c2010-05-04 10:44:47 -07001722 if (method != NULL && !dvmIsNativeMethod(method)) {
Carl Shapirod28668c2010-04-15 16:10:00 -07001723#ifdef COUNT_PRECISE_METHODS
1724 /* the GC is running, so no lock required */
1725 if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001726 LOG_SCAV("PGC: added %s.%s %p\n",
1727 method->clazz->descriptor, method->name, method);
Carl Shapirod28668c2010-04-15 16:10:00 -07001728#endif
1729#if WITH_EXTRA_GC_CHECKS > 1
1730 /*
1731 * May also want to enable the memset() in the "invokeMethod"
1732 * goto target in the portable interpreter. That sets the stack
1733 * to a pattern that makes referring to uninitialized data
1734 * very obvious.
1735 */
1736
1737 if (first) {
1738 /*
1739 * First frame, isn't native, check the "alternate" saved PC
1740 * as a sanity check.
1741 *
1742 * It seems like we could check the second frame if the first
1743 * is native, since the PCs should be the same. It turns out
1744 * this doesn't always work. The problem is that we could
1745 * have calls in the sequence:
1746 * interp method #2
1747 * native method
1748 * interp method #1
1749 *
1750 * and then GC while in the native method after returning
1751 * from interp method #2. The currentPc on the stack is
1752 * for interp method #1, but thread->currentPc2 is still
1753 * set for the last thing interp method #2 did.
1754 *
1755 * This can also happen in normal execution:
1756 * - sget-object on not-yet-loaded class
1757 * - class init updates currentPc2
1758 * - static field init is handled by parsing annotations;
1759 * static String init requires creation of a String object,
1760 * which can cause a GC
1761 *
1762 * Essentially, any pattern that involves executing
1763 * interpreted code and then causes an allocation without
1764 * executing instructions in the original method will hit
1765 * this. These are rare enough that the test still has
1766 * some value.
1767 */
1768 if (saveArea->xtra.currentPc != thread->currentPc2) {
1769 LOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p\n",
1770 saveArea->xtra.currentPc, thread->currentPc2,
1771 method->clazz->descriptor, method->name, method->insns);
1772 if (saveArea->xtra.currentPc != NULL)
1773 LOGE(" pc inst = 0x%04x\n", *saveArea->xtra.currentPc);
1774 if (thread->currentPc2 != NULL)
1775 LOGE(" pc2 inst = 0x%04x\n", *thread->currentPc2);
1776 dvmDumpThread(thread, false);
1777 }
1778 } else {
1779 /*
1780 * It's unusual, but not impossible, for a non-first frame
1781 * to be at something other than a method invocation. For
1782 * example, if we do a new-instance on a nonexistent class,
1783 * we'll have a lot of class loader activity on the stack
1784 * above the frame with the "new" operation. Could also
1785 * happen while we initialize a Throwable when an instruction
1786 * fails.
1787 *
1788 * So there's not much we can do here to verify the PC,
1789 * except to verify that it's a GC point.
1790 */
1791 }
1792 assert(saveArea->xtra.currentPc != NULL);
1793#endif
1794
1795 const RegisterMap* pMap;
1796 const u1* regVector;
1797 int i;
1798
1799 Method* nonConstMethod = (Method*) method; // quiet gcc
1800 pMap = dvmGetExpandedRegisterMap(nonConstMethod);
1801
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001802 //LOG_SCAV("PGC: %s.%s\n", method->clazz->descriptor, method->name);
Carl Shapirod28668c2010-04-15 16:10:00 -07001803
1804 if (pMap != NULL) {
1805 /* found map, get registers for this address */
1806 int addr = saveArea->xtra.currentPc - method->insns;
1807 regVector = dvmRegisterMapGetLine(pMap, addr);
1808 /*
1809 if (regVector == NULL) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001810 LOG_SCAV("PGC: map but no entry for %s.%s addr=0x%04x\n",
1811 method->clazz->descriptor, method->name, addr);
Carl Shapirod28668c2010-04-15 16:10:00 -07001812 } else {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001813 LOG_SCAV("PGC: found map for %s.%s 0x%04x (t=%d)\n",
1814 method->clazz->descriptor, method->name, addr,
1815 thread->threadId);
Carl Shapirod28668c2010-04-15 16:10:00 -07001816 }
1817 */
1818 } else {
1819 /*
1820 * No map found. If precise GC is disabled this is
1821 * expected -- we don't create pointers to the map data even
1822 * if it's present -- but if it's enabled it means we're
1823 * unexpectedly falling back on a conservative scan, so it's
1824 * worth yelling a little.
1825 */
1826 if (gDvm.preciseGc) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001827 LOG_SCAV("PGC: no map for %s.%s\n", method->clazz->descriptor, method->name);
Carl Shapirod28668c2010-04-15 16:10:00 -07001828 }
1829 regVector = NULL;
1830 }
Carl Shapirod28668c2010-04-15 16:10:00 -07001831 if (regVector == NULL) {
Carl Shapiro88b00352010-05-19 17:38:33 -07001832 /*
1833 * There are no roots to scavenge. Skip over the entire frame.
1834 */
1835 framePtr += method->registersSize;
Carl Shapirod28668c2010-04-15 16:10:00 -07001836 } else {
1837 /*
1838 * Precise scan. v0 is at the lowest address on the
1839 * interpreted stack, and is the first bit in the register
1840 * vector, so we can walk through the register map and
1841 * memory in the same direction.
1842 *
1843 * A '1' bit indicates a live reference.
1844 */
1845 u2 bits = 1 << 1;
1846 for (i = method->registersSize - 1; i >= 0; i--) {
Carl Shapirod28668c2010-04-15 16:10:00 -07001847 u4 rval = *framePtr;
1848
1849 bits >>= 1;
1850 if (bits == 1) {
1851 /* set bit 9 so we can tell when we're empty */
1852 bits = *regVector++ | 0x0100;
1853 LOGVV("loaded bits: 0x%02x\n", bits & 0xff);
1854 }
1855
1856 if (rval != 0 && (bits & 0x01) != 0) {
1857 /*
1858 * Non-null, register marked as live reference. This
1859 * should always be a valid object.
1860 */
1861#if WITH_EXTRA_GC_CHECKS > 0
1862 if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) {
1863 /* this is very bad */
1864 LOGE("PGC: invalid ref in reg %d: 0x%08x\n",
1865 method->registersSize-1 - i, rval);
1866 } else
1867#endif
1868 {
1869
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001870 // LOG_SCAV("stack reference %u@%p", *framePtr, framePtr);
Carl Shapirod28668c2010-04-15 16:10:00 -07001871 /* dvmMarkObjectNonNull((Object *)rval); */
1872 scavengeReference((Object **) framePtr);
1873 }
1874 } else {
1875 /*
1876 * Null or non-reference, do nothing at all.
1877 */
1878#if WITH_EXTRA_GC_CHECKS > 1
1879 if (dvmIsValidObject((Object*) rval)) {
1880 /* this is normal, but we feel chatty */
1881 LOGD("PGC: ignoring valid ref in reg %d: 0x%08x\n",
1882 method->registersSize-1 - i, rval);
1883 }
1884#endif
1885 }
1886 ++framePtr;
1887 }
1888 dvmReleaseRegisterMapLine(pMap, regVector);
1889 }
1890 }
Carl Shapiro952e84a2010-05-06 14:35:29 -07001891 /* else this is a break frame and there is nothing to gray, or
Carl Shapirod28668c2010-04-15 16:10:00 -07001892 * this is a native method and the registers are just the "ins",
1893 * copied from various registers in the caller's set.
1894 */
1895
1896#if WITH_EXTRA_GC_CHECKS > 1
1897 first = false;
1898#endif
1899
1900 /* Don't fall into an infinite loop if things get corrupted.
1901 */
1902 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1903 saveArea->prevFrame == NULL);
1904 framePtr = saveArea->prevFrame;
1905 }
1906}
1907
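/*
 * Scavenges the roots of a single thread: the Thread object, the
 * pending exception, and the references on the interpreted stack.
 */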
1908static void scavengeThread(Thread *thread)
1909{
1910 assert(thread->status != THREAD_RUNNING ||
1911 thread->isSuspended ||
1912 thread == dvmThreadSelf());
1913
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001914 // LOG_SCAV("scavengeThread(thread=%p)", thread);
Carl Shapirod28668c2010-04-15 16:10:00 -07001915
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001916 // LOG_SCAV("Scavenging threadObj=%p", thread->threadObj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001917 scavengeReference(&thread->threadObj);
1918
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001919 // LOG_SCAV("Scavenging exception=%p", thread->exception);
Carl Shapirod28668c2010-04-15 16:10:00 -07001920 scavengeReference(&thread->exception);
1921
1922 scavengeThreadStack(thread);
1923}
1924
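/*
 * Scavenges the roots of every thread with the thread list lock held.
 */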
1925static void scavengeThreadList(void)
1926{
1927 Thread *thread;
1928
1929 dvmLockThreadList(dvmThreadSelf());
1930 thread = gDvm.threadList;
1931 while (thread) {
1932 scavengeThread(thread);
1933 thread = thread->next;
1934 }
1935 dvmUnlockThreadList();
1936}
1937
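/*
 * Verifies the references in each frame of a thread stack.  Frames
 * with register map information are scanned precisely; otherwise
 * every register that looks like a valid object is verified.
 */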
1938static void verifyThreadStack(const Thread *thread)
1939{
1940 const u4 *framePtr;
1941
1942 assert(thread != NULL);
1943 framePtr = (const u4 *)thread->curFrame;
1944 while (framePtr != NULL) {
1945 const StackSaveArea *saveArea;
Carl Shapiro88b00352010-05-19 17:38:33 -07001946 Method *method;
Carl Shapirod28668c2010-04-15 16:10:00 -07001947
1948 saveArea = SAVEAREA_FROM_FP(framePtr);
Carl Shapiro88b00352010-05-19 17:38:33 -07001949 method = (Method *)saveArea->method;
Carl Shapirod28668c2010-04-15 16:10:00 -07001950 if (method != NULL && !dvmIsNativeMethod(method)) {
1951 const RegisterMap* pMap;
1952 const u1* regVector;
1953 int i;
1954
Carl Shapiro88b00352010-05-19 17:38:33 -07001955 pMap = dvmGetExpandedRegisterMap(method);
1956 regVector = NULL;
Carl Shapirod28668c2010-04-15 16:10:00 -07001957 if (pMap != NULL) {
1958 /* found map, get registers for this address */
1959 int addr = saveArea->xtra.currentPc - method->insns;
1960 regVector = dvmRegisterMapGetLine(pMap, addr);
Carl Shapirod28668c2010-04-15 16:10:00 -07001961 }
Carl Shapirod28668c2010-04-15 16:10:00 -07001962 if (regVector == NULL) {
1963 /* conservative scan */
Carl Shapiro88b00352010-05-19 17:38:33 -07001964 for (i = 0; i < method->registersSize; ++i) {
1965 Object *regValue = (Object *)framePtr[i];
1966 if (dvmIsValidObject(regValue)) dvmVerifyObject(regValue);
Carl Shapirod28668c2010-04-15 16:10:00 -07001967 }
1968 } else {
1969 /*
1970 * Precise scan. v0 is at the lowest address on the
1971 * interpreted stack, and is the first bit in the register
1972 * vector, so we can walk through the register map and
1973 * memory in the same direction.
1974 *
1975 * A '1' bit indicates a live reference.
1976 */
1977 u2 bits = 1 << 1;
Carl Shapiro88b00352010-05-19 17:38:33 -07001978 for (i = 0; i < method->registersSize; ++i) {
1979 u4 regValue = framePtr[i];
Carl Shapirod28668c2010-04-15 16:10:00 -07001980 bits >>= 1;
1981 if (bits == 1) {
1982 /* set bit 9 so we can tell when we're empty */
1983 bits = *regVector++ | 0x0100;
Carl Shapirod28668c2010-04-15 16:10:00 -07001984 }
Carl Shapiro88b00352010-05-19 17:38:33 -07001985 if (regValue != 0 && (bits & 0x1) != 0) {
Carl Shapirod28668c2010-04-15 16:10:00 -07001986 /*
1987 * Non-null, register marked as live reference. This
1988 * should always be a valid object.
1989 */
Carl Shapiro88b00352010-05-19 17:38:33 -07001990 verifyReference((Object *)regValue);
Carl Shapirod28668c2010-04-15 16:10:00 -07001991 }
Carl Shapirod28668c2010-04-15 16:10:00 -07001992 }
1993 dvmReleaseRegisterMapLine(pMap, regVector);
1994 }
Carl Shapiro88b00352010-05-19 17:38:33 -07001995 framePtr += method->registersSize;
Carl Shapirod28668c2010-04-15 16:10:00 -07001996 }
Carl Shapiro952e84a2010-05-06 14:35:29 -07001997 /* else this is a break frame and there is nothing to gray, or
Carl Shapirod28668c2010-04-15 16:10:00 -07001998 * this is a native method and the registers are just the "ins",
1999 * copied from various registers in the caller's set.
2000 */
2001
2002 /* Don't fall into an infinite loop if things get corrupted.
2003 */
2004 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
2005 saveArea->prevFrame == NULL);
2006 framePtr = saveArea->prevFrame;
2007 }
2008}
2009
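/*
 * Verifies the roots of a single thread: the Thread object, the
 * pending exception, the local reference tables, and the stack.
 */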
2010static void verifyThread(const Thread *thread)
2011{
2012 assert(thread->status != THREAD_RUNNING ||
2013 thread->isSuspended ||
2014 thread == dvmThreadSelf());
2015
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002016 LOG_VER("verifyThread(thread=%p)", thread);
Carl Shapirod28668c2010-04-15 16:10:00 -07002017
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002018 LOG_VER("verify threadObj=%p", thread->threadObj);
Carl Shapirod28668c2010-04-15 16:10:00 -07002019 verifyReference(thread->threadObj);
2020
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002021 LOG_VER("verify exception=%p", thread->exception);
Carl Shapirod28668c2010-04-15 16:10:00 -07002022 verifyReference(thread->exception);
2023
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002024 LOG_VER("verify thread->internalLocalRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07002025 verifyReferenceTable(&thread->internalLocalRefTable);
2026
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002027 LOG_VER("verify thread->jniLocalRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07002028 verifyReferenceTable(&thread->jniLocalRefTable);
2029
2030 /* Can the check be pushed into the promote routine? */
2031 if (thread->jniMonitorRefTable.table) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002032 LOG_VER("verify thread->jniMonitorRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07002033 verifyReferenceTable(&thread->jniMonitorRefTable);
2034 }
2035
2036 verifyThreadStack(thread);
2037}
2038
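/*
 * Verifies the roots of every thread with the thread list lock held.
 */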
2039static void verifyThreadList(void)
2040{
2041 Thread *thread;
2042
2043 dvmLockThreadList(dvmThreadSelf());
2044 thread = gDvm.threadList;
2045 while (thread) {
2046 verifyThread(thread);
2047 thread = thread->next;
2048 }
2049 dvmUnlockThreadList();
2050}
2051
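/*
 * Pins objects referenced from a thread stack.  Arguments to native
 * methods are pinned precisely by walking the method signature;
 * interpreted frames without register map information are scanned
 * conservatively and any word that looks like an object is pinned.
 */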
Carl Shapiro88b00352010-05-19 17:38:33 -07002052static void pinThreadStack(const Thread *thread)
Carl Shapiro583d64c2010-05-04 10:44:47 -07002053{
2054 const u4 *framePtr;
2055 const StackSaveArea *saveArea;
Carl Shapiro88b00352010-05-19 17:38:33 -07002056 Method *method;
Carl Shapiro583d64c2010-05-04 10:44:47 -07002057 const char *shorty;
2058 Object *obj;
2059 int i;
2060
2061 saveArea = NULL;
2062 framePtr = (const u4 *)thread->curFrame;
2063 for (; framePtr != NULL; framePtr = saveArea->prevFrame) {
2064 saveArea = SAVEAREA_FROM_FP(framePtr);
Carl Shapiro88b00352010-05-19 17:38:33 -07002065 method = (Method *)saveArea->method;
Carl Shapiro583d64c2010-05-04 10:44:47 -07002066 if (method != NULL && dvmIsNativeMethod(method)) {
2067 /*
Carl Shapiro88b00352010-05-19 17:38:33 -07002068             * This is a native method; pin its arguments.
2069 *
Carl Shapiro952e84a2010-05-06 14:35:29 -07002070 * For purposes of graying references, we don't need to do
Carl Shapiro583d64c2010-05-04 10:44:47 -07002071 * anything here, because all of the native "ins" were copied
2072 * from registers in the caller's stack frame and won't be
2073 * changed (an interpreted method can freely use registers
2074 * with parameters like any other register, but natives don't
2075 * work that way).
2076 *
2077 * However, we need to ensure that references visible to
2078 * native methods don't move around. We can do a precise scan
2079 * of the arguments by examining the method signature.
2080 */
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002081 LOG_PIN("+++ native scan %s.%s\n",
2082 method->clazz->descriptor, method->name);
Carl Shapiro583d64c2010-05-04 10:44:47 -07002083 assert(method->registersSize == method->insSize);
2084 if (!dvmIsStaticMethod(method)) {
2085 /* grab the "this" pointer */
2086 obj = (Object *)*framePtr++;
2087 if (obj == NULL) {
2088 /*
2089 * This can happen for the "fake" entry frame inserted
2090 * for threads created outside the VM. There's no actual
2091 * call so there's no object. If we changed the fake
2092 * entry method to be declared "static" then this
2093 * situation should never occur.
2094 */
2095 } else {
2096 assert(dvmIsValidObject(obj));
2097 pinObject(obj);
2098 }
2099 }
2100 shorty = method->shorty+1; // skip return value
2101 for (i = method->registersSize - 1; i >= 0; i--, framePtr++) {
2102 switch (*shorty++) {
2103 case 'L':
2104 obj = (Object *)*framePtr;
2105 if (obj != NULL) {
2106 assert(dvmIsValidObject(obj));
2107 pinObject(obj);
2108 }
2109 break;
2110 case 'D':
2111 case 'J':
2112 framePtr++;
2113 break;
2114 default:
2115 /* 32-bit non-reference value */
2116 obj = (Object *)*framePtr; // debug, remove
2117 if (dvmIsValidObject(obj)) { // debug, remove
2118 /* if we see a lot of these, our scan might be off */
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002119 LOG_PIN("+++ did NOT pin obj %p\n", obj);
Carl Shapiro583d64c2010-05-04 10:44:47 -07002120 }
2121 break;
2122 }
2123 }
Carl Shapiro88b00352010-05-19 17:38:33 -07002124 } else if (method != NULL && !dvmIsNativeMethod(method)) {
2125 const RegisterMap* pMap = dvmGetExpandedRegisterMap(method);
2126 const u1* regVector = NULL;
2127
2128 LOGI("conservative : %s.%s\n", method->clazz->descriptor, method->name);
2129
2130 if (pMap != NULL) {
2131 int addr = saveArea->xtra.currentPc - method->insns;
2132 regVector = dvmRegisterMapGetLine(pMap, addr);
2133 }
2134 if (regVector == NULL) {
2135 /*
2136 * No register info for this frame, conservatively pin.
2137 */
2138 for (i = 0; i < method->registersSize; ++i) {
2139 u4 regValue = framePtr[i];
2140 if (regValue != 0 && (regValue & 0x3) == 0 && dvmIsValidObject((Object *)regValue)) {
2141 pinObject((Object *)regValue);
2142 }
2143 }
2144 }
Carl Shapiro583d64c2010-05-04 10:44:47 -07002145 }
2146 /*
2147 * Don't fall into an infinite loop if things get corrupted.
2148 */
2149 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
2150 saveArea->prevFrame == NULL);
2151 }
2152}
2153
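/*
 * Pins the roots of a single thread that must not move: objects
 * referenced from the stack and from the local and monitor
 * reference tables.
 */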
2154static void pinThread(const Thread *thread)
Carl Shapirod28668c2010-04-15 16:10:00 -07002155{
2156 assert(thread != NULL);
2157 assert(thread->status != THREAD_RUNNING ||
2158 thread->isSuspended ||
2159 thread == dvmThreadSelf());
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002160 LOG_PIN("pinThread(thread=%p)", thread);
Carl Shapirod28668c2010-04-15 16:10:00 -07002161
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002162 LOG_PIN("Pin native method arguments");
Carl Shapiro88b00352010-05-19 17:38:33 -07002163 pinThreadStack(thread);
Carl Shapiro583d64c2010-05-04 10:44:47 -07002164
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002165 LOG_PIN("Pin internalLocalRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07002166 pinReferenceTable(&thread->internalLocalRefTable);
2167
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002168 LOG_PIN("Pin jniLocalRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07002169 pinReferenceTable(&thread->jniLocalRefTable);
2170
2171 /* Can the check be pushed into the promote routine? */
2172 if (thread->jniMonitorRefTable.table) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002173 LOG_PIN("Pin jniMonitorRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07002174 pinReferenceTable(&thread->jniMonitorRefTable);
2175 }
2176}
2177
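/*
 * Pins the roots of every thread with the thread list lock held.
 */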
2178static void pinThreadList(void)
2179{
2180 Thread *thread;
2181
2182 dvmLockThreadList(dvmThreadSelf());
2183 thread = gDvm.threadList;
2184 while (thread) {
2185 pinThread(thread);
2186 thread = thread->next;
2187 }
2188 dvmUnlockThreadList();
2189}
2190
2191/*
2192 * Heap block scavenging.
2193 */
2194
2195/*
2196 * Scavenge objects in the current block. Scavenging terminates when
2197 * the pointer reaches the highest address in the block or when a run
2198 * of zero words that continues to the highest address is reached.
2199 */
2200static void scavengeBlock(HeapSource *heapSource, size_t block)
2201{
2202 u1 *cursor;
2203 u1 *end;
2204 size_t size;
2205
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002206 LOG_SCAV("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);
Carl Shapirod28668c2010-04-15 16:10:00 -07002207
2208 assert(heapSource != NULL);
2209 assert(block < heapSource->totalBlocks);
2210 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
2211
2212 cursor = blockToAddress(heapSource, block);
2213 end = cursor + BLOCK_SIZE;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002214 LOG_SCAV("scavengeBlock start=%p, end=%p", cursor, end);
Carl Shapirod28668c2010-04-15 16:10:00 -07002215
2216 /* Parse and scavenge the current block. */
2217 size = 0;
2218 while (cursor < end) {
2219 u4 word = *(u4 *)cursor;
2220 if (word != 0) {
Carl Shapiro2396fda2010-05-03 20:14:14 -07002221 scavengeObject((Object *)cursor);
2222 size = objectSize((Object *)cursor);
Carl Shapirod28668c2010-04-15 16:10:00 -07002223 size = alignUp(size, ALLOC_ALIGNMENT);
2224 cursor += size;
2225 } else if (word == 0 && cursor == (u1 *)gDvm.unlinkedJavaLangClass) {
2226 size = sizeof(ClassObject);
2227 cursor += size;
2228 } else {
2229 /* Check for padding. */
2230 while (*(u4 *)cursor == 0) {
2231 cursor += 4;
2232 if (cursor == end) break;
2233 }
2234 /* Punt if something went wrong. */
2235 assert(cursor == end);
2236 }
2237 }
2238}
2239
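/*
 * Returns the size of an object in bytes, including the trailing
 * hash code word for objects that have been hashed and then moved.
 */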
Carl Shapiro2396fda2010-05-03 20:14:14 -07002240static size_t objectSize(const Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07002241{
2242 size_t size;
2243
Carl Shapiro2396fda2010-05-03 20:14:14 -07002244 assert(obj != NULL);
2245 assert(obj->clazz != NULL);
Carl Shapirod28668c2010-04-15 16:10:00 -07002246 if (obj->clazz == gDvm.classJavaLangClass ||
2247 obj->clazz == gDvm.unlinkedJavaLangClass) {
2248 size = dvmClassObjectSize((ClassObject *)obj);
2249 } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
2250 size = dvmArrayObjectSize((ArrayObject *)obj);
2251 } else {
Carl Shapiro2396fda2010-05-03 20:14:14 -07002252 assert(obj->clazz->objectSize != 0);
Carl Shapirod28668c2010-04-15 16:10:00 -07002253 size = obj->clazz->objectSize;
2254 }
Carl Shapiro2396fda2010-05-03 20:14:14 -07002255 if (LW_HASH_STATE(obj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
2256 size += sizeof(u4);
2257 }
Carl Shapirod28668c2010-04-15 16:10:00 -07002258 return size;
2259}
2260
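/*
 * Verifies the objects in a block.  Parsing terminates at the end of
 * the block or at a run of zero words that extends to the end of the
 * block, mirroring scavengeBlock above.
 */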
2261static void verifyBlock(HeapSource *heapSource, size_t block)
2262{
2263 u1 *cursor;
2264 u1 *end;
2265 size_t size;
2266
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002267 // LOG_VER("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);
Carl Shapirod28668c2010-04-15 16:10:00 -07002268
2269 assert(heapSource != NULL);
2270 assert(block < heapSource->totalBlocks);
2271 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
2272
2273 cursor = blockToAddress(heapSource, block);
2274 end = cursor + BLOCK_SIZE;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002275 // LOG_VER("verifyBlock start=%p, end=%p", cursor, end);
Carl Shapirod28668c2010-04-15 16:10:00 -07002276
2277    /* Parse and verify the current block. */
2278 size = 0;
2279 while (cursor < end) {
2280 u4 word = *(u4 *)cursor;
2281 if (word != 0) {
2282 dvmVerifyObject((Object *)cursor);
2283 size = objectSize((Object *)cursor);
2284 size = alignUp(size, ALLOC_ALIGNMENT);
2285 cursor += size;
2286 } else if (word == 0 && cursor == (u1 *)gDvm.unlinkedJavaLangClass) {
2287 size = sizeof(ClassObject);
2288 cursor += size;
2289 } else {
2290 /* Check for padding. */
2291            while (*(u4 *)cursor == 0) {
2292 cursor += 4;
2293 if (cursor == end) break;
2294 }
2295 /* Punt if something went wrong. */
2296 assert(cursor == end);
2297 }
2298 }
2299}
2300
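/*
 * Logs the length and contents of the block queue for debugging.
 */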
2301static void describeBlockQueue(const HeapSource *heapSource)
2302{
2303 size_t block, count;
2304 char space;
2305
2306 block = heapSource->queueHead;
2307 count = 0;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002308 LOG_SCAV(">>> describeBlockQueue(heapSource=%p)", heapSource);
Carl Shapirod28668c2010-04-15 16:10:00 -07002309 /* Count the number of blocks enqueued. */
2310 while (block != QUEUE_TAIL) {
2311 block = heapSource->blockQueue[block];
2312 ++count;
2313 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002314 LOG_SCAV("blockQueue %zu elements, enqueued %zu",
Carl Shapirod28668c2010-04-15 16:10:00 -07002315 count, heapSource->queueSize);
2316 block = heapSource->queueHead;
2317 while (block != QUEUE_TAIL) {
2318 space = heapSource->blockSpace[block];
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002319        LOG_SCAV("block=%zu@%p,space=%d", block, blockToAddress(heapSource,block), space);
Carl Shapirod28668c2010-04-15 16:10:00 -07002320 block = heapSource->blockQueue[block];
2321 }
2322
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002323 LOG_SCAV("<<< describeBlockQueue(heapSource=%p)", heapSource);
Carl Shapirod28668c2010-04-15 16:10:00 -07002324}
2325
2326/*
2327 * Blackens promoted objects.
2328 */
2329static void scavengeBlockQueue(void)
2330{
2331 HeapSource *heapSource;
2332 size_t block;
2333
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002334 LOG_SCAV(">>> scavengeBlockQueue()");
Carl Shapirod28668c2010-04-15 16:10:00 -07002335 heapSource = gDvm.gcHeap->heapSource;
2336 describeBlockQueue(heapSource);
2337 while (heapSource->queueHead != QUEUE_TAIL) {
2338 block = heapSource->queueHead;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002339 LOG_SCAV("Dequeueing block %zu\n", block);
Carl Shapirod28668c2010-04-15 16:10:00 -07002340 scavengeBlock(heapSource, block);
2341 heapSource->queueHead = heapSource->blockQueue[block];
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002342 LOG_SCAV("New queue head is %zu\n", heapSource->queueHead);
Carl Shapirod28668c2010-04-15 16:10:00 -07002343 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002344 LOG_SCAV("<<< scavengeBlockQueue()");
Carl Shapirod28668c2010-04-15 16:10:00 -07002345}
2346
2347/*
2348 * Scan the block list and verify all blocks that are marked as being
2349 * in new space. This should be parametrized so we can invoke this
2350 * routine outside of the context of a collection.
2351 */
2352static void verifyNewSpace(void)
2353{
2354 HeapSource *heapSource;
2355 size_t i;
2356 size_t c0, c1, c2, c7;
2357
2358 c0 = c1 = c2 = c7 = 0;
2359 heapSource = gDvm.gcHeap->heapSource;
2360 for (i = 0; i < heapSource->totalBlocks; ++i) {
2361 switch (heapSource->blockSpace[i]) {
2362 case BLOCK_FREE: ++c0; break;
2363 case BLOCK_TO_SPACE: ++c1; break;
2364 case BLOCK_FROM_SPACE: ++c2; break;
2365 case BLOCK_CONTINUED: ++c7; break;
2366 default: assert(!"reached");
2367 }
2368 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002369 LOG_VER("Block Demographics: "
2370 "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu",
2371 c0, c1, c2, c7);
Carl Shapirod28668c2010-04-15 16:10:00 -07002372 for (i = 0; i < heapSource->totalBlocks; ++i) {
2373 if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) {
2374 continue;
2375 }
2376 verifyBlock(heapSource, i);
2377 }
2378}
2379
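/*
 * Re-snaps the well-known class pointers cached in the global VM
 * state after the class objects may have moved.
 */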
2380static void scavengeGlobals(void)
2381{
2382 scavengeReference((Object **)(void *)&gDvm.classJavaLangClass);
2383 scavengeReference((Object **)(void *)&gDvm.classJavaLangClassArray);
2384 scavengeReference((Object **)(void *)&gDvm.classJavaLangError);
2385 scavengeReference((Object **)(void *)&gDvm.classJavaLangObject);
2386 scavengeReference((Object **)(void *)&gDvm.classJavaLangObjectArray);
2387 scavengeReference((Object **)(void *)&gDvm.classJavaLangRuntimeException);
2388 scavengeReference((Object **)(void *)&gDvm.classJavaLangString);
2389 scavengeReference((Object **)(void *)&gDvm.classJavaLangThread);
2390 scavengeReference((Object **)(void *)&gDvm.classJavaLangVMThread);
2391 scavengeReference((Object **)(void *)&gDvm.classJavaLangThreadGroup);
2392 scavengeReference((Object **)(void *)&gDvm.classJavaLangThrowable);
2393 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElement);
2394 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElementArray);
2395 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArray);
2396 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArrayArray);
2397 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectAccessibleObject);
2398 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructor);
2399 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructorArray);
2400 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectField);
2401 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectFieldArray);
2402 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethod);
2403 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethodArray);
2404 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectProxy);
2405 scavengeReference((Object **)(void *)&gDvm.classJavaLangExceptionInInitializerError);
2406 scavengeReference((Object **)(void *)&gDvm.classJavaLangRefReference);
2407 scavengeReference((Object **)(void *)&gDvm.classJavaNioReadWriteDirectByteBuffer);
2408 scavengeReference((Object **)(void *)&gDvm.classJavaSecurityAccessController);
2409 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
2410 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
2411 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray);
2412 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyNioInternalDirectBuffer);
2413 scavengeReference((Object **)(void *)&gDvm.classArrayBoolean);
2414 scavengeReference((Object **)(void *)&gDvm.classArrayChar);
2415 scavengeReference((Object **)(void *)&gDvm.classArrayFloat);
2416 scavengeReference((Object **)(void *)&gDvm.classArrayDouble);
2417 scavengeReference((Object **)(void *)&gDvm.classArrayByte);
2418 scavengeReference((Object **)(void *)&gDvm.classArrayShort);
2419 scavengeReference((Object **)(void *)&gDvm.classArrayInt);
2420 scavengeReference((Object **)(void *)&gDvm.classArrayLong);
2421}
2422
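/*
 * Describes each block of the heap source; intended for debugging.
 */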
2423void describeHeap(void)
2424{
2425 HeapSource *heapSource;
2426
2427 heapSource = gDvm.gcHeap->heapSource;
2428 describeBlocks(heapSource);
2429}
2430
2431/*
2432 * The collection interface. Collection has a few distinct phases.
2433 * The first is flipping, also known as condemning or whitening, the
2434 * heap.  The second is promoting every object pointed to by a pinned
2435 * or ambiguous reference.  The third is tracing from the stacks,
2436 * registers, and various globals.  Lastly, a verification of the
2437 * heap is performed.  The last phase should be optional.
2438 */
2439void dvmScavengeRoots(void) /* Needs a new name badly */
2440{
2442 GcHeap *gcHeap;
2443
2444 {
2445 size_t alloc, unused, total;
2446
2447 room(&alloc, &unused, &total);
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002448 LOG_SCAV("BEFORE GC: %zu alloc, %zu free, %zu total.",
2449 alloc, unused, total);
Carl Shapirod28668c2010-04-15 16:10:00 -07002450 }
2451
2452 gcHeap = gDvm.gcHeap;
2453 dvmHeapSourceFlip();
2454
2455 /*
2456 * Promote blocks with stationary objects.
2457 */
Carl Shapirod28668c2010-04-15 16:10:00 -07002458 pinThreadList();
Carl Shapirod28668c2010-04-15 16:10:00 -07002459 pinReferenceTable(&gDvm.jniGlobalRefTable);
Carl Shapirod28668c2010-04-15 16:10:00 -07002460 pinReferenceTable(&gDvm.jniPinRefTable);
Carl Shapirod28668c2010-04-15 16:10:00 -07002461 pinReferenceTable(&gcHeap->nonCollectableRefs);
Carl Shapirod28668c2010-04-15 16:10:00 -07002462 pinHashTableEntries(gDvm.loadedClasses);
Carl Shapirod28668c2010-04-15 16:10:00 -07002463 pinPrimitiveClasses();
Carl Shapirod28668c2010-04-15 16:10:00 -07002464 pinInternedStrings();
2465
2466 // describeBlocks(gcHeap->heapSource);
2467
2468 /*
2469 * Create first, open new-space page right here.
2470 */
2471
2472 /* Reset allocation to an unallocated block. */
2473 gDvm.gcHeap->heapSource->allocPtr = allocateBlocks(gDvm.gcHeap->heapSource, 1);
2474 gDvm.gcHeap->heapSource->allocLimit = gDvm.gcHeap->heapSource->allocPtr + BLOCK_SIZE;
2475 /*
2476 * Hack: promote the empty block allocated above. If the
2477 * promotions that occurred above did not actually gray any
2478 * objects, the block queue may be empty. We must force a
2479 * promotion to be safe.
2480 */
2481 promoteBlockByAddr(gDvm.gcHeap->heapSource, gDvm.gcHeap->heapSource->allocPtr);
2482
2483 /*
2484 * Scavenge blocks and relocate movable objects.
2485 */
2486
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002487 LOG_SCAV("Scavenging gDvm.threadList");
Carl Shapirod28668c2010-04-15 16:10:00 -07002488 scavengeThreadList();
2489
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002490 LOG_SCAV("Scavenging gDvm.gcHeap->referenceOperations");
Carl Shapiro7800c092010-05-11 13:46:29 -07002491 scavengeLargeHeapRefTable(gcHeap->referenceOperations, true);
Carl Shapirod28668c2010-04-15 16:10:00 -07002492
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002493 LOG_SCAV("Scavenging gDvm.gcHeap->pendingFinalizationRefs");
Carl Shapiro7800c092010-05-11 13:46:29 -07002494 scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs, false);
Carl Shapirod28668c2010-04-15 16:10:00 -07002495
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002496 LOG_SCAV("Scavenging random global stuff");
Carl Shapirod28668c2010-04-15 16:10:00 -07002497 scavengeReference(&gDvm.outOfMemoryObj);
2498 scavengeReference(&gDvm.internalErrorObj);
2499 scavengeReference(&gDvm.noClassDefFoundErrorObj);
2500
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002501 LOG_SCAV("Scavenging gDvm.dbgRegistry");
Carl Shapirod28668c2010-04-15 16:10:00 -07002502 scavengeHashTable(gDvm.dbgRegistry);
2503
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002504 // LOG_SCAV("Scavenging gDvm.internedString");
Carl Shapirod28668c2010-04-15 16:10:00 -07002505 scavengeInternedStrings();
2506
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002507 LOG_SCAV("Root scavenge has completed.");
Carl Shapirod28668c2010-04-15 16:10:00 -07002508
2509 scavengeBlockQueue();
2510
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002511 LOG_SCAV("Re-snap global class pointers.");
Carl Shapirod28668c2010-04-15 16:10:00 -07002512 scavengeGlobals();
2513
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002514 LOG_SCAV("New space scavenge has completed.");
Carl Shapirod28668c2010-04-15 16:10:00 -07002515
2516 /*
Carl Shapiro952e84a2010-05-06 14:35:29 -07002517 * Process reference objects in strength order.
2518 */
2519
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002520 LOG_REF("Processing soft references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002521 preserveSoftReferences(&gDvm.gcHeap->softReferences);
2522 clearWhiteReferences(&gDvm.gcHeap->softReferences);
2523
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002524 LOG_REF("Processing weak references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002525 clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2526
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002527 LOG_REF("Finding finalizations...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002528 processFinalizableReferences();
2529
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002530 LOG_REF("Processing f-reachable soft references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002531 clearWhiteReferences(&gDvm.gcHeap->softReferences);
2532
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002533 LOG_REF("Processing f-reachable weak references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002534 clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2535
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002536 LOG_REF("Processing phantom references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002537 clearWhiteReferences(&gDvm.gcHeap->phantomReferences);
2538
2539 /*
Carl Shapirod28668c2010-04-15 16:10:00 -07002540 * Verify the stack and heap.
2541 */
Carl Shapirof5718252010-05-11 20:55:13 -07002542 dvmVerifyRoots();
Carl Shapirod28668c2010-04-15 16:10:00 -07002543 verifyThreadList();
Carl Shapirod28668c2010-04-15 16:10:00 -07002544 verifyNewSpace();
2545
Carl Shapirod28668c2010-04-15 16:10:00 -07002546 //describeBlocks(gcHeap->heapSource);
2547
2548 clearFromSpace(gcHeap->heapSource);
2549
2550 {
2551 size_t alloc, rem, total;
2552
2553 room(&alloc, &rem, &total);
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002554 LOG_SCAV("AFTER GC: %zu alloc, %zu free, %zu total.", alloc, rem, total);
Carl Shapirod28668c2010-04-15 16:10:00 -07002555 }
2556}
2557
2558/*
2559 * Interface compatibility routines.
2560 */
2561
2562void dvmClearWhiteRefs(Object **list)
2563{
Carl Shapiro952e84a2010-05-06 14:35:29 -07002564 /* do nothing */
Carl Shapirod28668c2010-04-15 16:10:00 -07002565 assert(*list == NULL);
2566}
2567
2568void dvmHandleSoftRefs(Object **list)
2569{
Carl Shapiro952e84a2010-05-06 14:35:29 -07002570 /* do nothing */
Carl Shapirod28668c2010-04-15 16:10:00 -07002571 assert(*list == NULL);
2572}
2573
2574bool dvmHeapBeginMarkStep(GcMode mode)
2575{
2576 /* do nothing */
2577 return true;
2578}
2579
2580void dvmHeapFinishMarkStep(void)
2581{
2582 /* do nothing */
2583}
2584
2585void dvmHeapMarkRootSet(void)
2586{
2587 /* do nothing */
2588}
2589
2590void dvmHeapScanMarkedObjects(void)
2591{
2592 dvmScavengeRoots();
2593}
2594
2595void dvmHeapScheduleFinalizations(void)
2596{
2597 /* do nothing */
2598}
2599
2600void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
2601{
Carl Shapiro703a2f32010-05-12 23:11:37 -07002602 *numFreed = 0;
2603 *sizeFreed = 0;
Carl Shapirod28668c2010-04-15 16:10:00 -07002604 /* do nothing */
2605}
2606
2607void dvmMarkObjectNonNull(const Object *obj)
2608{
2609 assert(!"implemented");
2610}