/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <limits.h>
#include <sys/mman.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/Verify.h"
#include "alloc/clz.h"

/*
 * A "mostly copying", generational, garbage collector.
 *
 * TODO: we allocate our own contiguous tract of page frames to back
 * object allocations.  To cooperate with other heaps active in the
 * virtual machine we need to move the responsibility of allocating
 * pages someplace outside of this code.
 *
 * The other major data structures that maintain the state of the heap
 * are the block space table and the block queue.
 *
 * The block space table records the state of a block.  We must track
 * whether a block is:
 *
 * - Free or allocated in some space.
 *
 * - If the block holds part of a large object allocation, whether the
 *   block is the initial or a continued block of the allocation.
 *
 * - Whether the block is pinned, that is to say whether at least one
 *   object in the block must remain stationary.  Only needed during a
 *   GC.
 *
 * - Which space the block belongs to.  At present this means
 *   from-space or to-space.
 *
 * The block queue is used during garbage collection.  Unlike Cheney's
 * algorithm, from-space and to-space are not contiguous.  Therefore,
 * one cannot maintain the state of the copy with just two pointers.
 * The block queue exists to thread lists of blocks from the various
 * spaces together.
 *
 * Additionally, we record the free space frontier of the heap, as
 * well as the address of the first object within a block, which is
 * required to copy objects following a large object (not currently
 * implemented).  This is stored in the heap source structure.  This
 * should be moved elsewhere to support in-line allocations from Java
 * threads.
 *
 * Allocation requests are satisfied by reserving storage from one or
 * more contiguous blocks.  Objects that are small enough to fit
 * inside a block are packed together within a block.  Objects that
 * are larger than a block are allocated from contiguous sequences of
 * blocks.  When half the available blocks are filled, a garbage
 * collection occurs.  We "flip" spaces (exchange from- and to-space),
 * copy live objects into to-space, and perform pointer adjustment.
 *
 * Copying is made more complicated by the requirement that some
 * objects must not be moved.  This property is known as "pinning".
 * These objects must be dealt with specially.  We use Bartlett's
 * scheme; blocks containing such objects are grayed (promoted) at the
 * start of a garbage collection.  By virtue of this trick, tracing
 * from the roots proceeds as usual but all objects on those pages are
 * considered promoted and therefore not moved.
 *
 * TODO: there is sufficient information within the garbage collector
 * to implement Attardi's scheme for evacuating unpinned objects from
 * a page that is otherwise pinned.  This would eliminate false
 * retention caused by the large pinning granularity.
 *
 * We need a scheme for medium and large objects.  Ignore that for
 * now, we can return to this later.
 *
 * Eventually we need to worry about promoting objects out of the
 * copy-collected heap (tenuring) into a less volatile space.  Copying
 * may not always be the best policy for such spaces.  We should
 * consider a variant of mark, sweep, compact.
 *
 * The block scheme allows us to use VM page faults to maintain a
 * write barrier.  Consider having a special leaf state for a page.
 *
 * Bibliography:
 *
 * C. J. Cheney. 1970. A non-recursive list compacting algorithm.
 * CACM 13(11), pp. 677-678.
 *
 * Joel F. Bartlett. 1988. Compacting Garbage Collection with
 * Ambiguous Roots. Digital Equipment Corporation.
 *
 * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up
 * Generations and C++. Digital Equipment Corporation.
 *
 * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage
 * Collection in Uncooperative Environments. Digital Equipment
 * Corporation.
 *
 * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory
 * Management Framework. TR-94-010.
 *
 * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable
 * memory management framework for C++. Software -- Practice and
 * Experience. 28(11), 1143-1183.
 */
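
/*
 * For illustration (hypothetical sizes): with 512-byte blocks, a 4 MiB
 * backing store yields 8192 blocks.  Because allocateBlocks() refuses
 * any request that would push the allocated count past totalBlocks / 2,
 * at most 4096 blocks (2 MiB) are handed out between collections,
 * leaving the other half free to receive copied objects at the next
 * collection.
 */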

#define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0]))

#if 0
#define LOG_ALLOC LOGI
#define LOG_PIN LOGI
#define LOG_PROM LOGI
#define LOG_REF LOGI
#define LOG_SCAV LOGI
#define LOG_TRAN LOGI
#define LOG_VER LOGI
#else
#define LOG_ALLOC(...) ((void)0)
#define LOG_PIN(...) ((void)0)
#define LOG_PROM(...) ((void)0)
#define LOG_REF(...) ((void)0)
#define LOG_SCAV(...) ((void)0)
#define LOG_TRAN(...) ((void)0)
#define LOG_VER(...) ((void)0)
#endif

static void enqueueBlock(HeapSource *heapSource, size_t block);
static void scavengeReference(Object **obj);
static void verifyReference(const void *obj);
static void printHeapBitmap(const HeapBitmap *bitmap);
static void printHeapBitmapSxS(const HeapBitmap *b1, const HeapBitmap *b2);
static bool toSpaceContains(const void *addr);
static bool fromSpaceContains(const void *addr);
static size_t sumHeapBitmap(const HeapBitmap *bitmap);
static size_t objectSize(const Object *obj);
static void scavengeDataObject(Object *obj);
static void scavengeBlockQueue(void);

/*
 * We use 512-byte blocks.
 */
enum { BLOCK_SHIFT = 9 };
enum { BLOCK_SIZE = 1 << BLOCK_SHIFT };

/*
 * Space identifiers, stored into the blockSpace array.
 */
enum {
    BLOCK_FREE = 0,
    BLOCK_FROM_SPACE = 1,
    BLOCK_TO_SPACE = 2,
    BLOCK_CONTINUED = 7
};

/*
 * Alignment for all allocations, in bytes.
 */
enum { ALLOC_ALIGNMENT = 8 };

/*
 * Sentinel value for the queue end.
 */
#define QUEUE_TAIL (~(size_t)0)

struct HeapSource {

    /* The base address of backing store. */
    u1 *blockBase;

    /* Total number of blocks available for allocation. */
    size_t totalBlocks;
    size_t allocBlocks;

    /*
     * The scavenger work queue.  Implemented as an array of index
     * values into the queue.
     */
    size_t *blockQueue;

    /*
     * Base and limit blocks.  Basically the shifted start address of
     * the block.  We convert blocks to a relative number when
     * indexing in the block queue.  TODO: make the block queue base
     * relative rather than the index into the block queue.
     */
    size_t baseBlock, limitBlock;

    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* The space of the current block 0 (free), 1 or 2. */
    char *blockSpace;

    /* Start of free space in the current block. */
    u1 *allocPtr;
    /* Exclusive limit of free space in the current block. */
    u1 *allocLimit;

    HeapBitmap allocBits;

    /*
     * The starting size of the heap.  This value is the same as the
     * value provided to the -Xms flag.
     */
    size_t minimumSize;

    /*
     * The maximum size of the heap.  This value is the same as the
     * -Xmx flag.
     */
    size_t maximumSize;

    /*
     * The current, committed size of the heap.  At present, this is
     * equivalent to the maximumSize.
     */
    size_t currentSize;

    size_t bytesAllocated;
};

static unsigned long alignDown(unsigned long x, unsigned long n)
{
    return x & -n;
}

static unsigned long alignUp(unsigned long x, unsigned long n)
{
    return alignDown(x + (n - 1), n);
}
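
/*
 * For illustration (n must be a power of two):
 *   alignDown(0x1234, 0x1000) == 0x1000
 *   alignUp(13, 8) == 16 because alignDown(13 + 7, 8) == 16
 *   alignUp(16, 8) == 16
 */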

static void describeBlocks(const HeapSource *heapSource)
{
    size_t i;

    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if ((i % 32) == 0) putchar('\n');
        printf("%d ", heapSource->blockSpace[i]);
    }
    putchar('\n');
}

/*
 * Virtual memory interface.
 */

static void *virtualAlloc(size_t length)
{
    void *addr;
    int flags, prot;

    flags = MAP_PRIVATE | MAP_ANONYMOUS;
    prot = PROT_READ | PROT_WRITE;
    addr = mmap(NULL, length, prot, flags, -1, 0);
    if (addr == MAP_FAILED) {
        LOGE_HEAP("mmap: %s", strerror(errno));
        addr = NULL;
    }
    return addr;
}

static void virtualFree(void *addr, size_t length)
{
    int res;

    assert(addr != NULL);
    assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0);
    res = munmap(addr, length);
    if (res == -1) {
        LOGE_HEAP("munmap: %s", strerror(errno));
    }
}

static int isValidAddress(const HeapSource *heapSource, const u1 *addr)
{
    size_t block;

    block = (uintptr_t)addr >> BLOCK_SHIFT;
    return heapSource->baseBlock <= block &&
           heapSource->limitBlock > block;
}

/*
 * Iterate over the block map looking for a contiguous run of free
 * blocks.
 */
static void *allocateBlocks(HeapSource *heapSource, size_t blocks)
{
    void *addr;
    size_t allocBlocks, totalBlocks;
    size_t i, j;

    allocBlocks = heapSource->allocBlocks;
    totalBlocks = heapSource->totalBlocks;
    /* Check underflow. */
    assert(blocks != 0);
    /* Check overflow. */
    if (allocBlocks + blocks > totalBlocks / 2) {
        return NULL;
    }
    /* Scan block map. */
    for (i = 0; i < totalBlocks; ++i) {
        /* Check fit. */
        for (j = 0; j < blocks; ++j) { /* runs over totalBlocks */
            if (heapSource->blockSpace[i+j] != BLOCK_FREE) {
                break;
            }
        }
        /* No fit? */
        if (j != blocks) {
            i += j;
            continue;
        }
        /* Fit, allocate. */
        heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */
        for (j = 1; j < blocks; ++j) {
            heapSource->blockSpace[i+j] = BLOCK_CONTINUED;
        }
        heapSource->allocBlocks += blocks;
        addr = &heapSource->blockBase[i*BLOCK_SIZE];
        memset(addr, 0, blocks*BLOCK_SIZE);
        /* Collecting? */
        if (heapSource->queueHead != QUEUE_TAIL) {
            LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
            /*
             * This allocation was on behalf of the transporter when it
             * shaded a white object gray.  We enqueue the block so
             * the scavenger can further shade the gray objects black.
             */
            enqueueBlock(heapSource, i);
        }

        return addr;
    }
    /* Insufficient space, fail. */
    LOGE("Insufficient space, %zu blocks, %zu blocks allocated and %zu bytes allocated",
         heapSource->totalBlocks,
         heapSource->allocBlocks,
         heapSource->bytesAllocated);
    return NULL;
}

/* Converts an absolute address to a relative block number. */
static size_t addressToBlock(const HeapSource *heapSource, const void *addr)
{
    assert(heapSource != NULL);
    assert(isValidAddress(heapSource, addr));
    return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock;
}

/* Converts a relative block number to an absolute address. */
static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
{
    u1 *addr;

    addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
    assert(isValidAddress(heapSource, addr));
    return addr;
}
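
/*
 * For illustration (hypothetical addresses): if blockBase is 0x40000000
 * then baseBlock is 0x40000000 >> 9 == 0x200000.  An object at
 * 0x40000a10 maps to relative block (0x40000a10 >> 9) - 0x200000 == 5,
 * and blockToAddress(heapSource, 5) returns that block's start address,
 * 0x40000a00.
 */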

static void clearBlock(HeapSource *heapSource, size_t block)
{
    u1 *addr;
    size_t i;

    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    addr = heapSource->blockBase + block*BLOCK_SIZE;
    memset(addr, 0xCC, BLOCK_SIZE);
    for (i = 0; i < BLOCK_SIZE; i += 8) {
        dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i);
    }
}

static void clearFromSpace(HeapSource *heapSource)
{
    size_t i, count;

    assert(heapSource != NULL);
    i = count = 0;
    while (i < heapSource->totalBlocks) {
        if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) {
            ++i;
            continue;
        }
        heapSource->blockSpace[i] = BLOCK_FREE;
        clearBlock(heapSource, i);
        ++i;
        ++count;
        while (i < heapSource->totalBlocks &&
               heapSource->blockSpace[i] == BLOCK_CONTINUED) {
            heapSource->blockSpace[i] = BLOCK_FREE;
            clearBlock(heapSource, i);
            ++i;
            ++count;
        }
    }
    LOG_SCAV("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE);
}

/*
 * Appends the given block to the block queue.  The block queue is
 * processed in-order by the scavenger.
 */
static void enqueueBlock(HeapSource *heapSource, size_t block)
{
    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    if (heapSource->queueHead != QUEUE_TAIL) {
        heapSource->blockQueue[heapSource->queueTail] = block;
    } else {
        heapSource->queueHead = block;
    }
    heapSource->blockQueue[block] = QUEUE_TAIL;
    heapSource->queueTail = block;
    ++heapSource->queueSize;
}
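
/*
 * For illustration: the queue is a singly-linked list threaded through
 * the blockQueue array.  Starting from an empty queue (queueHead ==
 * QUEUE_TAIL), enqueueBlock(hs, 5) followed by enqueueBlock(hs, 9)
 * leaves queueHead == 5, blockQueue[5] == 9, blockQueue[9] ==
 * QUEUE_TAIL, and queueTail == 9; the scavenger visits blocks in that
 * order.
 */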

/*
 * Grays all objects within the block corresponding to the given
 * address.
 */
static void promoteBlockByAddr(HeapSource *heapSource, const void *addr)
{
    size_t block;

    block = addressToBlock(heapSource, (const u1 *)addr);
    if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
        // LOG_PROM("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
        heapSource->blockSpace[block] = BLOCK_TO_SPACE;
        enqueueBlock(heapSource, block);
        /* TODO(cshapiro): count continued blocks? */
        heapSource->allocBlocks += 1;
    } else {
        // LOG_PROM("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
    }
}

GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
{
    GcHeap* gcHeap;
    HeapSource *heapSource;

    assert(startSize <= absoluteMaxSize);

    heapSource = malloc(sizeof(*heapSource));
    assert(heapSource != NULL);
    memset(heapSource, 0, sizeof(*heapSource));

    heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE);
    heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE);

    heapSource->currentSize = heapSource->maximumSize;

    /* Allocate underlying storage for blocks. */
    heapSource->blockBase = virtualAlloc(heapSource->maximumSize);
    assert(heapSource->blockBase != NULL);
    heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT;
    heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT;

    heapSource->allocBlocks = 0;
    heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock);

    assert(heapSource->totalBlocks == heapSource->maximumSize / BLOCK_SIZE);

    {
        size_t size = sizeof(heapSource->blockQueue[0]);
        heapSource->blockQueue = malloc(heapSource->totalBlocks*size);
        assert(heapSource->blockQueue != NULL);
        memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size);
        heapSource->queueHead = QUEUE_TAIL;
    }

    /* Byte indicating space residence or free status of block. */
    {
        size_t size = sizeof(heapSource->blockSpace[0]);
        heapSource->blockSpace = malloc(heapSource->totalBlocks*size);
        assert(heapSource->blockSpace != NULL);
        memset(heapSource->blockSpace, 0, heapSource->totalBlocks*size);
    }

    dvmHeapBitmapInit(&heapSource->allocBits,
                      heapSource->blockBase,
                      heapSource->maximumSize,
                      "blockBase");

    /* Initialize allocation pointers. */
    heapSource->allocPtr = allocateBlocks(heapSource, 1);
    heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;

    gcHeap = malloc(sizeof(*gcHeap));
    assert(gcHeap != NULL);
    memset(gcHeap, 0, sizeof(*gcHeap));
    gcHeap->heapSource = heapSource;

    return gcHeap;
}
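
/*
 * For illustration (hypothetical flags): with -Xms2m -Xmx16m both
 * sizes round up to multiples of BLOCK_SIZE, the full 16 MiB is mapped
 * immediately (currentSize == maximumSize), and totalBlocks ==
 * 16 MiB / 512 == 32768.  One block is claimed right away to prime
 * allocPtr and allocLimit.
 */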

/*
 * Perform any required heap initializations after forking from the
 * zygote process.  This is a no-op for the time being.  Eventually
 * this will demarcate the shared region of the heap.
 */
bool dvmHeapSourceStartupAfterZygote(void)
{
    return true;
}

bool dvmHeapSourceStartupBeforeFork(void)
{
    assert(!"implemented");
    return false;
}

void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL)
        return;
    virtualFree((*gcHeap)->heapSource->blockBase,
                (*gcHeap)->heapSource->maximumSize);
    free((*gcHeap)->heapSource);
    (*gcHeap)->heapSource = NULL;
    free(*gcHeap);
    *gcHeap = NULL;
}

size_t dvmHeapSourceGetValue(enum HeapSourceValueSpec spec,
                             size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *heapSource;
    size_t value;

    heapSource = gDvm.gcHeap->heapSource;
    switch (spec) {
    case HS_EXTERNAL_BYTES_ALLOCATED:
        value = 0;
        break;
    case HS_EXTERNAL_LIMIT:
        value = 0;
        break;
    case HS_FOOTPRINT:
        value = heapSource->maximumSize;
        break;
    case HS_ALLOWED_FOOTPRINT:
        value = heapSource->maximumSize;
        break;
    case HS_BYTES_ALLOCATED:
        value = heapSource->bytesAllocated;
        break;
    case HS_OBJECTS_ALLOCATED:
        value = sumHeapBitmap(&heapSource->allocBits);
        break;
    default:
        assert(!"implemented");
        value = 0;
    }
    if (perHeapStats) {
        *perHeapStats = value;
    }
    return value;
}

/*
 * Performs a shallow copy of the allocation bitmap into the given
 * vector of heap bitmaps.
 */
void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
                                   size_t numHeaps)
{
    assert(!"implemented");
}

HeapBitmap *dvmHeapSourceGetLiveBits(void)
{
    assert(!"implemented");
    return NULL;
}

/*
 * Allocate the specified number of bytes from the heap.  The
 * allocation cursor points into a block of free storage.  If the
 * given allocation fits in the remaining space of the block, we
 * advance the cursor and return a pointer to the free storage.  If
 * the allocation cannot fit in the current block but is smaller than
 * a block we request a new block and allocate from it instead.  If
 * the allocation is larger than a block we must allocate from a span
 * of contiguous blocks.
 */
void *dvmHeapSourceAlloc(size_t length)
{
    HeapSource *heapSource;
    unsigned char *addr;
    size_t aligned, available, blocks;

    heapSource = gDvm.gcHeap->heapSource;
    assert(heapSource != NULL);
    assert(heapSource->allocPtr != NULL);
    assert(heapSource->allocLimit != NULL);

    aligned = alignUp(length, ALLOC_ALIGNMENT);
    available = heapSource->allocLimit - heapSource->allocPtr;

    /* Try allocating inside the current block. */
    if (aligned <= available) {
        addr = heapSource->allocPtr;
        heapSource->allocPtr += aligned;
        heapSource->bytesAllocated += aligned;
        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
        return addr;
    }

    /* Try allocating in a new block. */
    if (aligned <= BLOCK_SIZE) {
        addr = allocateBlocks(heapSource, 1);
        if (addr != NULL) {
            heapSource->allocLimit = addr + BLOCK_SIZE;
            heapSource->allocPtr = addr + aligned;
            heapSource->bytesAllocated += aligned;
            dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
            /* TODO(cshapiro): pad out the current block. */
        }
        return addr;
    }

    /* Try allocating in a span of blocks. */
    blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE;

    addr = allocateBlocks(heapSource, blocks);
    /* Propagate failure upward. */
    if (addr != NULL) {
        heapSource->bytesAllocated += aligned;
        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
        /* TODO(cshapiro): pad out free space in the last block. */
    }
    return addr;
}
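
/*
 * For illustration: a 20-byte request is rounded up to
 * alignUp(20, 8) == 24 bytes and, if the current block has room, is
 * carved off at allocPtr.  A 600-byte request exceeds BLOCK_SIZE, so it
 * takes the span path and claims alignUp(600, 512) / 512 == 2
 * contiguous blocks.
 */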

void *dvmHeapSourceAllocAndGrow(size_t size)
{
    return dvmHeapSourceAlloc(size);
}

/* TODO: refactor along with dvmHeapSourceAlloc */
void *allocateGray(size_t size)
{
    HeapSource *heapSource;
    void *addr;
    size_t block;

    /* TODO: add a check that we are in a GC. */
    heapSource = gDvm.gcHeap->heapSource;
    addr = dvmHeapSourceAlloc(size);
    block = addressToBlock(heapSource, (const u1 *)addr);
    if (heapSource->queueHead == QUEUE_TAIL) {
        /*
         * Forcibly append the underlying block to the queue.  This
         * condition occurs when referents are transported following
         * the initial trace.
         */
        enqueueBlock(heapSource, block);
        LOG_PROM("forced promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
    }
    return addr;
}

/*
 * Returns true if the given address is within the heap and points to
 * the header of a live object.
 */
bool dvmHeapSourceContains(const void *addr)
{
    HeapSource *heapSource;
    HeapBitmap *bitmap;

    heapSource = gDvm.gcHeap->heapSource;
    bitmap = &heapSource->allocBits;
    if (!dvmHeapBitmapCoversAddress(bitmap, addr)) {
        return false;
    } else {
        return dvmHeapBitmapIsObjectBitSet(bitmap, addr);
    }
}

bool dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
{
    assert(!"implemented");
    return false;
}

size_t dvmHeapSourceChunkSize(const void *ptr)
{
    assert(!"implemented");
    return 0;
}

size_t dvmHeapSourceFootprint(void)
{
    assert(!"implemented");
    return 0;
}

/*
 * Returns the "ideal footprint" which appears to be the number of
 * bytes currently committed to the heap.  This starts out at the
 * start size of the heap and grows toward the maximum size.
 */
size_t dvmHeapSourceGetIdealFootprint(void)
{
    return gDvm.gcHeap->heapSource->currentSize;
}

float dvmGetTargetHeapUtilization(void)
{
    assert(!"implemented");
    return 0.0f;
}

void dvmSetTargetHeapUtilization(float newTarget)
{
    assert(!"implemented");
}

size_t dvmMinimumHeapSize(size_t size, bool set)
{
    return gDvm.gcHeap->heapSource->minimumSize;
}

/*
 * Expands the size of the heap after a collection.  At present we
 * commit the pages for the maximum size of the heap so this routine
 * is just a no-op.  Eventually, we will either allocate or commit
 * pages on an as-needed basis.
 */
void dvmHeapSourceGrowForUtilization(void)
{
    /* do nothing */
}

void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    /* do nothing */
}

void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen,
                                        const void *userptr, size_t userlen,
                                        void *arg),
                       void *arg)
{
    assert(!"implemented");
}

size_t dvmHeapSourceGetNumHeaps(void)
{
    return 1;
}

bool dvmTrackExternalAllocation(size_t n)
{
    assert(!"implemented");
    return false;
}

void dvmTrackExternalFree(size_t n)
{
    assert(!"implemented");
}

size_t dvmGetExternalBytesAllocated(void)
{
    assert(!"implemented");
    return 0;
}

void dvmHeapSourceFlip(void)
{
    HeapSource *heapSource;
    size_t i;

    heapSource = gDvm.gcHeap->heapSource;

    /* Reset the block queue. */
    heapSource->allocBlocks = 0;
    heapSource->queueSize = 0;
    heapSource->queueHead = QUEUE_TAIL;

    /* TODO(cshapiro): pad the current (prev) block. */

    heapSource->allocPtr = NULL;
    heapSource->allocLimit = NULL;

    /* Whiten all allocated blocks. */
    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) {
            heapSource->blockSpace[i] = BLOCK_FROM_SPACE;
        }
    }
}

static void room(size_t *alloc, size_t *avail, size_t *total)
{
    HeapSource *heapSource;
    size_t i;

    heapSource = gDvm.gcHeap->heapSource;
    *total = heapSource->totalBlocks*BLOCK_SIZE;
    *alloc = heapSource->allocBlocks*BLOCK_SIZE;
    *avail = *total - *alloc;
}

static bool isSpaceInternal(u1 *addr, int space)
{
    HeapSource *heapSource;
    u1 *base, *limit;
    size_t offset;
    char space2;

    heapSource = gDvm.gcHeap->heapSource;
    base = heapSource->blockBase;
    assert(addr >= base);
    limit = heapSource->blockBase + heapSource->maximumSize;
    assert(addr < limit);
    offset = addr - base;
    space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT];
    return space == space2;
}

static bool fromSpaceContains(const void *addr)
{
    return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE);
}

static bool toSpaceContains(const void *addr)
{
    return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE);
}

/*
 * Notifies the collector that the object at the given address must
 * remain stationary during the current collection.
 */
static void pinObject(const Object *obj)
{
    promoteBlockByAddr(gDvm.gcHeap->heapSource, obj);
}

static void printHeapBitmap(const HeapBitmap *bitmap)
{
    const char *cp;
    size_t i, length;

    length = bitmap->bitsLen >> 2;
    fprintf(stderr, "%p", bitmap->bits);
    for (i = 0; i < length; ++i) {
        fprintf(stderr, " %lx", bitmap->bits[i]);
        fputc('\n', stderr);
    }
}

static void printHeapBitmapSxS(const HeapBitmap *b1, const HeapBitmap *b2)
{
    uintptr_t addr;
    size_t i, length;

    assert(b1->base == b2->base);
    assert(b1->bitsLen == b2->bitsLen);
    addr = b1->base;
    length = b1->bitsLen >> 2;
    for (i = 0; i < length; ++i) {
        int diff = b1->bits[i] == b2->bits[i];
        fprintf(stderr, "%08x %08lx %08lx %d\n",
                addr, b1->bits[i], b2->bits[i], diff);
        addr += sizeof(*b1->bits)*CHAR_BIT;
    }
}

static size_t sumHeapBitmap(const HeapBitmap *bitmap)
{
    const char *cp;
    size_t i, sum;

    sum = 0;
    for (i = 0; i < bitmap->bitsLen >> 2; ++i) {
        sum += dvmClzImpl(bitmap->bits[i]);
    }
    return sum;
}

/*
 * Miscellaneous functionality.
 */

static int isForward(const void *addr)
{
    return (uintptr_t)addr & 0x1;
}

static void setForward(const void *toObj, void *fromObj)
{
    *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1;
}

static void* getForward(const void *fromObj)
{
    return (void *)((uintptr_t)fromObj & ~0x1);
}
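
/*
 * For illustration: a forwarded from-space object has its first word
 * (the clazz pointer slot) overwritten with the to-space address, low
 * bit set.  After setForward(toObj, fromObj), isForward(fromObj->clazz)
 * is true and getForward(fromObj->clazz) recovers toObj.  The tag is
 * unambiguous because heap addresses are 8-byte aligned
 * (ALLOC_ALIGNMENT), so a genuine class pointer never has its low bit
 * set.
 */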

/* Beware, uses the same encoding as forwarding pointers! */
static int isPermanentString(const StringObject *obj) {
    return (uintptr_t)obj & 0x1;
}

static void* getPermanentString(const StringObject *obj)
{
    return (void *)((uintptr_t)obj & ~0x1);
}


/*
 * Scavenging and transporting routines follow.  A transporter grays
 * an object.  A scavenger blackens an object.  We define these
 * routines for each fundamental object type.  Dispatch is performed
 * in scavengeObject.
 */

/*
 * Class object scavenging.
 */
static void scavengeClassObject(ClassObject *obj)
{
    int i;

    LOG_SCAV("scavengeClassObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->obj.clazz != NULL);
    assert(obj->obj.clazz->descriptor != NULL);
    assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;"));
    assert(obj->descriptor != NULL);
    LOG_SCAV("scavengeClassObject: descriptor='%s',vtableCount=%zu",
             obj->descriptor, obj->vtableCount);
    /* Scavenge our class object. */
    scavengeReference((Object **) obj);
    /* Scavenge the array element class object. */
    if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) {
        scavengeReference((Object **)(void *)&obj->elementClass);
    }
    /* Scavenge the superclass. */
    scavengeReference((Object **)(void *)&obj->super);
    /* Scavenge the class loader. */
    scavengeReference(&obj->classLoader);
    /* Scavenge static fields. */
    for (i = 0; i < obj->sfieldCount; ++i) {
        char ch = obj->sfields[i].field.signature[0];
        if (ch == '[' || ch == 'L') {
            scavengeReference((Object **)(void *)&obj->sfields[i].value.l);
        }
    }
    /* Scavenge interface class objects. */
    for (i = 0; i < obj->interfaceCount; ++i) {
        scavengeReference((Object **) &obj->interfaces[i]);
    }
}

/*
 * Array object scavenging.
 */
static size_t scavengeArrayObject(ArrayObject *array)
{
    size_t i, length;

    LOG_SCAV("scavengeArrayObject(array=%p)", array);
    /* Scavenge the class object. */
    assert(toSpaceContains(array));
    assert(array != NULL);
    assert(array->obj.clazz != NULL);
    scavengeReference((Object **) array);
    length = dvmArrayObjectSize(array);
    /* Scavenge the array contents. */
    if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) {
        Object **contents = (Object **)array->contents;
        for (i = 0; i < array->length; ++i) {
            scavengeReference(&contents[i]);
        }
    }
    return length;
}

/*
 * Reference object scavenging.
 */

static int getReferenceFlags(const Object *obj)
{
    int flags;

    flags = CLASS_ISREFERENCE |
            CLASS_ISWEAKREFERENCE |
            CLASS_ISPHANTOMREFERENCE;
    return GET_CLASS_FLAG_GROUP(obj->clazz, flags);
}

static int isReference(const Object *obj)
{
    return getReferenceFlags(obj) != 0;
}

static int isSoftReference(const Object *obj)
{
    return getReferenceFlags(obj) == CLASS_ISREFERENCE;
}

static int isWeakReference(const Object *obj)
{
    return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE;
}

static bool isPhantomReference(const Object *obj)
{
    return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE;
}

/*
 * Returns true if the reference was registered with a reference queue
 * but has not yet been appended to it.
 */
static bool isReferenceEnqueuable(const Object *ref)
{
    Object *queue, *queueNext;

    queue = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue);
    queueNext = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext);
    if (queue == NULL || queueNext != NULL) {
        /*
         * There is no queue, or the reference has already
         * been enqueued.  The Reference.enqueue() method
         * will do nothing even if we call it.
         */
        return false;
    }

    /*
     * We need to call enqueue(), but if we called it from
     * here we'd probably deadlock.  Schedule a call.
     */
    return true;
}

/*
 * Schedules a reference to be appended to its reference queue.
 */
static void enqueueReference(const Object *ref)
{
    LargeHeapRefTable **table;
    Object *op;

    assert(((uintptr_t)ref & 3) == 0);
    assert((WORKER_ENQUEUE & ~3) == 0);
    assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue) != NULL);
    assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext) == NULL);
    /*
     * Set the enqueue bit in the bottom of the pointer.  Assumes that
     * objects are 8-byte aligned.
     *
     * Note that we are adding the *Reference* (which is by definition
     * already black at this point) to this list; we're not adding the
     * referent (which has already been cleared).
     */
    table = &gDvm.gcHeap->referenceOperations;
    op = (Object *)((uintptr_t)ref | WORKER_ENQUEUE);
    if (!dvmHeapAddRefToLargeTable(table, op)) {
        LOGE("no room for any more reference operations");
        dvmAbort();
    }
}
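
/*
 * For illustration: the code above asserts that the low two bits of a
 * Reference pointer are clear, so those bits are free to carry the
 * worker operation (e.g. WORKER_ENQUEUE) inside referenceOperations.
 * Code that later walks such tables masks the operation off with
 * (uintptr_t)*ref & ~3 before following the pointer (see
 * scavengeLargeHeapRefTable below).
 */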

/*
 * Sets the referent field of a reference object to NULL.
 */
static void clearReference(Object *obj)
{
    dvmSetFieldObject(obj, gDvm.offJavaLangRefReference_referent, NULL);
}

/*
 * Clears reference objects with white referents.
 */
void clearWhiteReferences(Object **list)
{
    size_t referentOffset, vmDataOffset;
    bool doSignal;

    vmDataOffset = gDvm.offJavaLangRefReference_vmData;
    referentOffset = gDvm.offJavaLangRefReference_referent;
    doSignal = false;
    while (*list != NULL) {
        Object *ref = *list;
        JValue *field = dvmFieldPtr(ref, referentOffset);
        Object *referent = field->l;
        *list = dvmGetFieldObject(ref, vmDataOffset);
        assert(referent != NULL);
        if (isForward(referent->clazz)) {
            field->l = referent = getForward(referent->clazz);
            continue;
        }
        if (fromSpaceContains(referent)) {
            /* Referent is white, clear it. */
            clearReference(ref);
            if (isReferenceEnqueuable(ref)) {
                enqueueReference(ref);
                doSignal = true;
            }
        }
    }
    /*
     * If we cleared a reference with a reference queue we must notify
     * the heap worker to append the reference.
     */
    if (doSignal) {
        dvmSignalHeapWorker(false);
    }
    assert(*list == NULL);
}

/*
 * Blackens referents subject to the soft reference preservation
 * policy.
 */
void preserveSoftReferences(Object **list)
{
    Object *ref;
    Object *prev, *next;
    size_t referentOffset, vmDataOffset;
    unsigned counter;
    bool white;

    vmDataOffset = gDvm.offJavaLangRefReference_vmData;
    referentOffset = gDvm.offJavaLangRefReference_referent;
    counter = 0;
    prev = next = NULL;
    ref = *list;
    while (ref != NULL) {
        JValue *field = dvmFieldPtr(ref, referentOffset);
        Object *referent = field->l;
        next = dvmGetFieldObject(ref, vmDataOffset);
        assert(referent != NULL);
        if (isForward(referent->clazz)) {
            /* Referent is black. */
            field->l = referent = getForward(referent->clazz);
            white = false;
        } else {
            white = fromSpaceContains(referent);
        }
        if (!white && ((++counter) & 1)) {
            /* Referent is white and biased toward saving, gray it. */
            scavengeReference((Object **)(void *)&field->l);
            white = true;
        }
        if (white) {
            /* Referent is black, unlink it. */
            if (prev != NULL) {
                dvmSetFieldObject(ref, vmDataOffset, NULL);
                dvmSetFieldObject(prev, vmDataOffset, next);
            }
        } else {
            /* Referent is white, skip over it. */
            prev = ref;
        }
        ref = next;
    }
    /*
     * Restart the trace with the newly gray references added to the
     * root set.
     */
    scavengeBlockQueue();
}

void processFinalizableReferences(void)
{
    HeapRefTable newPendingRefs;
    LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
    Object **ref;
    Object **lastRef;
    size_t totalPendCount;

    /*
     * All strongly reachable objects are black.
     * Any white finalizable objects need to be finalized.
     */

    /* Create a table that the new pending refs will
     * be added to.
     */
    if (!dvmHeapInitHeapRefTable(&newPendingRefs, 128)) {
        //TODO: mark all finalizable refs and hope that
        //      we can schedule them next time.  Watch out,
        //      because we may be expecting to free up space
        //      by calling finalizers.
        LOG_REF("no room for pending finalizations\n");
        dvmAbort();
    }

    /*
     * Walk through finalizableRefs and move any white references to
     * the list of new pending refs.
     */
    totalPendCount = 0;
    while (finRefs != NULL) {
        Object **gapRef;
        size_t newPendCount = 0;

        gapRef = ref = finRefs->refs.table;
        lastRef = finRefs->refs.nextEntry;
        while (ref < lastRef) {
            if (fromSpaceContains(*ref)) {
                if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
                    //TODO: add the current table and allocate
                    //      a new, smaller one.
                    LOG_REF("no room for any more pending finalizations: %zd\n",
                            dvmHeapNumHeapRefTableEntries(&newPendingRefs));
                    dvmAbort();
                }
                newPendCount++;
            } else {
                /* This ref is black, so will remain on finalizableRefs.
                 */
                if (newPendCount > 0) {
                    /* Copy it up to fill the holes.
                     */
                    *gapRef++ = *ref;
                } else {
                    /* No holes yet; don't bother copying.
                     */
                    gapRef++;
                }
            }
            ref++;
        }
        finRefs->refs.nextEntry = gapRef;
        //TODO: if the table is empty when we're done, free it.
        totalPendCount += newPendCount;
        finRefs = finRefs->next;
    }
    LOG_REF("%zd finalizers triggered.\n", totalPendCount);
    if (totalPendCount == 0) {
        /* No objects required finalization.
         * Free the empty temporary table.
         */
        dvmClearReferenceTable(&newPendingRefs);
        return;
    }

    /* Add the new pending refs to the main list.
     */
    if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
                                     &newPendingRefs))
    {
        LOG_REF("can't insert new pending finalizations\n");
        dvmAbort();
    }

    //TODO: try compacting the main list with a memcpy loop

    /* Blacken the refs we just moved; we don't want them or their
     * children to get swept yet.
     */
    ref = newPendingRefs.table;
    lastRef = newPendingRefs.nextEntry;
    assert(ref < lastRef);
    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
    while (ref < lastRef) {
        scavengeReference(ref);
        ref++;
    }
    HPROF_CLEAR_GC_SCAN_STATE();
    scavengeBlockQueue();
    dvmSignalHeapWorker(false);
}

/*
 * If a reference points to from-space and has been forwarded, we snap
 * the pointer to its new to-space address.  If the reference points
 * to an unforwarded from-space address we must enqueue the reference
 * for later processing.  TODO: implement proper reference processing
 * and move the referent scavenging elsewhere.
 */
static void scavengeReferenceObject(Object *obj)
{
    Object *referent;
    Object **queue;
    size_t referentOffset, vmDataOffset;

    assert(obj != NULL);
    LOG_SCAV("scavengeReferenceObject(obj=%p),'%s'", obj, obj->clazz->descriptor);
    scavengeDataObject(obj);
    referentOffset = gDvm.offJavaLangRefReference_referent;
    referent = dvmGetFieldObject(obj, referentOffset);
    if (referent == NULL || toSpaceContains(referent)) {
        return;
    }
    if (isSoftReference(obj)) {
        queue = &gDvm.gcHeap->softReferences;
    } else if (isWeakReference(obj)) {
        queue = &gDvm.gcHeap->weakReferences;
    } else {
        assert(isPhantomReference(obj));
        queue = &gDvm.gcHeap->phantomReferences;
    }
    vmDataOffset = gDvm.offJavaLangRefReference_vmData;
    dvmSetFieldObject(obj, vmDataOffset, *queue);
    *queue = obj;
    LOG_SCAV("scavengeReferenceObject: enqueueing %p", obj);
}

/*
 * Data object scavenging.
 */
static void scavengeDataObject(Object *obj)
{
    ClassObject *clazz;
    int i;

    // LOG_SCAV("scavengeDataObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->clazz != NULL);
    assert(obj->clazz->objectSize != 0);
    assert(toSpaceContains(obj));
    /* Scavenge the class object. */
    clazz = obj->clazz;
    scavengeReference((Object **) obj);
    /* Scavenge instance fields. */
    if (clazz->refOffsets != CLASS_WALK_SUPER) {
        size_t refOffsets = clazz->refOffsets;
        while (refOffsets != 0) {
            size_t rshift = CLZ(refOffsets);
            size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
            Object **ref = (Object **)((u1 *)obj + offset);
            scavengeReference(ref);
            refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
        }
    } else {
        for (; clazz != NULL; clazz = clazz->super) {
            InstField *field = clazz->ifields;
            for (i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
                size_t offset = field->byteOffset;
                Object **ref = (Object **)((u1 *)obj + offset);
                scavengeReference(ref);
            }
        }
    }
}
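
/*
 * For illustration: refOffsets is a bit vector in which each set bit
 * encodes the byte offset of one instance reference field.  The fast
 * path above repeatedly takes the highest set bit with CLZ, converts
 * it to a field offset via CLASS_OFFSET_FROM_CLZ, scavenges that slot,
 * and clears the bit.  Classes whose reference fields cannot all be
 * encoded this way store CLASS_WALK_SUPER instead, which forces the
 * slower walk over each class's ifields up the superclass chain.
 */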

static Object *transportObject(const Object *fromObj)
{
    Object *toObj;
    size_t allocSize, copySize;

    LOG_TRAN("transportObject(fromObj=%p) allocBlocks=%zu",
             fromObj,
             gDvm.gcHeap->heapSource->allocBlocks);
    assert(fromObj != NULL);
    assert(fromSpaceContains(fromObj));
    allocSize = copySize = objectSize(fromObj);
    if (LW_HASH_STATE(fromObj->lock) != LW_HASH_STATE_UNHASHED) {
        /*
         * The object has been hashed or hashed and moved.  We must
         * reserve an additional word for a hash code.
         */
        allocSize += sizeof(u4);
    }
    if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
        /*
         * The object has its hash code allocated.  Ensure the hash
         * code is copied along with the instance data.
         */
        copySize += sizeof(u4);
    }
    /* TODO(cshapiro): don't copy, re-map large data objects. */
    assert(copySize <= allocSize);
    toObj = allocateGray(allocSize);
    assert(toObj != NULL);
    assert(toSpaceContains(toObj));
    memcpy(toObj, fromObj, copySize);
    if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED) {
        /*
         * The object has had its hash code exposed.  Append it to the
         * instance and set a bit so we know to look for it there.
         */
        *(u4 *)(((char *)toObj) + copySize) = (u4)fromObj >> 3;
        toObj->lock |= LW_HASH_STATE_HASHED_AND_MOVED << LW_HASH_STATE_SHIFT;
    }
    LOG_TRAN("transportObject: from %p/%zu to %p/%zu (%zu,%zu) %s",
             fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj),
             toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj),
             copySize, allocSize, copySize < allocSize ? "DIFFERENT" : "");
    return toObj;
}
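
/*
 * For illustration: an object whose identity hash code has been
 * observed (LW_HASH_STATE_HASHED) but never moved is copied with one
 * extra trailing u4; its old address, shifted right by three, becomes
 * the stored hash and the lock word is advanced to
 * LW_HASH_STATE_HASHED_AND_MOVED.  On any later copy the object is
 * already in the "hashed and moved" state, so both allocSize and
 * copySize grow by sizeof(u4) and the appended hash travels with the
 * instance data.
 */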

/*
 * Generic reference scavenging.
 */

/*
 * Given a reference to an object, the scavenge routine will gray the
 * reference.  Any objects pointed to by the scavenged object will be
 * transported to new space and a forwarding pointer will be installed
 * in the header of the object.
 */

/*
 * Blacken the given pointer.  If the pointer is in from-space, it is
 * transported to new space.  If the object has a forwarding pointer
 * installed it has already been transported and the referent is
 * snapped to the new address.
 */
static void scavengeReference(Object **obj)
{
    ClassObject *clazz;
    Object *fromObj, *toObj;
    uintptr_t word;

    assert(obj);

    if (*obj == NULL) return;

    assert(dvmIsValidObject(*obj));

    /* The entire block is black. */
    if (toSpaceContains(*obj)) {
        LOG_SCAV("scavengeReference skipping pinned object @ %p", *obj);
        return;
    }
    LOG_SCAV("scavengeReference(*obj=%p)", *obj);

    assert(fromSpaceContains(*obj));

    clazz = (*obj)->clazz;

    if (isForward(clazz)) {
        // LOG_SCAV("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1));
        *obj = (Object *)getForward(clazz);
        return;
    }
    fromObj = *obj;
    if (clazz == NULL) {
        // LOG_SCAV("scavengeReference %p has a NULL class object", fromObj);
        assert(!"implemented");
        toObj = NULL;
    } else if (clazz == gDvm.unlinkedJavaLangClass) {
        // LOG_SCAV("scavengeReference %p is an unlinked class object", fromObj);
        assert(!"implemented");
        toObj = NULL;
    } else {
        toObj = transportObject(fromObj);
    }
    setForward(toObj, fromObj);
    *obj = (Object *)toObj;
}

static void verifyReference(const void *obj)
{
    HeapSource *heapSource;
    size_t block;
    char space;

    if (obj == NULL) {
        LOG_VER("verifyReference(obj=%p)", obj);
        return;
    }
    heapSource = gDvm.gcHeap->heapSource;
    block = addressToBlock(heapSource, obj);
    space = heapSource->blockSpace[block];
    LOG_VER("verifyReference(obj=%p),block=%zu,space=%d", obj, block, space);
    assert(!((uintptr_t)obj & 7));
    assert(toSpaceContains(obj));
    assert(dvmIsValidObject(obj));
}

/*
 * Generic object scavenging.
 */
static void scavengeObject(Object *obj)
{
    ClassObject *clazz;

    assert(obj != NULL);
    assert(obj->clazz != NULL);
    assert(!((uintptr_t)obj->clazz & 0x1));
    assert(obj->clazz != gDvm.unlinkedJavaLangClass);
    clazz = obj->clazz;
    if (clazz == gDvm.classJavaLangClass) {
        scavengeClassObject((ClassObject *)obj);
    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
        scavengeArrayObject((ArrayObject *)obj);
    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
        scavengeReferenceObject(obj);
    } else {
        scavengeDataObject(obj);
    }
}

/*
 * External root scavenging routines.
 */

static void scavengeHashTable(HashTable *table)
{
    HashEntry *entry;
    void *obj;
    int i;

    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        }
        scavengeReference((Object **)(void *)&entry->data);
    }
    dvmHashTableUnlock(table);
}

static void pinHashTableEntries(HashTable *table)
{
    HashEntry *entry;
    void *obj;
    int i;

    LOG_PIN(">>> pinHashTableEntries(table=%p)", table);
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        }
        pinObject(entry->data);
    }
    dvmHashTableUnlock(table);
    LOG_PIN("<<< pinHashTableEntries(table=%p)", table);
}

static void pinPrimitiveClasses(void)
{
    size_t length;
    size_t i;

    length = ARRAYSIZE(gDvm.primitiveClass);
    for (i = 0; i < length; i++) {
        if (gDvm.primitiveClass[i] != NULL) {
            pinObject((Object *)gDvm.primitiveClass[i]);
        }
    }
}

/*
 * Scavenge interned strings.  Permanent interned strings will have
 * been pinned and are therefore ignored.  Non-permanent strings that
 * have been forwarded are snapped.  All other entries are removed.
 */
static void scavengeInternedStrings(void)
{
    HashTable *table;
    HashEntry *entry;
    Object *obj;
    int i;

    table = gDvm.internedStrings;
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = (Object *)entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        } else if (!isPermanentString((StringObject *)obj)) {
            // LOG_SCAV("entry->data=%p", entry->data);
            LOG_SCAV(">>> string obj=%p", entry->data);
            /* TODO(cshapiro): detach white string objects */
            scavengeReference((Object **)(void *)&entry->data);
            LOG_SCAV("<<< string obj=%p", entry->data);
        }
    }
    dvmHashTableUnlock(table);
}

static void pinInternedStrings(void)
{
    HashTable *table;
    HashEntry *entry;
    Object *obj;
    int i;

    table = gDvm.internedStrings;
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = (Object *)entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        } else if (isPermanentString((StringObject *)obj)) {
            obj = (Object *)getPermanentString((StringObject*)obj);
            LOG_PROM(">>> pin string obj=%p", obj);
            pinObject(obj);
            LOG_PROM("<<< pin string obj=%p", obj);
        }
    }
    dvmHashTableUnlock(table);
}

static void verifyInternedStrings(void)
{
    HashTable *table;
    HashEntry *entry;
    Object *fwd, *obj;
    int i;

    table = gDvm.internedStrings;
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = (Object *)entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        } else if (isPermanentString((StringObject *)obj)) {
            fwd = (Object *)getForward(obj);
            LOG_VER(">>> verify string fwd=%p obj=%p", fwd, obj);
            verifyReference(fwd);
            LOG_VER("<<< verify string fwd=%p obj=%p", fwd, obj);
        } else {
            LOG_SCAV(">>> verify string obj=%p %p", obj, entry->data);
            verifyReference(obj);
            LOG_SCAV("<<< verify string obj=%p %p", obj, entry->data);
        }
    }
    dvmHashTableUnlock(table);
}
1670
1671/*
1672 * At present, reference tables contain references that must not be
1673 * moved by the collector. Instead of scavenging each reference in
1674 * the table we pin each referenced object.
1675 */
Carl Shapiro583d64c2010-05-04 10:44:47 -07001676static void pinReferenceTable(const ReferenceTable *table)
Carl Shapirod28668c2010-04-15 16:10:00 -07001677{
1678 Object **entry;
1680
1681 assert(table != NULL);
1682 assert(table->table != NULL);
1683 assert(table->nextEntry != NULL);
1684 for (entry = table->table; entry < table->nextEntry; ++entry) {
1685 assert(entry != NULL);
1686 assert(!isForward(*entry));
1687 pinObject(*entry);
1688 }
1689}
1690
1691static void verifyReferenceTable(const ReferenceTable *table)
1692{
1693 Object **entry;
1695
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001696 LOG_VER(">>> verifyReferenceTable(table=%p)", table);
Carl Shapirod28668c2010-04-15 16:10:00 -07001697 for (entry = table->table; entry < table->nextEntry; ++entry) {
1698 assert(entry != NULL);
1699 assert(!isForward(*entry));
1700 verifyReference(*entry);
1701 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001702 LOG_VER("<<< verifyReferenceTable(table=%p)", table);
Carl Shapirod28668c2010-04-15 16:10:00 -07001703}
1704
Carl Shapiro7800c092010-05-11 13:46:29 -07001705static void scavengeLargeHeapRefTable(LargeHeapRefTable *table, bool stripLowBits)
Carl Shapirod28668c2010-04-15 16:10:00 -07001706{
Carl Shapirod28668c2010-04-15 16:10:00 -07001707 for (; table != NULL; table = table->next) {
Carl Shapiro7800c092010-05-11 13:46:29 -07001708 Object **ref = table->refs.table;
1709 for (; ref < table->refs.nextEntry; ++ref) {
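            /*
             * Tables whose slots carry flag values in their low two bits
             * (callers pass stripLowBits == true for those) are scavenged
             * with the flags masked off, and the flags are then re-applied
             * to the forwarded pointer; otherwise the slot is scavenged in
             * place.
             */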
1710 if (stripLowBits) {
1711 Object *obj = (Object *)((uintptr_t)*ref & ~3);
1712 scavengeReference(&obj);
1713 *ref = (Object *)((uintptr_t)obj | ((uintptr_t)*ref & 3));
1714 } else {
1715 scavengeReference(ref);
Carl Shapirod28668c2010-04-15 16:10:00 -07001716 }
Carl Shapiro7800c092010-05-11 13:46:29 -07001717 }
1718 }
1719}
1720
1721static void verifyLargeHeapRefTable(LargeHeapRefTable *table, bool stripLowBits)
1722{
1723 for (; table != NULL; table = table->next) {
1724 Object **ref = table->refs.table;
1725 for (; ref < table->refs.nextEntry; ++ref) {
1726 if (stripLowBits) {
1727 dvmVerifyObject((Object *)((uintptr_t)*ref & ~3));
1728 } else {
1729 dvmVerifyObject(*ref);
1730 }
Carl Shapirod28668c2010-04-15 16:10:00 -07001731 }
1732 }
1733}
1734
1735/* This code was copied from Thread.c */
1736static void scavengeThreadStack(Thread *thread)
1737{
1738 const u4 *framePtr;
1739#if WITH_EXTRA_GC_CHECKS > 1
1740 bool first = true;
1741#endif
1742
1743 framePtr = (const u4 *)thread->curFrame;
1744 while (framePtr != NULL) {
1745 const StackSaveArea *saveArea;
1746 const Method *method;
1747
1748 saveArea = SAVEAREA_FROM_FP(framePtr);
1749 method = saveArea->method;
Carl Shapiro583d64c2010-05-04 10:44:47 -07001750 if (method != NULL && !dvmIsNativeMethod(method)) {
Carl Shapirod28668c2010-04-15 16:10:00 -07001751#ifdef COUNT_PRECISE_METHODS
1752 /* the GC is running, so no lock required */
1753 if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001754 LOG_SCAV("PGC: added %s.%s %p\n",
1755 method->clazz->descriptor, method->name, method);
Carl Shapirod28668c2010-04-15 16:10:00 -07001756#endif
1757#if WITH_EXTRA_GC_CHECKS > 1
1758 /*
1759 * May also want to enable the memset() in the "invokeMethod"
1760 * goto target in the portable interpreter. That sets the stack
1761 * to a pattern that makes referring to uninitialized data
1762 * very obvious.
1763 */
1764
1765 if (first) {
1766 /*
1767 * First frame, isn't native, check the "alternate" saved PC
1768 * as a sanity check.
1769 *
1770 * It seems like we could check the second frame if the first
1771 * is native, since the PCs should be the same. It turns out
1772 * this doesn't always work. The problem is that we could
1773 * have calls in the sequence:
1774 * interp method #2
1775 * native method
1776 * interp method #1
1777 *
1778 * and then GC while in the native method after returning
1779 * from interp method #2. The currentPc on the stack is
1780 * for interp method #1, but thread->currentPc2 is still
1781 * set for the last thing interp method #2 did.
1782 *
1783 * This can also happen in normal execution:
1784 * - sget-object on not-yet-loaded class
1785 * - class init updates currentPc2
1786 * - static field init is handled by parsing annotations;
1787 * static String init requires creation of a String object,
1788 * which can cause a GC
1789 *
1790 * Essentially, any pattern that involves executing
1791 * interpreted code and then causes an allocation without
1792 * executing instructions in the original method will hit
1793 * this. These are rare enough that the test still has
1794 * some value.
1795 */
1796 if (saveArea->xtra.currentPc != thread->currentPc2) {
1797 LOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p\n",
1798 saveArea->xtra.currentPc, thread->currentPc2,
1799 method->clazz->descriptor, method->name, method->insns);
1800 if (saveArea->xtra.currentPc != NULL)
1801 LOGE(" pc inst = 0x%04x\n", *saveArea->xtra.currentPc);
1802 if (thread->currentPc2 != NULL)
1803 LOGE(" pc2 inst = 0x%04x\n", *thread->currentPc2);
1804 dvmDumpThread(thread, false);
1805 }
1806 } else {
1807 /*
1808 * It's unusual, but not impossible, for a non-first frame
1809 * to be at something other than a method invocation. For
1810 * example, if we do a new-instance on a nonexistent class,
1811 * we'll have a lot of class loader activity on the stack
1812 * above the frame with the "new" operation. Could also
1813 * happen while we initialize a Throwable when an instruction
1814 * fails.
1815 *
1816 * So there's not much we can do here to verify the PC,
1817 * except to verify that it's a GC point.
1818 */
1819 }
1820 assert(saveArea->xtra.currentPc != NULL);
1821#endif
1822
1823 const RegisterMap* pMap;
1824 const u1* regVector;
1825 int i;
1826
1827 Method* nonConstMethod = (Method*) method; // quiet gcc
1828 pMap = dvmGetExpandedRegisterMap(nonConstMethod);
1829
1830 /* assert(pMap != NULL); */
1831
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001832 //LOG_SCAV("PGC: %s.%s\n", method->clazz->descriptor, method->name);
Carl Shapirod28668c2010-04-15 16:10:00 -07001833
1834 if (pMap != NULL) {
1835 /* found map, get registers for this address */
1836 int addr = saveArea->xtra.currentPc - method->insns;
1837 regVector = dvmRegisterMapGetLine(pMap, addr);
1838 /*
1839 if (regVector == NULL) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001840 LOG_SCAV("PGC: map but no entry for %s.%s addr=0x%04x\n",
1841 method->clazz->descriptor, method->name, addr);
Carl Shapirod28668c2010-04-15 16:10:00 -07001842 } else {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001843 LOG_SCAV("PGC: found map for %s.%s 0x%04x (t=%d)\n",
1844 method->clazz->descriptor, method->name, addr,
1845 thread->threadId);
Carl Shapirod28668c2010-04-15 16:10:00 -07001846 }
1847 */
1848 } else {
1849 /*
1850 * No map found. If precise GC is disabled this is
1851 * expected -- we don't create pointers to the map data even
1852 * if it's present -- but if it's enabled it means we're
1853 * unexpectedly falling back on a conservative scan, so it's
1854 * worth yelling a little.
1855 */
1856 if (gDvm.preciseGc) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001857 LOG_SCAV("PGC: no map for %s.%s\n", method->clazz->descriptor, method->name);
Carl Shapirod28668c2010-04-15 16:10:00 -07001858 }
1859 regVector = NULL;
1860 }
1861
1862 /* assert(regVector != NULL); */
1863
1864 if (regVector == NULL) {
1865 /* conservative scan */
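                /*
                 * A copying collector cannot tolerate ambiguous roots, so
                 * rather than conservatively marking a word that merely
                 * looks like a reference, the scan below aborts outright.
                 */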
1866 for (i = method->registersSize - 1; i >= 0; i--) {
1867 u4 rval = *framePtr++;
1868 if (rval != 0 && (rval & 0x3) == 0) {
1869 abort();
1870 /* dvmMarkIfObject((Object *)rval); */
1871 }
1872 }
1873 } else {
1874 /*
1875 * Precise scan. v0 is at the lowest address on the
1876 * interpreted stack, and is the first bit in the register
1877 * vector, so we can walk through the register map and
1878 * memory in the same direction.
1879 *
1880 * A '1' bit indicates a live reference.
1881 */
1882 u2 bits = 1 << 1;
1883 for (i = method->registersSize - 1; i >= 0; i--) {
1884 /* u4 rval = *framePtr++; */
1885 u4 rval = *framePtr;
1886
1887 bits >>= 1;
1888 if (bits == 1) {
1889 /* set bit 9 so we can tell when we're empty */
1890 bits = *regVector++ | 0x0100;
1891 LOGVV("loaded bits: 0x%02x\n", bits & 0xff);
1892 }
1893
1894 if (rval != 0 && (bits & 0x01) != 0) {
1895 /*
1896 * Non-null, register marked as live reference. This
1897 * should always be a valid object.
1898 */
1899#if WITH_EXTRA_GC_CHECKS > 0
1900 if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) {
1901 /* this is very bad */
1902 LOGE("PGC: invalid ref in reg %d: 0x%08x\n",
1903 method->registersSize-1 - i, rval);
1904 } else
1905#endif
1906 {
1907
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001908 // LOG_SCAV("stack reference %u@%p", *framePtr, framePtr);
Carl Shapirod28668c2010-04-15 16:10:00 -07001909 /* dvmMarkObjectNonNull((Object *)rval); */
1910 scavengeReference((Object **) framePtr);
1911 }
1912 } else {
1913 /*
1914 * Null or non-reference, do nothing at all.
1915 */
1916#if WITH_EXTRA_GC_CHECKS > 1
1917 if (dvmIsValidObject((Object*) rval)) {
1918 /* this is normal, but we feel chatty */
1919 LOGD("PGC: ignoring valid ref in reg %d: 0x%08x\n",
1920 method->registersSize-1 - i, rval);
1921 }
1922#endif
1923 }
1924 ++framePtr;
1925 }
1926 dvmReleaseRegisterMapLine(pMap, regVector);
1927 }
1928 }
Carl Shapiro952e84a2010-05-06 14:35:29 -07001929 /* else this is a break frame and there is nothing to gray, or
Carl Shapirod28668c2010-04-15 16:10:00 -07001930 * this is a native method and the registers are just the "ins",
1931 * copied from various registers in the caller's set.
1932 */
1933
1934#if WITH_EXTRA_GC_CHECKS > 1
1935 first = false;
1936#endif
1937
1938 /* Don't fall into an infinite loop if things get corrupted.
1939 */
1940 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1941 saveArea->prevFrame == NULL);
1942 framePtr = saveArea->prevFrame;
1943 }
1944}
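
/*
 * Illustrative sketch, deliberately excluded from the build: it mirrors
 * how the precise-scan loops above and in verifyThreadStack() decode a
 * register map line.  The helper name and upward loop direction are
 * hypothetical; only the byte-at-a-time reload with the 0x0100 sentinel
 * comes from the real code.
 */
#if 0
static void exampleWalkRegisterVector(const u1 *regVector, int registersSize)
{
    u2 bits = 1 << 1;       /* forces a reload on the first iteration */
    int reg;

    for (reg = 0; reg < registersSize; ++reg) {
        bits >>= 1;
        if (bits == 1) {
            /* only the sentinel bit is left; load the next map byte */
            bits = *regVector++ | 0x0100;
        }
        if ((bits & 0x01) != 0) {
            /* register "reg" (v0 is reg 0) holds a live reference */
        }
    }
}
#endif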
1945
1946static void scavengeThread(Thread *thread)
1947{
1948 assert(thread->status != THREAD_RUNNING ||
1949 thread->isSuspended ||
1950 thread == dvmThreadSelf());
1951
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001952 // LOG_SCAV("scavengeThread(thread=%p)", thread);
Carl Shapirod28668c2010-04-15 16:10:00 -07001953
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001954 // LOG_SCAV("Scavenging threadObj=%p", thread->threadObj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001955 scavengeReference(&thread->threadObj);
1956
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001957 // LOG_SCAV("Scavenging exception=%p", thread->exception);
Carl Shapirod28668c2010-04-15 16:10:00 -07001958 scavengeReference(&thread->exception);
1959
1960 scavengeThreadStack(thread);
1961}
1962
1963static void scavengeThreadList(void)
1964{
1965 Thread *thread;
1966
1967 dvmLockThreadList(dvmThreadSelf());
1968 thread = gDvm.threadList;
1969 while (thread) {
1970 scavengeThread(thread);
1971 thread = thread->next;
1972 }
1973 dvmUnlockThreadList();
1974}
1975
1976static void verifyThreadStack(const Thread *thread)
1977{
1978 const u4 *framePtr;
1979
1980 assert(thread != NULL);
1981 framePtr = (const u4 *)thread->curFrame;
1982 while (framePtr != NULL) {
1983 const StackSaveArea *saveArea;
1984 const Method *method;
1985
1986 saveArea = SAVEAREA_FROM_FP(framePtr);
1987 method = saveArea->method;
1988 if (method != NULL && !dvmIsNativeMethod(method)) {
1989 const RegisterMap* pMap;
1990 const u1* regVector;
1991 int i;
1992
1993 Method* nonConstMethod = (Method*) method; // quiet gcc
1994 pMap = dvmGetExpandedRegisterMap(nonConstMethod);
1995
1996 /* assert(pMap != NULL); */
1997
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001998 // LOG_VER("PGC: %s.%s\n", method->clazz->descriptor, method->name);
Carl Shapirod28668c2010-04-15 16:10:00 -07001999
2000 if (pMap != NULL) {
2001 /* found map, get registers for this address */
2002 int addr = saveArea->xtra.currentPc - method->insns;
2003 regVector = dvmRegisterMapGetLine(pMap, addr);
2004 if (regVector == NULL) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002005 LOG_VER("PGC: map but no entry for %s.%s addr=0x%04x\n",
2006 method->clazz->descriptor, method->name, addr);
Carl Shapirod28668c2010-04-15 16:10:00 -07002007 } else {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002008 //LOG_VER("PGC: found map for %s.%s 0x%04x (t=%d)\n", method->clazz->descriptor, method->name, addr, thread->threadId);
Carl Shapirod28668c2010-04-15 16:10:00 -07002009 }
2010 } else {
2011 /*
2012 * No map found. If precise GC is disabled this is
2013 * expected -- we don't create pointers to the map data even
2014 * if it's present -- but if it's enabled it means we're
2015 * unexpectedly falling back on a conservative scan, so it's
2016 * worth yelling a little.
2017 */
2018 if (gDvm.preciseGc) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002019 LOG_VER("PGC: no map for %s.%s\n",
2020 method->clazz->descriptor, method->name);
Carl Shapirod28668c2010-04-15 16:10:00 -07002021 }
2022 regVector = NULL;
2023 }
2024
2025 /* assert(regVector != NULL); */
2026
2027 if (regVector == NULL) {
2028 /* conservative scan */
2029 for (i = method->registersSize - 1; i >= 0; i--) {
2030 u4 rval = *framePtr++;
2031 if (rval != 0 && (rval & 0x3) == 0) {
2032 abort();
2033 /* dvmMarkIfObject((Object *)rval); */
2034 }
2035 }
2036 } else {
2037 /*
2038 * Precise scan. v0 is at the lowest address on the
2039 * interpreted stack, and is the first bit in the register
2040 * vector, so we can walk through the register map and
2041 * memory in the same direction.
2042 *
2043 * A '1' bit indicates a live reference.
2044 */
2045 u2 bits = 1 << 1;
2046 for (i = method->registersSize - 1; i >= 0; i--) {
2047 u4 rval = *framePtr;
2048
2049 bits >>= 1;
2050 if (bits == 1) {
2051 /* set bit 9 so we can tell when we're empty */
2052 bits = *regVector++ | 0x0100;
2053 LOGVV("loaded bits: 0x%02x\n", bits & 0xff);
2054 }
2055
2056 if (rval != 0 && (bits & 0x01) != 0) {
2057 /*
2058 * Non-null, register marked as live reference. This
2059 * should always be a valid object.
2060 */
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002061 //LOG_VER("verify stack reference %p", (Object *)*framePtr);
Carl Shapirod28668c2010-04-15 16:10:00 -07002062 verifyReference((Object *)*framePtr);
2063 } else {
2064 /*
2065 * Null or non-reference, do nothing at all.
2066 */
2067 }
2068 ++framePtr;
2069 }
2070 dvmReleaseRegisterMapLine(pMap, regVector);
2071 }
2072 }
Carl Shapiro952e84a2010-05-06 14:35:29 -07002073 /* else this is a break frame and there is nothing to gray, or
Carl Shapirod28668c2010-04-15 16:10:00 -07002074 * this is a native method and the registers are just the "ins",
2075 * copied from various registers in the caller's set.
2076 */
2077
2078 /* Don't fall into an infinite loop if things get corrupted.
2079 */
2080 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
2081 saveArea->prevFrame == NULL);
2082 framePtr = saveArea->prevFrame;
2083 }
2084}
2085
2086static void verifyThread(const Thread *thread)
2087{
2088 assert(thread->status != THREAD_RUNNING ||
2089 thread->isSuspended ||
2090 thread == dvmThreadSelf());
2091
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002092 LOG_VER("verifyThread(thread=%p)", thread);
Carl Shapirod28668c2010-04-15 16:10:00 -07002093
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002094 LOG_VER("verify threadObj=%p", thread->threadObj);
Carl Shapirod28668c2010-04-15 16:10:00 -07002095 verifyReference(thread->threadObj);
2096
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002097 LOG_VER("verify exception=%p", thread->exception);
Carl Shapirod28668c2010-04-15 16:10:00 -07002098 verifyReference(thread->exception);
2099
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002100 LOG_VER("verify thread->internalLocalRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07002101 verifyReferenceTable(&thread->internalLocalRefTable);
2102
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002103 LOG_VER("verify thread->jniLocalRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07002104 verifyReferenceTable(&thread->jniLocalRefTable);
2105
2106 /* Can the check be pushed into the promote routine? */
2107 if (thread->jniMonitorRefTable.table) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002108 LOG_VER("verify thread->jniMonitorRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07002109 verifyReferenceTable(&thread->jniMonitorRefTable);
2110 }
2111
2112 verifyThreadStack(thread);
2113}
2114
2115static void verifyThreadList(void)
2116{
2117 Thread *thread;
2118
2119 dvmLockThreadList(dvmThreadSelf());
2120 thread = gDvm.threadList;
2121 while (thread) {
2122 verifyThread(thread);
2123 thread = thread->next;
2124 }
2125 dvmUnlockThreadList();
2126}
2127
Carl Shapiro583d64c2010-05-04 10:44:47 -07002128static void pinNativeMethodArguments(const Thread *thread)
2129{
2130 const u4 *framePtr;
2131 const StackSaveArea *saveArea;
2132 const Method *method;
2133 const char *shorty;
2134 Object *obj;
2135 int i;
2136
2137 saveArea = NULL;
2138 framePtr = (const u4 *)thread->curFrame;
2139 for (; framePtr != NULL; framePtr = saveArea->prevFrame) {
2140 saveArea = SAVEAREA_FROM_FP(framePtr);
2141 method = saveArea->method;
2142 if (method != NULL && dvmIsNativeMethod(method)) {
2143 /*
Carl Shapiro952e84a2010-05-06 14:35:29 -07002144 * For purposes of graying references, we don't need to do
Carl Shapiro583d64c2010-05-04 10:44:47 -07002145 * anything here, because all of the native "ins" were copied
2146 * from registers in the caller's stack frame and won't be
2147 * changed (an interpreted method can freely use registers
2148 * with parameters like any other register, but natives don't
2149 * work that way).
2150 *
2151 * However, we need to ensure that references visible to
2152 * native methods don't move around. We can do a precise scan
2153 * of the arguments by examining the method signature.
2154 */
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002155 LOG_PIN("+++ native scan %s.%s\n",
2156 method->clazz->descriptor, method->name);
Carl Shapiro583d64c2010-05-04 10:44:47 -07002157 assert(method->registersSize == method->insSize);
2158 if (!dvmIsStaticMethod(method)) {
2159 /* grab the "this" pointer */
2160 obj = (Object *)*framePtr++;
2161 if (obj == NULL) {
2162 /*
2163 * This can happen for the "fake" entry frame inserted
2164 * for threads created outside the VM. There's no actual
2165 * call so there's no object. If we changed the fake
2166 * entry method to be declared "static" then this
2167 * situation should never occur.
2168 */
2169 } else {
2170 assert(dvmIsValidObject(obj));
2171 pinObject(obj);
2172 }
2173 }
2174 shorty = method->shorty+1; // skip return value
2175 for (i = method->registersSize - 1; i >= 0; i--, framePtr++) {
2176 switch (*shorty++) {
2177 case 'L':
2178 obj = (Object *)*framePtr;
2179 if (obj != NULL) {
2180 assert(dvmIsValidObject(obj));
2181 pinObject(obj);
2182 }
2183 break;
2184 case 'D':
2185 case 'J':
2186 framePtr++;
2187 break;
2188 default:
2189 /* 32-bit non-reference value */
2190 obj = (Object *)*framePtr; // debug, remove
2191 if (dvmIsValidObject(obj)) { // debug, remove
2192 /* if we see a lot of these, our scan might be off */
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002193 LOG_PIN("+++ did NOT pin obj %p\n", obj);
Carl Shapiro583d64c2010-05-04 10:44:47 -07002194 }
2195 break;
2196 }
2197 }
2198 }
2199 /*
2200 * Don't fall into an infinite loop if things get corrupted.
2201 */
2202 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
2203 saveArea->prevFrame == NULL);
2204 }
2205}
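
/*
 * Worked example for the shorty walk in pinNativeMethodArguments(),
 * using an illustrative signature: for a non-static native method with
 * shorty "VLJI" the return type 'V' is skipped, the "this" reference is
 * pinned before the loop, 'L' pins the object argument, 'J' advances
 * past the two registers holding the long, and 'I' falls through to the
 * default case and is left untouched.
 */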
2206
2207static void pinThread(const Thread *thread)
Carl Shapirod28668c2010-04-15 16:10:00 -07002208{
2209 assert(thread != NULL);
2210 assert(thread->status != THREAD_RUNNING ||
2211 thread->isSuspended ||
2212 thread == dvmThreadSelf());
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002213 LOG_PIN("pinThread(thread=%p)", thread);
Carl Shapirod28668c2010-04-15 16:10:00 -07002214
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002215 LOG_PIN("Pin native method arguments");
Carl Shapiro583d64c2010-05-04 10:44:47 -07002216 pinNativeMethodArguments(thread);
2217
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002218 LOG_PIN("Pin internalLocalRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07002219 pinReferenceTable(&thread->internalLocalRefTable);
2220
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002221 LOG_PIN("Pin jniLocalRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07002222 pinReferenceTable(&thread->jniLocalRefTable);
2223
2224 /* Can the check be pushed into the promote routine? */
2225 if (thread->jniMonitorRefTable.table) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002226 LOG_PIN("Pin jniMonitorRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07002227 pinReferenceTable(&thread->jniMonitorRefTable);
2228 }
2229}
2230
2231static void pinThreadList(void)
2232{
2233 Thread *thread;
2234
2235 dvmLockThreadList(dvmThreadSelf());
2236 thread = gDvm.threadList;
2237 while (thread) {
2238 pinThread(thread);
2239 thread = thread->next;
2240 }
2241 dvmUnlockThreadList();
2242}
2243
2244/*
2245 * Heap block scavenging.
2246 */
2247
2248/*
2249 * Scavenge objects in the current block. Scavenging terminates when
2250 * the cursor reaches the highest address in the block or when it
2251 * reaches a run of zero words that extends to the end of the block.
2252 */
2253static void scavengeBlock(HeapSource *heapSource, size_t block)
2254{
2255 u1 *cursor;
2256 u1 *end;
2257 size_t size;
2258
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002259 LOG_SCAV("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);
Carl Shapirod28668c2010-04-15 16:10:00 -07002260
2261 assert(heapSource != NULL);
2262 assert(block < heapSource->totalBlocks);
2263 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
2264
2265 cursor = blockToAddress(heapSource, block);
2266 end = cursor + BLOCK_SIZE;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002267 LOG_SCAV("scavengeBlock start=%p, end=%p", cursor, end);
Carl Shapirod28668c2010-04-15 16:10:00 -07002268
2269 /* Parse and scavenge the current block. */
2270 size = 0;
2271 while (cursor < end) {
2272 u4 word = *(u4 *)cursor;
2273 if (word != 0) {
Carl Shapiro2396fda2010-05-03 20:14:14 -07002274 scavengeObject((Object *)cursor);
2275 size = objectSize((Object *)cursor);
Carl Shapirod28668c2010-04-15 16:10:00 -07002276 size = alignUp(size, ALLOC_ALIGNMENT);
2277 cursor += size;
2278 } else if (word == 0 && cursor == (u1 *)gDvm.unlinkedJavaLangClass) {
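            /*
             * gDvm.unlinkedJavaLangClass is a placeholder ClassObject whose
             * own clazz word is NULL, so a zero word at its address marks a
             * real object rather than padding; skip it by its known size.
             */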
2279 size = sizeof(ClassObject);
2280 cursor += size;
2281 } else {
2282 /* Check for padding. */
2283 while (*(u4 *)cursor == 0) {
2284 cursor += 4;
2285 if (cursor == end) break;
2286 }
2287 /* Punt if something went wrong. */
2288 assert(cursor == end);
2289 }
2290 }
2291}
2292
Carl Shapiro2396fda2010-05-03 20:14:14 -07002293static size_t objectSize(const Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07002294{
2295 size_t size;
2296
Carl Shapiro2396fda2010-05-03 20:14:14 -07002297 assert(obj != NULL);
2298 assert(obj->clazz != NULL);
Carl Shapirod28668c2010-04-15 16:10:00 -07002299 if (obj->clazz == gDvm.classJavaLangClass ||
2300 obj->clazz == gDvm.unlinkedJavaLangClass) {
2301 size = dvmClassObjectSize((ClassObject *)obj);
2302 } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
2303 size = dvmArrayObjectSize((ArrayObject *)obj);
2304 } else {
Carl Shapiro2396fda2010-05-03 20:14:14 -07002305 assert(obj->clazz->objectSize != 0);
Carl Shapirod28668c2010-04-15 16:10:00 -07002306 size = obj->clazz->objectSize;
2307 }
Carl Shapiro2396fda2010-05-03 20:14:14 -07002308 if (LW_HASH_STATE(obj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
2309 size += sizeof(u4);
2310 }
Carl Shapirod28668c2010-04-15 16:10:00 -07002311 return size;
2312}
2313
2314static void verifyBlock(HeapSource *heapSource, size_t block)
2315{
2316 u1 *cursor;
2317 u1 *end;
2318 size_t size;
2319
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002320 // LOG_VER("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);
Carl Shapirod28668c2010-04-15 16:10:00 -07002321
2322 assert(heapSource != NULL);
2323 assert(block < heapSource->totalBlocks);
2324 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
2325
2326 cursor = blockToAddress(heapSource, block);
2327 end = cursor + BLOCK_SIZE;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002328 // LOG_VER("verifyBlock start=%p, end=%p", cursor, end);
Carl Shapirod28668c2010-04-15 16:10:00 -07002329
2330    /* Parse and verify the current block. */
2331 size = 0;
2332 while (cursor < end) {
2333 u4 word = *(u4 *)cursor;
2334 if (word != 0) {
2335 dvmVerifyObject((Object *)cursor);
2336 size = objectSize((Object *)cursor);
2337 size = alignUp(size, ALLOC_ALIGNMENT);
2338 cursor += size;
2339 } else if (word == 0 && cursor == (u1 *)gDvm.unlinkedJavaLangClass) {
2340 size = sizeof(ClassObject);
2341 cursor += size;
2342 } else {
2343 /* Check for padding. */
2344            while (*(u4 *)cursor == 0) {
2345 cursor += 4;
2346 if (cursor == end) break;
2347 }
2348 /* Punt if something went wrong. */
2349 assert(cursor == end);
2350 }
2351 }
2352}
2353
2354static void describeBlockQueue(const HeapSource *heapSource)
2355{
2356 size_t block, count;
2357 char space;
2358
2359 block = heapSource->queueHead;
2360 count = 0;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002361 LOG_SCAV(">>> describeBlockQueue(heapSource=%p)", heapSource);
Carl Shapirod28668c2010-04-15 16:10:00 -07002362 /* Count the number of blocks enqueued. */
2363 while (block != QUEUE_TAIL) {
2364 block = heapSource->blockQueue[block];
2365 ++count;
2366 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002367 LOG_SCAV("blockQueue %zu elements, enqueued %zu",
Carl Shapirod28668c2010-04-15 16:10:00 -07002368 count, heapSource->queueSize);
2369 block = heapSource->queueHead;
2370 while (block != QUEUE_TAIL) {
2371 space = heapSource->blockSpace[block];
2372        LOG_SCAV("block=%zu@%p,space=%d", block, blockToAddress(heapSource,block), space);
Carl Shapirod28668c2010-04-15 16:10:00 -07002373 block = heapSource->blockQueue[block];
2374 }
2375
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002376 LOG_SCAV("<<< describeBlockQueue(heapSource=%p)", heapSource);
Carl Shapirod28668c2010-04-15 16:10:00 -07002377}
2378
2379/*
2380 * Blackens promoted objects.
2381 */
2382static void scavengeBlockQueue(void)
2383{
2384 HeapSource *heapSource;
2385 size_t block;
2386
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002387 LOG_SCAV(">>> scavengeBlockQueue()");
Carl Shapirod28668c2010-04-15 16:10:00 -07002388 heapSource = gDvm.gcHeap->heapSource;
2389 describeBlockQueue(heapSource);
2390 while (heapSource->queueHead != QUEUE_TAIL) {
2391 block = heapSource->queueHead;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002392 LOG_SCAV("Dequeueing block %zu\n", block);
Carl Shapirod28668c2010-04-15 16:10:00 -07002393 scavengeBlock(heapSource, block);
2394 heapSource->queueHead = heapSource->blockQueue[block];
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002395 LOG_SCAV("New queue head is %zu\n", heapSource->queueHead);
Carl Shapirod28668c2010-04-15 16:10:00 -07002396 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002397 LOG_SCAV("<<< scavengeBlockQueue()");
Carl Shapirod28668c2010-04-15 16:10:00 -07002398}
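
/*
 * Worked example with illustrative indices: if queueHead == 3,
 * blockQueue[3] == 7, and blockQueue[7] == QUEUE_TAIL, the loop above
 * scavenges block 3 and then block 7.  Blocks promoted while those two
 * are being scavenged are threaded onto the same queue, so the loop
 * terminates only when no further objects are being grayed.
 */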
2399
2400/*
2401 * Scan the block list and verify all blocks that are marked as being
2402 * in new space. This should be parametrized so we can invoke this
2403 * routine outside of the context of a collection.
2404 */
2405static void verifyNewSpace(void)
2406{
2407 HeapSource *heapSource;
2408 size_t i;
2409 size_t c0, c1, c2, c7;
2410
2411 c0 = c1 = c2 = c7 = 0;
2412 heapSource = gDvm.gcHeap->heapSource;
2413 for (i = 0; i < heapSource->totalBlocks; ++i) {
2414 switch (heapSource->blockSpace[i]) {
2415 case BLOCK_FREE: ++c0; break;
2416 case BLOCK_TO_SPACE: ++c1; break;
2417 case BLOCK_FROM_SPACE: ++c2; break;
2418 case BLOCK_CONTINUED: ++c7; break;
2419 default: assert(!"reached");
2420 }
2421 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002422 LOG_VER("Block Demographics: "
2423 "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu",
2424 c0, c1, c2, c7);
Carl Shapirod28668c2010-04-15 16:10:00 -07002425 for (i = 0; i < heapSource->totalBlocks; ++i) {
2426 if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) {
2427 continue;
2428 }
2429 verifyBlock(heapSource, i);
2430 }
2431}
2432
2433static void scavengeGlobals(void)
2434{
2435 scavengeReference((Object **)(void *)&gDvm.classJavaLangClass);
2436 scavengeReference((Object **)(void *)&gDvm.classJavaLangClassArray);
2437 scavengeReference((Object **)(void *)&gDvm.classJavaLangError);
2438 scavengeReference((Object **)(void *)&gDvm.classJavaLangObject);
2439 scavengeReference((Object **)(void *)&gDvm.classJavaLangObjectArray);
2440 scavengeReference((Object **)(void *)&gDvm.classJavaLangRuntimeException);
2441 scavengeReference((Object **)(void *)&gDvm.classJavaLangString);
2442 scavengeReference((Object **)(void *)&gDvm.classJavaLangThread);
2443 scavengeReference((Object **)(void *)&gDvm.classJavaLangVMThread);
2444 scavengeReference((Object **)(void *)&gDvm.classJavaLangThreadGroup);
2445 scavengeReference((Object **)(void *)&gDvm.classJavaLangThrowable);
2446 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElement);
2447 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElementArray);
2448 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArray);
2449 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArrayArray);
2450 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectAccessibleObject);
2451 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructor);
2452 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructorArray);
2453 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectField);
2454 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectFieldArray);
2455 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethod);
2456 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethodArray);
2457 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectProxy);
2458 scavengeReference((Object **)(void *)&gDvm.classJavaLangExceptionInInitializerError);
2459 scavengeReference((Object **)(void *)&gDvm.classJavaLangRefReference);
2460 scavengeReference((Object **)(void *)&gDvm.classJavaNioReadWriteDirectByteBuffer);
2461 scavengeReference((Object **)(void *)&gDvm.classJavaSecurityAccessController);
2462 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
2463 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
2464 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray);
2465 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyNioInternalDirectBuffer);
2466 scavengeReference((Object **)(void *)&gDvm.classArrayBoolean);
2467 scavengeReference((Object **)(void *)&gDvm.classArrayChar);
2468 scavengeReference((Object **)(void *)&gDvm.classArrayFloat);
2469 scavengeReference((Object **)(void *)&gDvm.classArrayDouble);
2470 scavengeReference((Object **)(void *)&gDvm.classArrayByte);
2471 scavengeReference((Object **)(void *)&gDvm.classArrayShort);
2472 scavengeReference((Object **)(void *)&gDvm.classArrayInt);
2473 scavengeReference((Object **)(void *)&gDvm.classArrayLong);
2474}
2475
2476void describeHeap(void)
2477{
2478 HeapSource *heapSource;
2479
2480 heapSource = gDvm.gcHeap->heapSource;
2481 describeBlocks(heapSource);
2482}
2483
2484/*
2485 * The collection interface. Collection has a few distinct phases.
2486 * The first is flipping, also known as condemning or whitening, the
2487 * heap. The second is promoting every object that is pointed to by a
2488 * pinned or ambiguous reference. The third is tracing from the stacks,
2489 * registers, and various globals. Lastly, a verification of the heap
2490 * is performed; this final phase should be optional.
2491 */
2492void dvmScavengeRoots(void) /* Needs a new name badly */
2493{
2495 GcHeap *gcHeap;
2496
2497 {
2498 size_t alloc, unused, total;
2499
2500 room(&alloc, &unused, &total);
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002501 LOG_SCAV("BEFORE GC: %zu alloc, %zu free, %zu total.",
2502 alloc, unused, total);
Carl Shapirod28668c2010-04-15 16:10:00 -07002503 }
2504
2505 gcHeap = gDvm.gcHeap;
2506 dvmHeapSourceFlip();
2507
2508 /*
2509 * Promote blocks with stationary objects.
2510 */
Carl Shapirod28668c2010-04-15 16:10:00 -07002511 pinThreadList();
Carl Shapirod28668c2010-04-15 16:10:00 -07002512 pinReferenceTable(&gDvm.jniGlobalRefTable);
Carl Shapirod28668c2010-04-15 16:10:00 -07002513 pinReferenceTable(&gDvm.jniPinRefTable);
Carl Shapirod28668c2010-04-15 16:10:00 -07002514 pinReferenceTable(&gcHeap->nonCollectableRefs);
Carl Shapirod28668c2010-04-15 16:10:00 -07002515 pinHashTableEntries(gDvm.loadedClasses);
Carl Shapirod28668c2010-04-15 16:10:00 -07002516 pinPrimitiveClasses();
Carl Shapirod28668c2010-04-15 16:10:00 -07002517 pinInternedStrings();
2518
2519 // describeBlocks(gcHeap->heapSource);
2520
2521 /*
2522     * Create the first open new-space page right here.
2523 */
2524
2525 /* Reset allocation to an unallocated block. */
2526 gDvm.gcHeap->heapSource->allocPtr = allocateBlocks(gDvm.gcHeap->heapSource, 1);
2527 gDvm.gcHeap->heapSource->allocLimit = gDvm.gcHeap->heapSource->allocPtr + BLOCK_SIZE;
2528 /*
2529 * Hack: promote the empty block allocated above. If the
2530 * promotions that occurred above did not actually gray any
2531 * objects, the block queue may be empty. We must force a
2532 * promotion to be safe.
2533 */
2534 promoteBlockByAddr(gDvm.gcHeap->heapSource, gDvm.gcHeap->heapSource->allocPtr);
2535
2536 /*
2537 * Scavenge blocks and relocate movable objects.
2538 */
2539
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002540 LOG_SCAV("Scavenging gDvm.threadList");
Carl Shapirod28668c2010-04-15 16:10:00 -07002541 scavengeThreadList();
2542
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002543 LOG_SCAV("Scavenging gDvm.gcHeap->referenceOperations");
Carl Shapiro7800c092010-05-11 13:46:29 -07002544 scavengeLargeHeapRefTable(gcHeap->referenceOperations, true);
Carl Shapirod28668c2010-04-15 16:10:00 -07002545
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002546 LOG_SCAV("Scavenging gDvm.gcHeap->pendingFinalizationRefs");
Carl Shapiro7800c092010-05-11 13:46:29 -07002547 scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs, false);
Carl Shapirod28668c2010-04-15 16:10:00 -07002548
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002549 LOG_SCAV("Scavenging random global stuff");
Carl Shapirod28668c2010-04-15 16:10:00 -07002550 scavengeReference(&gDvm.outOfMemoryObj);
2551 scavengeReference(&gDvm.internalErrorObj);
2552 scavengeReference(&gDvm.noClassDefFoundErrorObj);
2553
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002554 LOG_SCAV("Scavenging gDvm.dbgRegistry");
Carl Shapirod28668c2010-04-15 16:10:00 -07002555 scavengeHashTable(gDvm.dbgRegistry);
2556
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002557 // LOG_SCAV("Scavenging gDvm.internedString");
Carl Shapirod28668c2010-04-15 16:10:00 -07002558 scavengeInternedStrings();
2559
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002560 LOG_SCAV("Root scavenge has completed.");
Carl Shapirod28668c2010-04-15 16:10:00 -07002561
2562 scavengeBlockQueue();
2563
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002564 LOG_SCAV("Re-snap global class pointers.");
Carl Shapirod28668c2010-04-15 16:10:00 -07002565 scavengeGlobals();
2566
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002567 LOG_SCAV("New space scavenge has completed.");
Carl Shapirod28668c2010-04-15 16:10:00 -07002568
2569 /*
Carl Shapiro952e84a2010-05-06 14:35:29 -07002570 * Process reference objects in strength order.
2571 */
2572
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002573 LOG_REF("Processing soft references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002574 preserveSoftReferences(&gDvm.gcHeap->softReferences);
2575 clearWhiteReferences(&gDvm.gcHeap->softReferences);
2576
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002577 LOG_REF("Processing weak references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002578 clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2579
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002580 LOG_REF("Finding finalizations...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002581 processFinalizableReferences();
2582
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002583 LOG_REF("Processing f-reachable soft references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002584 clearWhiteReferences(&gDvm.gcHeap->softReferences);
2585
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002586 LOG_REF("Processing f-reachable weak references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002587 clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2588
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002589 LOG_REF("Processing phantom references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002590 clearWhiteReferences(&gDvm.gcHeap->phantomReferences);
2591
2592 /*
Carl Shapirod28668c2010-04-15 16:10:00 -07002593 * Verify the stack and heap.
2594 */
Carl Shapiro7800c092010-05-11 13:46:29 -07002595 verifyLargeHeapRefTable(gcHeap->referenceOperations, true);
2596 verifyLargeHeapRefTable(gcHeap->pendingFinalizationRefs, false);
Carl Shapirod28668c2010-04-15 16:10:00 -07002597 verifyInternedStrings();
Carl Shapirod28668c2010-04-15 16:10:00 -07002598 verifyThreadList();
Carl Shapirod28668c2010-04-15 16:10:00 -07002599 verifyNewSpace();
2600
Carl Shapirod28668c2010-04-15 16:10:00 -07002601 //describeBlocks(gcHeap->heapSource);
2602
2603 clearFromSpace(gcHeap->heapSource);
2604
2605 {
2606 size_t alloc, rem, total;
2607
2608 room(&alloc, &rem, &total);
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002609 LOG_SCAV("AFTER GC: %zu alloc, %zu free, %zu total.", alloc, rem, total);
Carl Shapirod28668c2010-04-15 16:10:00 -07002610 }
2611}
2612
2613/*
2614 * Interface compatibility routines.
2615 */
2616
2617void dvmClearWhiteRefs(Object **list)
2618{
Carl Shapiro952e84a2010-05-06 14:35:29 -07002619 /* do nothing */
Carl Shapirod28668c2010-04-15 16:10:00 -07002620 assert(*list == NULL);
2621}
2622
2623void dvmHandleSoftRefs(Object **list)
2624{
Carl Shapiro952e84a2010-05-06 14:35:29 -07002625 /* do nothing */
Carl Shapirod28668c2010-04-15 16:10:00 -07002626 assert(*list == NULL);
2627}
2628
2629bool dvmHeapBeginMarkStep(GcMode mode)
2630{
2631 /* do nothing */
2632 return true;
2633}
2634
2635void dvmHeapFinishMarkStep(void)
2636{
2637 /* do nothing */
2638}
2639
2640void dvmHeapMarkRootSet(void)
2641{
2642 /* do nothing */
2643}
2644
2645void dvmHeapScanMarkedObjects(void)
2646{
2647 dvmScavengeRoots();
2648}
2649
2650void dvmHeapScheduleFinalizations(void)
2651{
2652 /* do nothing */
2653}
2654
2655void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
2656{
2657 /* do nothing */
2658}
2659
2660void dvmMarkObjectNonNull(const Object *obj)
2661{
2662 assert(!"implemented");
2663}