/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <limits.h>
#include <sys/mman.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/Verify.h"
#include "alloc/clz.h"

/*
 * A "mostly copying", generational, garbage collector.
 *
 * TODO: we allocate our own contiguous tract of page frames to back
 * object allocations.  To cooperate with other heaps active in the
 * virtual machine we need to move the responsibility of allocating
 * pages someplace outside of this code.
 *
 * The other major data structures that maintain the state of the heap
 * are the block space table and the block queue.
 *
 * The block space table records the state of a block.  We must track
 * whether a block is:
 *
 * - Free or allocated in some space.
 *
 * - If the block holds part of a large object allocation, whether the
 *   block is the initial or a continued block of the allocation.
 *
 * - Whether the block is pinned, that is to say whether at least one
 *   object in the block must remain stationary.  Only needed during a
 *   GC.
 *
 * - Which space the object belongs to.  At present this means
 *   from-space or to-space.
 *
 * The block queue is used during garbage collection.  Unlike Cheney's
 * algorithm, from-space and to-space are not contiguous.  Therefore,
 * one cannot maintain the state of the copy with just two pointers.
 * The block queue exists to thread lists of blocks from the various
 * spaces together.
 *
 * Additionally, we record the free space frontier of the heap, as
 * well as the address of the first object within a block, which is
 * required to copy objects following a large object (not currently
 * implemented).  This is stored in the heap source structure.  This
 * should be moved elsewhere to support in-line allocations from Java
 * threads.
 *
 * Allocation requests are satisfied by reserving storage from one or
 * more contiguous blocks.  Objects that are small enough to fit
 * inside a block are packed together within a block.  Objects that
 * are larger than a block are allocated from contiguous sequences of
 * blocks.  When half the available blocks are filled, a garbage
 * collection occurs.  We "flip" spaces (exchange from- and to-space),
 * copy live objects into to-space, and perform pointer adjustment.
 *
 * Copying is made more complicated by the requirement that some
 * objects must not be moved.  This property is known as "pinning".
 * These objects must be dealt with specially.  We use Bartlett's
 * scheme; blocks containing such objects are grayed (promoted) at the
 * start of a garbage collection.  By virtue of this trick, tracing
 * from the roots proceeds as usual but all objects on those pages are
 * considered promoted and therefore not moved.
 *
 * TODO: there is sufficient information within the garbage collector
 * to implement Attardi's scheme for evacuating unpinned objects from
 * a page that is otherwise pinned.  This would eliminate false
 * retention caused by the large pinning granularity.
 *
 * We need a scheme for medium and large objects.  Ignore that for
 * now, we can return to this later.
 *
 * Eventually we need to worry about promoting objects out of the
 * copy-collected heap (tenuring) into a less volatile space.  Copying
 * may not always be the best policy for such spaces.  We should
 * consider a variant of mark, sweep, compact.
 *
 * The block scheme allows us to use VM page faults to maintain a
 * write barrier.  Consider having a special leaf state for a page.
 *
 * Bibliography:
 *
 * C. J. Cheney. 1970. A non-recursive list compacting
 * algorithm. CACM. 13-11 pp677--678.
 *
 * Joel F. Bartlett. 1988. Compacting Garbage Collection with
 * Ambiguous Roots. Digital Equipment Corporation.
 *
 * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up
 * Generations and C++. Digital Equipment Corporation.
 *
 * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage
 * Collection in Uncooperative Environments. Digital Equipment
 * Corporation.
 *
 * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory
 * Management Framework. TR-94-010
 *
 * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable
 * memory management framework for C++. Software -- Practice and
 * Experience. 28(11), 1143-1183.
 *
 */
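
/*
 * Editorial sketch of one collection cycle, pieced together from the
 * routines below (see each routine for the authoritative details):
 *
 *   1. dvmHeapSourceFlip re-labels every to-space block as from-space
 *      and resets the block queue.
 *   2. Roots that must not move are pinned: promoteBlockByAddr marks
 *      their blocks as to-space in place and enqueues them.  Other
 *      roots are scavenged, transporting their referents to to-space.
 *   3. scavengeBlockQueue drains the queue, blackening the gray
 *      objects it finds until no gray blocks remain.
 *   4. clearFromSpace returns the remaining from-space blocks to the
 *      free list.
 */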

#define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0]))

#if 0
#define LOG_ALLOC LOGI
#define LOG_PIN LOGI
#define LOG_PROM LOGI
#define LOG_REF LOGI
#define LOG_SCAV LOGI
#define LOG_TRAN LOGI
#define LOG_VER LOGI
#else
#define LOG_ALLOC(...) ((void)0)
#define LOG_PIN(...) ((void)0)
#define LOG_PROM(...) ((void)0)
#define LOG_REF(...) ((void)0)
#define LOG_SCAV(...) ((void)0)
#define LOG_TRAN(...) ((void)0)
#define LOG_VER(...) ((void)0)
#endif

static void enqueueBlock(HeapSource *heapSource, size_t block);
static void scavengeReference(Object **obj);
static bool toSpaceContains(const void *addr);
static bool fromSpaceContains(const void *addr);
static size_t sumHeapBitmap(const HeapBitmap *bitmap);
static size_t objectSize(const Object *obj);
static void scavengeDataObject(Object *obj);
static void scavengeBlockQueue(void);

/*
 * We use 512-byte blocks.
 */
enum { BLOCK_SHIFT = 9 };
enum { BLOCK_SIZE = 1 << BLOCK_SHIFT };

/*
 * Space identifiers, stored into the blockSpace array.
 */
enum {
    BLOCK_FREE = 0,
    BLOCK_FROM_SPACE = 1,
    BLOCK_TO_SPACE = 2,
    BLOCK_CONTINUED = 7
};
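
/*
 * Editorial example: a 1200-byte allocation spans three 512-byte
 * blocks.  allocateBlocks marks the first block with a space
 * identifier (currently always BLOCK_TO_SPACE) and the remaining two
 * blocks BLOCK_CONTINUED.
 */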

/*
 * Alignment for all allocations, in bytes.
 */
enum { ALLOC_ALIGNMENT = 8 };

/*
 * Sentinel value for the queue end.
 */
#define QUEUE_TAIL (~(size_t)0)
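
/*
 * Editorial note on the queue encoding: the queue is threaded through
 * the blockQueue array itself.  blockQueue[b] holds the number of the
 * block enqueued after block b, or QUEUE_TAIL for the last entry.
 * queueHead is QUEUE_TAIL when the queue is empty, which doubles as
 * the "no collection in progress" flag tested by allocateBlocks.
 */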

struct HeapSource {

    /* The base address of backing store. */
    u1 *blockBase;

    /* Total number of blocks available for allocation. */
    size_t totalBlocks;
    size_t allocBlocks;

    /*
     * The scavenger work queue.  Implemented as an array of index
     * values into the queue.
     */
    size_t *blockQueue;

    /*
     * Base and limit blocks.  Basically the shifted start address of
     * the block.  We convert blocks to a relative number when
     * indexing in the block queue.  TODO: make the block queue base
     * relative rather than the index into the block queue.
     */
    size_t baseBlock, limitBlock;

    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* The space of the current block: 0 (free), 1 or 2. */
    char *blockSpace;

    /* Start of free space in the current block. */
    u1 *allocPtr;
    /* Exclusive limit of free space in the current block. */
    u1 *allocLimit;

    HeapBitmap allocBits;

    /*
     * The starting size of the heap.  This value is the same as the
     * value provided to the -Xms flag.
     */
    size_t minimumSize;

    /*
     * The maximum size of the heap.  This value is the same as the
     * -Xmx flag.
     */
    size_t maximumSize;

    /*
     * The current, committed size of the heap.  At present, this is
     * equivalent to the maximumSize.
     */
    size_t currentSize;

    size_t bytesAllocated;
};
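
/*
 * Editorial note: these helpers assume n is a power of two; x & -n
 * clears the low-order bits.  For example, alignDown(1000, 512) == 512
 * and alignUp(1000, 512) == 1024.
 */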
static unsigned long alignDown(unsigned long x, unsigned long n)
{
    return x & -n;
}

static unsigned long alignUp(unsigned long x, unsigned long n)
{
    return alignDown(x + (n - 1), n);
}

static void describeBlocks(const HeapSource *heapSource)
{
    size_t i;

    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if ((i % 32) == 0) putchar('\n');
        printf("%d ", heapSource->blockSpace[i]);
    }
    putchar('\n');
}

/*
 * Virtual memory interface.
 */

static void *virtualAlloc(size_t length)
{
    void *addr;
    int flags, prot;

    flags = MAP_PRIVATE | MAP_ANONYMOUS;
    prot = PROT_READ | PROT_WRITE;
    addr = mmap(NULL, length, prot, flags, -1, 0);
    if (addr == MAP_FAILED) {
        LOGE_HEAP("mmap: %s", strerror(errno));
        addr = NULL;
    }
    return addr;
}

static void virtualFree(void *addr, size_t length)
{
    int res;

    assert(addr != NULL);
    assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0);
    res = munmap(addr, length);
    if (res == -1) {
        LOGE_HEAP("munmap: %s", strerror(errno));
    }
}

#ifndef NDEBUG
static int isValidAddress(const HeapSource *heapSource, const u1 *addr)
{
    size_t block;

    block = (uintptr_t)addr >> BLOCK_SHIFT;
    return heapSource->baseBlock <= block &&
           heapSource->limitBlock > block;
}
#endif

/*
 * Iterate over the block map looking for a contiguous run of free
 * blocks.
 */
static void *allocateBlocks(HeapSource *heapSource, size_t blocks)
{
    void *addr;
    size_t allocBlocks, totalBlocks;
    size_t i, j;

    allocBlocks = heapSource->allocBlocks;
    totalBlocks = heapSource->totalBlocks;
    /* Check underflow. */
    assert(blocks != 0);
    /* Check overflow. */
    if (allocBlocks + blocks > totalBlocks / 2) {
        return NULL;
    }
    /* Scan block map. */
    for (i = 0; i < totalBlocks; ++i) {
        /* Check fit, bounded so the scan cannot run past totalBlocks. */
        for (j = 0; j < blocks && i+j < totalBlocks; ++j) {
            if (heapSource->blockSpace[i+j] != BLOCK_FREE) {
                break;
            }
        }
        /* No fit? */
        if (j != blocks) {
            i += j;
            continue;
        }
        /* Fit, allocate. */
        heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */
        for (j = 1; j < blocks; ++j) {
            heapSource->blockSpace[i+j] = BLOCK_CONTINUED;
        }
        heapSource->allocBlocks += blocks;
        addr = &heapSource->blockBase[i*BLOCK_SIZE];
        memset(addr, 0, blocks*BLOCK_SIZE);
        /* Collecting? */
        if (heapSource->queueHead != QUEUE_TAIL) {
            LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
            /*
             * This allocation was made on behalf of the transporter
             * when it shaded a white object gray.  We enqueue the
             * block so the scavenger can further shade the gray
             * objects black.
             */
            enqueueBlock(heapSource, i);
        }

        return addr;
    }
    /* Insufficient space, fail. */
    LOGE("Insufficient space, %zu blocks, %zu blocks allocated and %zu bytes allocated",
         heapSource->totalBlocks,
         heapSource->allocBlocks,
         heapSource->bytesAllocated);
    return NULL;
}

/* Converts an absolute address to a relative block number. */
static size_t addressToBlock(const HeapSource *heapSource, const void *addr)
{
    assert(heapSource != NULL);
    assert(isValidAddress(heapSource, addr));
    return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock;
}

/* Converts a relative block number to an absolute address. */
static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
{
    u1 *addr;

    addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
    assert(isValidAddress(heapSource, addr));
    return addr;
}

static void clearBlock(HeapSource *heapSource, size_t block)
{
    u1 *addr;
    size_t i;

    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    addr = heapSource->blockBase + block*BLOCK_SIZE;
    memset(addr, 0xCC, BLOCK_SIZE);
    for (i = 0; i < BLOCK_SIZE; i += 8) {
        dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i);
    }
}

static void clearFromSpace(HeapSource *heapSource)
{
    size_t i, count;

    assert(heapSource != NULL);
    i = count = 0;
    while (i < heapSource->totalBlocks) {
        if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) {
            ++i;
            continue;
        }
        heapSource->blockSpace[i] = BLOCK_FREE;
        clearBlock(heapSource, i);
        ++i;
        ++count;
        while (i < heapSource->totalBlocks &&
               heapSource->blockSpace[i] == BLOCK_CONTINUED) {
            heapSource->blockSpace[i] = BLOCK_FREE;
            clearBlock(heapSource, i);
            ++i;
            ++count;
        }
    }
    LOG_SCAV("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE);
}

/*
 * Appends the given block to the block queue.  The block queue is
 * processed in-order by the scavenger.
 */
static void enqueueBlock(HeapSource *heapSource, size_t block)
{
    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    if (heapSource->queueHead != QUEUE_TAIL) {
        heapSource->blockQueue[heapSource->queueTail] = block;
    } else {
        heapSource->queueHead = block;
    }
    heapSource->blockQueue[block] = QUEUE_TAIL;
    heapSource->queueTail = block;
    ++heapSource->queueSize;
}

/*
 * Grays all objects within the block corresponding to the given
 * address.
 */
static void promoteBlockByAddr(HeapSource *heapSource, const void *addr)
{
    size_t block;

    block = addressToBlock(heapSource, (const u1 *)addr);
    if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
        // LOG_PROM("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
        heapSource->blockSpace[block] = BLOCK_TO_SPACE;
        enqueueBlock(heapSource, block);
        /* TODO(cshapiro): count continued blocks? */
        heapSource->allocBlocks += 1;
    } else {
        // LOG_PROM("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
    }
}

GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
{
    GcHeap* gcHeap;
    HeapSource *heapSource;

    assert(startSize <= absoluteMaxSize);

    heapSource = malloc(sizeof(*heapSource));
    assert(heapSource != NULL);
    memset(heapSource, 0, sizeof(*heapSource));

    heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE);
    heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE);

    heapSource->currentSize = heapSource->maximumSize;

    /* Allocate underlying storage for blocks. */
    heapSource->blockBase = virtualAlloc(heapSource->maximumSize);
    assert(heapSource->blockBase != NULL);
    heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT;
    heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT;

    heapSource->allocBlocks = 0;
    heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock);

    assert(heapSource->totalBlocks == heapSource->maximumSize / BLOCK_SIZE);

    {
        size_t size = sizeof(heapSource->blockQueue[0]);
        heapSource->blockQueue = malloc(heapSource->totalBlocks*size);
        assert(heapSource->blockQueue != NULL);
        memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size);
        heapSource->queueHead = QUEUE_TAIL;
    }

    /* Byte indicating space residence or free status of block. */
    {
        size_t size = sizeof(heapSource->blockSpace[0]);
        heapSource->blockSpace = malloc(heapSource->totalBlocks*size);
        assert(heapSource->blockSpace != NULL);
        memset(heapSource->blockSpace, 0, heapSource->totalBlocks*size);
    }

    dvmHeapBitmapInit(&heapSource->allocBits,
                      heapSource->blockBase,
                      heapSource->maximumSize,
                      "blockBase");

    /* Initialize allocation pointers. */
    heapSource->allocPtr = allocateBlocks(heapSource, 1);
    heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;

    gcHeap = malloc(sizeof(*gcHeap));
    assert(gcHeap != NULL);
    memset(gcHeap, 0, sizeof(*gcHeap));
    gcHeap->heapSource = heapSource;

    return gcHeap;
}

/*
 * Perform any required heap initializations after forking from the
 * zygote process.  This is a no-op for the time being.  Eventually
 * this will demarcate the shared region of the heap.
 */
bool dvmHeapSourceStartupAfterZygote(void)
{
    return true;
}

bool dvmHeapSourceStartupBeforeFork(void)
{
    assert(!"implemented");
    return false;
}

void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL)
        return;
    free((*gcHeap)->heapSource->blockQueue);
    free((*gcHeap)->heapSource->blockSpace);
    virtualFree((*gcHeap)->heapSource->blockBase,
                (*gcHeap)->heapSource->maximumSize);
    free((*gcHeap)->heapSource);
    (*gcHeap)->heapSource = NULL;
    free(*gcHeap);
    *gcHeap = NULL;
}

size_t dvmHeapSourceGetValue(enum HeapSourceValueSpec spec,
                             size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *heapSource;
    size_t value;

    heapSource = gDvm.gcHeap->heapSource;
    switch (spec) {
    case HS_FOOTPRINT:
        value = heapSource->maximumSize;
        break;
    case HS_ALLOWED_FOOTPRINT:
        value = heapSource->maximumSize;
        break;
    case HS_BYTES_ALLOCATED:
        value = heapSource->bytesAllocated;
        break;
    case HS_OBJECTS_ALLOCATED:
        value = sumHeapBitmap(&heapSource->allocBits);
        break;
    default:
        assert(!"implemented");
        value = 0;
    }
    if (perHeapStats) {
        *perHeapStats = value;
    }
    return value;
}

/*
 * Performs a shallow copy of the allocation bitmap into the given
 * vector of heap bitmaps.
 */
void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
                                   size_t numHeaps)
{
    assert(!"implemented");
}

HeapBitmap *dvmHeapSourceGetLiveBits(void)
{
    return &gDvm.gcHeap->heapSource->allocBits;
}

/*
 * Allocate the specified number of bytes from the heap.  The
 * allocation cursor points into a block of free storage.  If the
 * given allocation fits in the remaining space of the block, we
 * advance the cursor and return a pointer to the free storage.  If
 * the allocation cannot fit in the current block but is smaller than
 * a block we request a new block and allocate from it instead.  If
 * the allocation is larger than a block we must allocate from a span
 * of contiguous blocks.
 */
void *dvmHeapSourceAlloc(size_t length)
{
    HeapSource *heapSource;
    unsigned char *addr;
    size_t aligned, available, blocks;

    heapSource = gDvm.gcHeap->heapSource;
    assert(heapSource != NULL);
    assert(heapSource->allocPtr != NULL);
    assert(heapSource->allocLimit != NULL);

    aligned = alignUp(length, ALLOC_ALIGNMENT);
    available = heapSource->allocLimit - heapSource->allocPtr;

    /* Try allocating inside the current block. */
    if (aligned <= available) {
        addr = heapSource->allocPtr;
        heapSource->allocPtr += aligned;
        heapSource->bytesAllocated += aligned;
        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
        return addr;
    }

    /* Try allocating in a new block. */
    if (aligned <= BLOCK_SIZE) {
        addr = allocateBlocks(heapSource, 1);
        if (addr != NULL) {
            heapSource->allocLimit = addr + BLOCK_SIZE;
            heapSource->allocPtr = addr + aligned;
            heapSource->bytesAllocated += aligned;
            dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
            /* TODO(cshapiro): pad out the current block. */
        }
        return addr;
    }

    /* Try allocating in a span of blocks. */
    blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE;

    addr = allocateBlocks(heapSource, blocks);
    /* Propagate failure upward. */
    if (addr != NULL) {
        heapSource->bytesAllocated += aligned;
        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
        /* TODO(cshapiro): pad out free space in the last block. */
    }
    return addr;
}

void *dvmHeapSourceAllocAndGrow(size_t size)
{
    return dvmHeapSourceAlloc(size);
}

/* TODO: refactor along with dvmHeapSourceAlloc */
void *allocateGray(size_t size)
{
    HeapSource *heapSource;
    void *addr;
    size_t block;

    /* TODO: add a check that we are in a GC. */
    heapSource = gDvm.gcHeap->heapSource;
    addr = dvmHeapSourceAlloc(size);
    assert(addr != NULL);
    block = addressToBlock(heapSource, (const u1 *)addr);
    if (heapSource->queueHead == QUEUE_TAIL) {
        /*
         * Forcibly append the underlying block to the queue.  This
         * condition occurs when referents are transported following
         * the initial trace.
         */
        enqueueBlock(heapSource, block);
        LOG_PROM("forced promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
    }
    return addr;
}

bool dvmHeapSourceContainsAddress(const void *ptr)
{
    HeapSource *heapSource = gDvm.gcHeap->heapSource;
    return dvmHeapBitmapCoversAddress(&heapSource->allocBits, ptr);
}

/*
 * Returns true if the given address is within the heap and points to
 * the header of a live object.
 */
bool dvmHeapSourceContains(const void *addr)
{
    HeapSource *heapSource;
    HeapBitmap *bitmap;

    heapSource = gDvm.gcHeap->heapSource;
    bitmap = &heapSource->allocBits;
    if (!dvmHeapBitmapCoversAddress(bitmap, addr)) {
        return false;
    } else {
        return dvmHeapBitmapIsObjectBitSet(bitmap, addr);
    }
}

bool dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
{
    assert(!"implemented");
    return false;
}

size_t dvmHeapSourceChunkSize(const void *ptr)
{
    assert(!"implemented");
    return 0;
}

size_t dvmHeapSourceFootprint(void)
{
    assert(!"implemented");
    return 0;
}

/*
 * Returns the "ideal footprint" which appears to be the number of
 * bytes currently committed to the heap.  This starts out at the
 * start size of the heap and grows toward the maximum size.
 */
size_t dvmHeapSourceGetIdealFootprint(void)
{
    return gDvm.gcHeap->heapSource->currentSize;
}

float dvmGetTargetHeapUtilization(void)
{
    return 0.5f;
}

void dvmSetTargetHeapUtilization(float newTarget)
{
    assert(newTarget > 0.0f && newTarget < 1.0f);
}

/*
 * Expands the size of the heap after a collection.  At present we
 * commit the pages for the maximum size of the heap, so this routine
 * is just a no-op.  Eventually, we will either allocate or commit
 * pages on an as-needed basis.
 */
void dvmHeapSourceGrowForUtilization(void)
{
    /* do nothing */
}

void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    /* do nothing */
}

void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen,
                                        const void *userptr, size_t userlen,
                                        void *arg),
                       void *arg)
{
    assert(!"implemented");
}

size_t dvmHeapSourceGetNumHeaps(void)
{
    return 1;
}

bool dvmTrackExternalAllocation(size_t n)
{
    /* do nothing */
    return true;
}

void dvmTrackExternalFree(size_t n)
{
    /* do nothing */
}

size_t dvmGetExternalBytesAllocated(void)
{
    assert(!"implemented");
    return 0;
}

void dvmHeapSourceFlip(void)
{
    HeapSource *heapSource;
    size_t i;

    heapSource = gDvm.gcHeap->heapSource;

    /* Reset the block queue. */
    heapSource->allocBlocks = 0;
    heapSource->queueSize = 0;
    heapSource->queueHead = QUEUE_TAIL;

    /* TODO(cshapiro): pad the current (prev) block. */

    heapSource->allocPtr = NULL;
    heapSource->allocLimit = NULL;

    /* Whiten all allocated blocks. */
    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) {
            heapSource->blockSpace[i] = BLOCK_FROM_SPACE;
        }
    }
}

static void room(size_t *alloc, size_t *avail, size_t *total)
{
    HeapSource *heapSource;

    heapSource = gDvm.gcHeap->heapSource;
    *total = heapSource->totalBlocks*BLOCK_SIZE;
    *alloc = heapSource->allocBlocks*BLOCK_SIZE;
    *avail = *total - *alloc;
}

static bool isSpaceInternal(u1 *addr, int space)
{
    HeapSource *heapSource;
    u1 *base, *limit;
    size_t offset;
    char space2;

    heapSource = gDvm.gcHeap->heapSource;
    base = heapSource->blockBase;
    assert(addr >= base);
    limit = heapSource->blockBase + heapSource->maximumSize;
    assert(addr < limit);
    offset = addr - base;
    space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT];
    return space == space2;
}

static bool fromSpaceContains(const void *addr)
{
    return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE);
}

static bool toSpaceContains(const void *addr)
{
    return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE);
}

/*
 * Notifies the collector that the object at the given address must
 * remain stationary during the current collection.
 */
static void pinObject(const Object *obj)
{
    promoteBlockByAddr(gDvm.gcHeap->heapSource, obj);
}

static size_t sumHeapBitmap(const HeapBitmap *bitmap)
{
    size_t i, sum;

    sum = 0;
    for (i = 0; i < bitmap->bitsLen >> 2; ++i) {
        /* Count the set bits; each set bit marks one allocated object. */
        sum += __builtin_popcount(bitmap->bits[i]);
    }
    return sum;
}

/*
 * Miscellaneous functionality.
 */
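
/*
 * Editorial note: a forwarding address is installed over the class
 * pointer of a from-space object once it has been copied.  Object
 * addresses are at least 8-byte aligned (ALLOC_ALIGNMENT), so the low
 * bit is free to serve as the "forwarded" tag: isForward tests it,
 * setForward stores the to-space address with the bit set, and
 * getForward masks it off.
 */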
static int isForward(const void *addr)
{
    return (uintptr_t)addr & 0x1;
}

static void setForward(const void *toObj, void *fromObj)
{
    *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1;
}

static void* getForward(const void *fromObj)
{
    return (void *)((uintptr_t)fromObj & ~0x1);
}

/* Beware: uses the same encoding as a forwarding pointer! */
static int isPermanentString(const StringObject *obj) {
    return (uintptr_t)obj & 0x1;
}

static void* getPermanentString(const StringObject *obj)
{
    return (void *)((uintptr_t)obj & ~0x1);
}


/*
 * Scavenging and transporting routines follow.  A transporter grays
 * an object.  A scavenger blackens an object.  We define these
 * routines for each fundamental object type.  Dispatch is performed
 * in scavengeObject.
 */

/*
 * Class object scavenging.
 */
static void scavengeClassObject(ClassObject *obj)
{
    int i;

    LOG_SCAV("scavengeClassObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->obj.clazz != NULL);
    assert(obj->obj.clazz->descriptor != NULL);
    assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;"));
    assert(obj->descriptor != NULL);
    LOG_SCAV("scavengeClassObject: descriptor='%s',vtableCount=%zu",
             obj->descriptor, obj->vtableCount);
    /* Delegate class object and instance field scavenging. */
    scavengeDataObject((Object *)obj);
    /* Scavenge the array element class object. */
    if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) {
        scavengeReference((Object **)(void *)&obj->elementClass);
    }
    /* Scavenge the superclass. */
    scavengeReference((Object **)(void *)&obj->super);
    /* Scavenge the class loader. */
    scavengeReference(&obj->classLoader);
    /* Scavenge static fields. */
    for (i = 0; i < obj->sfieldCount; ++i) {
        char ch = obj->sfields[i].field.signature[0];
        if (ch == '[' || ch == 'L') {
            scavengeReference((Object **)(void *)&obj->sfields[i].value.l);
        }
    }
    /* Scavenge interface class objects. */
    for (i = 0; i < obj->interfaceCount; ++i) {
        scavengeReference((Object **) &obj->interfaces[i]);
    }
}

/*
 * Array object scavenging.
 */
static size_t scavengeArrayObject(ArrayObject *array)
{
    size_t i, length;

    LOG_SCAV("scavengeArrayObject(array=%p)", array);
    /* Scavenge the class object. */
    assert(toSpaceContains(array));
    assert(array != NULL);
    assert(array->obj.clazz != NULL);
    scavengeReference((Object **) array);
    length = dvmArrayObjectSize(array);
    /* Scavenge the array contents. */
    if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) {
        Object **contents = (Object **)array->contents;
        for (i = 0; i < array->length; ++i) {
            scavengeReference(&contents[i]);
        }
    }
    return length;
}

/*
 * Reference object scavenging.
 */

static int getReferenceFlags(const Object *obj)
{
    int flags;

    flags = CLASS_ISREFERENCE |
            CLASS_ISWEAKREFERENCE |
            CLASS_ISPHANTOMREFERENCE;
    return GET_CLASS_FLAG_GROUP(obj->clazz, flags);
}

static int isSoftReference(const Object *obj)
{
    return getReferenceFlags(obj) == CLASS_ISREFERENCE;
}

static int isWeakReference(const Object *obj)
{
    return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE;
}

#ifndef NDEBUG
static bool isPhantomReference(const Object *obj)
{
    return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE;
}
#endif

/*
 * Returns true if the reference was registered with a reference queue
 * but has not yet been appended to it.
 */
static bool isReferenceEnqueuable(const Object *ref)
{
    Object *queue, *queueNext;

    queue = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue);
    queueNext = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext);
    if (queue == NULL || queueNext != NULL) {
        /*
         * There is no queue, or the reference has already
         * been enqueued.  The Reference.enqueue() method
         * will do nothing even if we call it.
         */
        return false;
    }

    /*
     * We need to call enqueue(), but if we called it from
     * here we'd probably deadlock.  Schedule a call.
     */
    return true;
}

/*
 * Schedules a reference to be appended to its reference queue.
 */
static void enqueueReference(Object *ref)
{
    assert(ref != NULL);
    assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue) != NULL);
    assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext) == NULL);
    if (!dvmHeapAddRefToLargeTable(&gDvm.gcHeap->referenceOperations, ref)) {
        LOGE("no room for any more reference operations");
        dvmAbort();
    }
}

/*
 * Sets the referent field of a reference object to NULL.
 */
static void clearReference(Object *obj)
{
    dvmSetFieldObject(obj, gDvm.offJavaLangRefReference_referent, NULL);
}

/*
 * Clears reference objects with white referents.
 */
void clearWhiteReferences(Object **list)
{
    size_t referentOffset, queueNextOffset;
    bool doSignal;

    queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
    referentOffset = gDvm.offJavaLangRefReference_referent;
    doSignal = false;
    while (*list != NULL) {
        Object *ref = *list;
        JValue *field = dvmFieldPtr(ref, referentOffset);
        Object *referent = field->l;
        *list = dvmGetFieldObject(ref, queueNextOffset);
        dvmSetFieldObject(ref, queueNextOffset, NULL);
        assert(referent != NULL);
        if (isForward(referent->clazz)) {
            field->l = referent = getForward(referent->clazz);
            continue;
        }
        if (fromSpaceContains(referent)) {
            /* Referent is white, clear it. */
            clearReference(ref);
            if (isReferenceEnqueuable(ref)) {
                enqueueReference(ref);
                doSignal = true;
            }
        }
    }
    /*
     * If we cleared a reference with a reference queue we must notify
     * the heap worker to append the reference.
     */
    if (doSignal) {
        dvmSignalHeapWorker(false);
    }
    assert(*list == NULL);
}

/*
 * Blackens referents subject to the soft reference preservation
 * policy.
 */
void preserveSoftReferences(Object **list)
{
    Object *ref;
    Object *prev, *next;
    size_t referentOffset, queueNextOffset;
    unsigned counter;
    bool white;

    queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
    referentOffset = gDvm.offJavaLangRefReference_referent;
    counter = 0;
    prev = next = NULL;
    ref = *list;
    while (ref != NULL) {
        JValue *field = dvmFieldPtr(ref, referentOffset);
        Object *referent = field->l;
        next = dvmGetFieldObject(ref, queueNextOffset);
        assert(referent != NULL);
        if (isForward(referent->clazz)) {
            /* Referent is black. */
            field->l = referent = getForward(referent->clazz);
            white = false;
        } else {
            white = fromSpaceContains(referent);
        }
        if (white && ((++counter) & 1)) {
            /* Referent is white and biased toward saving, gray it. */
            scavengeReference((Object **)(void *)&field->l);
            white = false;
        }
        if (!white) {
            /* Referent is black, unlink it. */
            if (prev != NULL) {
                dvmSetFieldObject(ref, queueNextOffset, NULL);
                dvmSetFieldObject(prev, queueNextOffset, next);
            }
        } else {
            /* Referent is white, skip over it. */
            prev = ref;
        }
        ref = next;
    }
    /*
     * Restart the trace with the newly gray references added to the
     * root set.
     */
    scavengeBlockQueue();
}

void processFinalizableReferences(void)
{
    HeapRefTable newPendingRefs;
    LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
    Object **ref;
    Object **lastRef;
    size_t totalPendCount;

    /*
     * All strongly reachable objects are black.
     * Any white finalizable objects need to be finalized.
     */

    /* Create a table that the new pending refs will
     * be added to.
     */
    if (!dvmHeapInitHeapRefTable(&newPendingRefs)) {
        //TODO: mark all finalizable refs and hope that
        //      we can schedule them next time.  Watch out,
        //      because we may be expecting to free up space
        //      by calling finalizers.
        LOG_REF("no room for pending finalizations\n");
        dvmAbort();
    }

    /*
     * Walk through finalizableRefs and move any white references to
     * the list of new pending refs.
     */
    totalPendCount = 0;
    while (finRefs != NULL) {
        Object **gapRef;
        size_t newPendCount = 0;

        gapRef = ref = finRefs->refs.table;
        lastRef = finRefs->refs.nextEntry;
        while (ref < lastRef) {
            if (fromSpaceContains(*ref)) {
                if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
                    //TODO: add the current table and allocate
                    //      a new, smaller one.
                    LOG_REF("no room for any more pending finalizations: %zd\n",
                            dvmHeapNumHeapRefTableEntries(&newPendingRefs));
                    dvmAbort();
                }
                newPendCount++;
            } else {
                /* This ref is black, so will remain on finalizableRefs.
                 */
                if (newPendCount > 0) {
                    /* Copy it up to fill the holes.
                     */
                    *gapRef++ = *ref;
                } else {
                    /* No holes yet; don't bother copying.
                     */
                    gapRef++;
                }
            }
            ref++;
        }
        finRefs->refs.nextEntry = gapRef;
        //TODO: if the table is empty when we're done, free it.
        totalPendCount += newPendCount;
        finRefs = finRefs->next;
    }
    LOG_REF("%zd finalizers triggered.\n", totalPendCount);
    if (totalPendCount == 0) {
        /* No objects required finalization.
         * Free the empty temporary table.
         */
        dvmClearReferenceTable(&newPendingRefs);
        return;
    }

    /* Add the new pending refs to the main list.
     */
    if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
                                     &newPendingRefs))
    {
        LOG_REF("can't insert new pending finalizations\n");
        dvmAbort();
    }

    //TODO: try compacting the main list with a memcpy loop

    /* Blacken the refs we just moved; we don't want them or their
     * children to get swept yet.
     */
    ref = newPendingRefs.table;
    lastRef = newPendingRefs.nextEntry;
    assert(ref < lastRef);
    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
    while (ref < lastRef) {
        scavengeReference(ref);
        ref++;
    }
    HPROF_CLEAR_GC_SCAN_STATE();
    scavengeBlockQueue();
    dvmSignalHeapWorker(false);
}

/*
 * If a reference points to from-space and has been forwarded, we snap
 * the pointer to its new to-space address.  If the reference points
 * to an unforwarded from-space address we must enqueue the reference
 * for later processing.  TODO: implement proper reference processing
 * and move the referent scavenging elsewhere.
 */
static void scavengeReferenceObject(Object *obj)
{
    Object *referent;
    Object **queue;
    size_t referentOffset, queueNextOffset;

    assert(obj != NULL);
    LOG_SCAV("scavengeReferenceObject(obj=%p),'%s'", obj, obj->clazz->descriptor);
    scavengeDataObject(obj);
    referentOffset = gDvm.offJavaLangRefReference_referent;
    referent = dvmGetFieldObject(obj, referentOffset);
    if (referent == NULL || toSpaceContains(referent)) {
        return;
    }
    if (isSoftReference(obj)) {
        queue = &gDvm.gcHeap->softReferences;
    } else if (isWeakReference(obj)) {
        queue = &gDvm.gcHeap->weakReferences;
    } else {
        assert(isPhantomReference(obj));
        queue = &gDvm.gcHeap->phantomReferences;
    }
    queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
    dvmSetFieldObject(obj, queueNextOffset, *queue);
    *queue = obj;
    LOG_SCAV("scavengeReferenceObject: enqueueing %p", obj);
}

/*
 * Data object scavenging.
 */
static void scavengeDataObject(Object *obj)
{
    ClassObject *clazz;
    int i;

    // LOG_SCAV("scavengeDataObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->clazz != NULL);
    assert(obj->clazz->objectSize != 0);
    assert(toSpaceContains(obj));
    /* Scavenge the class object. */
    clazz = obj->clazz;
    scavengeReference((Object **) obj);
    /* Scavenge instance fields. */
    if (clazz->refOffsets != CLASS_WALK_SUPER) {
        size_t refOffsets = clazz->refOffsets;
        while (refOffsets != 0) {
            size_t rshift = CLZ(refOffsets);
            size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
            Object **ref = (Object **)((u1 *)obj + offset);
            scavengeReference(ref);
            refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
        }
    } else {
        for (; clazz != NULL; clazz = clazz->super) {
            InstField *field = clazz->ifields;
            for (i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
                size_t offset = field->byteOffset;
                Object **ref = (Object **)((u1 *)obj + offset);
                scavengeReference(ref);
            }
        }
    }
}

static Object *transportObject(const Object *fromObj)
{
    Object *toObj;
    size_t allocSize, copySize;

    LOG_TRAN("transportObject(fromObj=%p) allocBlocks=%zu",
             fromObj,
             gDvm.gcHeap->heapSource->allocBlocks);
    assert(fromObj != NULL);
    assert(fromSpaceContains(fromObj));
    allocSize = copySize = objectSize(fromObj);
    if (LW_HASH_STATE(fromObj->lock) != LW_HASH_STATE_UNHASHED) {
        /*
         * The object has been hashed or hashed and moved.  We must
         * reserve an additional word for a hash code.
         */
        allocSize += sizeof(u4);
    }
    if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
        /*
         * The object has its hash code allocated.  Ensure the hash
         * code is copied along with the instance data.
         */
        copySize += sizeof(u4);
    }
    /* TODO(cshapiro): don't copy, re-map large data objects. */
    assert(copySize <= allocSize);
    toObj = allocateGray(allocSize);
    assert(toObj != NULL);
    assert(toSpaceContains(toObj));
    memcpy(toObj, fromObj, copySize);
    if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED) {
        /*
         * The object has had its hash code exposed.  Append it to the
         * instance and set a bit so we know to look for it there.
         */
        *(u4 *)(((char *)toObj) + copySize) = (u4)fromObj >> 3;
        toObj->lock |= LW_HASH_STATE_HASHED_AND_MOVED << LW_HASH_STATE_SHIFT;
    }
    LOG_TRAN("transportObject: from %p/%zu to %p/%zu (%zu,%zu) %s",
             fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj),
             toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj),
             copySize, allocSize, copySize < allocSize ? "DIFFERENT" : "");
    return toObj;
}

/*
 * Generic reference scavenging.
 */

/*
 * Given a reference to an object, the scavenge routine will gray the
 * reference.  Any objects pointed to by the scavenger object will be
 * transported to new space and a forwarding pointer will be installed
 * in the header of the object.
 */

/*
 * Blacken the given pointer.  If the pointer is in from space, it is
 * transported to new space.  If the object has a forwarding pointer
 * installed it has already been transported and the referent is
 * snapped to the new address.
 */
static void scavengeReference(Object **obj)
{
    ClassObject *clazz;
    Object *fromObj, *toObj;

    assert(obj);

    if (*obj == NULL) return;

    assert(dvmIsValidObject(*obj));

    /* The entire block is black. */
    if (toSpaceContains(*obj)) {
        LOG_SCAV("scavengeReference skipping pinned object @ %p", *obj);
        return;
    }
    LOG_SCAV("scavengeReference(*obj=%p)", *obj);

    assert(fromSpaceContains(*obj));

    clazz = (*obj)->clazz;

    if (isForward(clazz)) {
        // LOG_SCAV("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1));
        *obj = (Object *)getForward(clazz);
        return;
    }
    fromObj = *obj;
    if (clazz == NULL) {
        // LOG_SCAV("scavengeReference %p has a NULL class object", fromObj);
        assert(!"implemented");
        toObj = NULL;
    } else {
        toObj = transportObject(fromObj);
    }
    setForward(toObj, fromObj);
    *obj = (Object *)toObj;
}

/*
 * Generic object scavenging.
 */
static void scavengeObject(Object *obj)
{
    ClassObject *clazz;

    assert(obj != NULL);
    assert(obj->clazz != NULL);
    assert(!((uintptr_t)obj->clazz & 0x1));
    clazz = obj->clazz;
    if (clazz == gDvm.classJavaLangClass) {
        scavengeClassObject((ClassObject *)obj);
    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
        scavengeArrayObject((ArrayObject *)obj);
    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
        scavengeReferenceObject(obj);
    } else {
        scavengeDataObject(obj);
    }
}

/*
 * External root scavenging routines.
 */

static void pinHashTableEntries(HashTable *table)
{
    HashEntry *entry;
    void *obj;
    int i;

    LOG_PIN(">>> pinHashTableEntries(table=%p)", table);
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        }
        pinObject(entry->data);
    }
    dvmHashTableUnlock(table);
    LOG_PIN("<<< pinHashTableEntries(table=%p)", table);
}

static void pinPrimitiveClasses(void)
{
    size_t length;
    size_t i;

    length = ARRAYSIZE(gDvm.primitiveClass);
    for (i = 0; i < length; i++) {
        if (gDvm.primitiveClass[i] != NULL) {
            pinObject((Object *)gDvm.primitiveClass[i]);
        }
    }
}

/*
 * Scavenge interned strings.  Permanent interned strings will have
 * been pinned and are therefore ignored.  Non-permanent strings that
 * have been forwarded are snapped.  All other entries are removed.
 */
static void scavengeInternedStrings(void)
{
    HashTable *table;
    HashEntry *entry;
    Object *obj;
    int i;

    table = gDvm.internedStrings;
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = (Object *)entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        } else if (!isPermanentString((StringObject *)obj)) {
            // LOG_SCAV("entry->data=%p", entry->data);
            LOG_SCAV(">>> string obj=%p", entry->data);
            /* TODO(cshapiro): detach white string objects */
            scavengeReference((Object **)(void *)&entry->data);
            LOG_SCAV("<<< string obj=%p", entry->data);
        }
    }
    dvmHashTableUnlock(table);
}

static void pinInternedStrings(void)
{
    HashTable *table;
    HashEntry *entry;
    Object *obj;
    int i;

    table = gDvm.internedStrings;
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = (Object *)entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        } else if (isPermanentString((StringObject *)obj)) {
            obj = (Object *)getPermanentString((StringObject*)obj);
            LOG_PROM(">>> pin string obj=%p", obj);
            pinObject(obj);
            LOG_PROM("<<< pin string obj=%p", obj);
        }
    }
    dvmHashTableUnlock(table);
}

/*
 * At present, reference tables contain references that must not be
 * moved by the collector.  Instead of scavenging each reference in
 * the table we pin each referenced object.
 */
static void pinReferenceTable(const ReferenceTable *table)
{
    Object **entry;

    assert(table != NULL);
    assert(table->table != NULL);
    assert(table->nextEntry != NULL);
    for (entry = table->table; entry < table->nextEntry; ++entry) {
        assert(entry != NULL);
        assert(!isForward(*entry));
        pinObject(*entry);
    }
}

static void scavengeLargeHeapRefTable(LargeHeapRefTable *table)
{
    for (; table != NULL; table = table->next) {
        Object **ref = table->refs.table;
        for (; ref < table->refs.nextEntry; ++ref) {
            scavengeReference(ref);
        }
    }
}

/* This code was copied from Thread.c */
static void scavengeThreadStack(Thread *thread)
{
    const u4 *framePtr;
#if WITH_EXTRA_GC_CHECKS > 1
    bool first = true;
#endif

    framePtr = (const u4 *)thread->curFrame;
    while (framePtr != NULL) {
        const StackSaveArea *saveArea;
        const Method *method;

        saveArea = SAVEAREA_FROM_FP(framePtr);
        method = saveArea->method;
        if (method != NULL && !dvmIsNativeMethod(method)) {
#ifdef COUNT_PRECISE_METHODS
            /* the GC is running, so no lock required */
            if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
                LOG_SCAV("PGC: added %s.%s %p\n",
                         method->clazz->descriptor, method->name, method);
#endif
#if WITH_EXTRA_GC_CHECKS > 1
            /*
             * May also want to enable the memset() in the "invokeMethod"
             * goto target in the portable interpreter.  That sets the stack
             * to a pattern that makes referring to uninitialized data
             * very obvious.
             */

            if (first) {
                /*
                 * First frame, isn't native, check the "alternate" saved PC
                 * as a sanity check.
                 *
                 * It seems like we could check the second frame if the first
                 * is native, since the PCs should be the same.  It turns out
                 * this doesn't always work.  The problem is that we could
                 * have calls in the sequence:
                 *   interp method #2
                 *   native method
                 *   interp method #1
                 *
                 * and then GC while in the native method after returning
                 * from interp method #2.  The currentPc on the stack is
                 * for interp method #1, but thread->currentPc2 is still
                 * set for the last thing interp method #2 did.
                 *
                 * This can also happen in normal execution:
                 * - sget-object on not-yet-loaded class
                 * - class init updates currentPc2
                 * - static field init is handled by parsing annotations;
                 *   static String init requires creation of a String object,
                 *   which can cause a GC
                 *
                 * Essentially, any pattern that involves executing
                 * interpreted code and then causes an allocation without
                 * executing instructions in the original method will hit
                 * this.  These are rare enough that the test still has
                 * some value.
                 */
                if (saveArea->xtra.currentPc != thread->currentPc2) {
                    LOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p\n",
                         saveArea->xtra.currentPc, thread->currentPc2,
                         method->clazz->descriptor, method->name, method->insns);
                    if (saveArea->xtra.currentPc != NULL)
                        LOGE("  pc inst = 0x%04x\n", *saveArea->xtra.currentPc);
                    if (thread->currentPc2 != NULL)
                        LOGE("  pc2 inst = 0x%04x\n", *thread->currentPc2);
                    dvmDumpThread(thread, false);
                }
            } else {
                /*
                 * It's unusual, but not impossible, for a non-first frame
                 * to be at something other than a method invocation.  For
                 * example, if we do a new-instance on a nonexistent class,
                 * we'll have a lot of class loader activity on the stack
                 * above the frame with the "new" operation.  Could also
                 * happen while we initialize a Throwable when an instruction
                 * fails.
                 *
                 * So there's not much we can do here to verify the PC,
                 * except to verify that it's a GC point.
                 */
            }
            assert(saveArea->xtra.currentPc != NULL);
#endif

            const RegisterMap* pMap;
            const u1* regVector;
            int i;

            Method* nonConstMethod = (Method*) method;  // quiet gcc
            pMap = dvmGetExpandedRegisterMap(nonConstMethod);

            //LOG_SCAV("PGC: %s.%s\n", method->clazz->descriptor, method->name);

            if (pMap != NULL) {
                /* found map, get registers for this address */
                int addr = saveArea->xtra.currentPc - method->insns;
                regVector = dvmRegisterMapGetLine(pMap, addr);
                /*
                if (regVector == NULL) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001673 LOG_SCAV("PGC: map but no entry for %s.%s addr=0x%04x\n",
1674 method->clazz->descriptor, method->name, addr);
Carl Shapirod28668c2010-04-15 16:10:00 -07001675 } else {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001676 LOG_SCAV("PGC: found map for %s.%s 0x%04x (t=%d)\n",
1677 method->clazz->descriptor, method->name, addr,
1678 thread->threadId);
Carl Shapirod28668c2010-04-15 16:10:00 -07001679 }
1680 */
1681 } else {
1682 /*
1683 * No map found. If precise GC is disabled this is
1684 * expected -- we don't create pointers to the map data even
1685 * if it's present -- but if it's enabled it means we're
1686 * unexpectedly falling back on a conservative scan, so it's
1687 * worth yelling a little.
1688 */
1689 if (gDvm.preciseGc) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001690 LOG_SCAV("PGC: no map for %s.%s\n", method->clazz->descriptor, method->name);
Carl Shapirod28668c2010-04-15 16:10:00 -07001691 }
1692 regVector = NULL;
1693 }
Carl Shapirod28668c2010-04-15 16:10:00 -07001694 if (regVector == NULL) {
Carl Shapiro88b00352010-05-19 17:38:33 -07001695 /*
1696 * There are no roots to scavenge. Skip over the entire frame.
1697 */
1698 framePtr += method->registersSize;
Carl Shapirod28668c2010-04-15 16:10:00 -07001699 } else {
1700 /*
1701 * Precise scan. v0 is at the lowest address on the
1702 * interpreted stack, and is the first bit in the register
1703 * vector, so we can walk through the register map and
1704 * memory in the same direction.
1705 *
1706 * A '1' bit indicates a live reference.
1707 */
1708 u2 bits = 1 << 1;
1709 for (i = method->registersSize - 1; i >= 0; i--) {
Carl Shapirod28668c2010-04-15 16:10:00 -07001710 u4 rval = *framePtr;
1711
1712 bits >>= 1;
1713 if (bits == 1) {
1714 /* set bit 9 so we can tell when we're empty */
1715 bits = *regVector++ | 0x0100;
Carl Shapirod28668c2010-04-15 16:10:00 -07001716 }
1717
1718 if (rval != 0 && (bits & 0x01) != 0) {
1719 /*
1720 * Non-null, register marked as live reference. This
1721 * should always be a valid object.
1722 */
1723#if WITH_EXTRA_GC_CHECKS > 0
1724 if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) {
1725 /* this is very bad */
1726 LOGE("PGC: invalid ref in reg %d: 0x%08x\n",
1727 method->registersSize-1 - i, rval);
1728 } else
1729#endif
1730 {
1731
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001732 // LOG_SCAV("stack reference %u@%p", *framePtr, framePtr);
Carl Shapirod28668c2010-04-15 16:10:00 -07001733 /* dvmMarkObjectNonNull((Object *)rval); */
1734 scavengeReference((Object **) framePtr);
1735 }
1736 } else {
1737 /*
1738 * Null or non-reference, do nothing at all.
1739 */
1740#if WITH_EXTRA_GC_CHECKS > 1
1741 if (dvmIsValidObject((Object*) rval)) {
1742 /* this is normal, but we feel chatty */
1743 LOGD("PGC: ignoring valid ref in reg %d: 0x%08x\n",
1744 method->registersSize-1 - i, rval);
1745 }
1746#endif
1747 }
1748 ++framePtr;
1749 }
1750 dvmReleaseRegisterMapLine(pMap, regVector);
1751 }
1752 }
Carl Shapiro952e84a2010-05-06 14:35:29 -07001753 /* else this is a break frame and there is nothing to gray, or
Carl Shapirod28668c2010-04-15 16:10:00 -07001754 * this is a native method and the registers are just the "ins",
1755 * copied from various registers in the caller's set.
1756 */
1757
1758#if WITH_EXTRA_GC_CHECKS > 1
1759 first = false;
1760#endif
1761
1762 /* Don't fall into an infinite loop if things get corrupted.
1763 */
1764 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1765 saveArea->prevFrame == NULL);
1766 framePtr = saveArea->prevFrame;
1767 }
1768}
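
/*
 * Illustrative sketch (not part of the collector): how the register
 * map bit vector above is consumed.  Each byte of the vector packs
 * liveness bits for eight registers with v0 in the least significant
 * bit, and the 0x0100 sentinel marks when a freshly loaded byte has
 * been exhausted.  The vector contents below are made up for the
 * example, and the function is hypothetical.  Compiled out.
 */
#if 0
static void exampleDecodeRegVector(void)
{
    const u1 vector[] = { 0x05, 0x02 };   /* v0, v2, and v9 live */
    const u1 *regVector = vector;
    u2 bits = 1 << 1;
    int i;

    for (i = 0; i < 16; ++i) {
        bits >>= 1;
        if (bits == 1) {
            /* byte exhausted; load the next one and set the sentinel */
            bits = *regVector++ | 0x0100;
        }
        if ((bits & 0x01) != 0) {
            LOGD("v%d holds a live reference\n", i);
        }
    }
}
#endif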

static void scavengeThread(Thread *thread)
{
    // LOG_SCAV("scavengeThread(thread=%p)", thread);

    // LOG_SCAV("Scavenging threadObj=%p", thread->threadObj);
    scavengeReference(&thread->threadObj);

    // LOG_SCAV("Scavenging exception=%p", thread->exception);
    scavengeReference(&thread->exception);

    scavengeThreadStack(thread);
}

static void scavengeThreadList(void)
{
    Thread *thread;

    dvmLockThreadList(dvmThreadSelf());
    thread = gDvm.threadList;
    while (thread) {
        scavengeThread(thread);
        thread = thread->next;
    }
    dvmUnlockThreadList();
}

static void pinThreadStack(const Thread *thread)
{
    const u4 *framePtr;
    const StackSaveArea *saveArea;
    Method *method;
    const char *shorty;
    Object *obj;
    int i;

    saveArea = NULL;
    framePtr = (const u4 *)thread->curFrame;
    for (; framePtr != NULL; framePtr = saveArea->prevFrame) {
        saveArea = SAVEAREA_FROM_FP(framePtr);
        method = (Method *)saveArea->method;
        if (method != NULL && dvmIsNativeMethod(method)) {
            /*
             * This is a native method; pin its arguments.
             *
             * For purposes of graying references, we don't need to do
             * anything here, because all of the native "ins" were copied
             * from registers in the caller's stack frame and won't be
             * changed (an interpreted method can freely use registers
             * with parameters like any other register, but natives don't
             * work that way).
             *
             * However, we need to ensure that references visible to
             * native methods don't move around.  We can do a precise scan
             * of the arguments by examining the method signature.
             */
            LOG_PIN("+++ native scan %s.%s\n",
                    method->clazz->descriptor, method->name);
            assert(method->registersSize == method->insSize);
            if (!dvmIsStaticMethod(method)) {
                /* grab the "this" pointer */
                obj = (Object *)*framePtr++;
                if (obj == NULL) {
                    /*
                     * This can happen for the "fake" entry frame inserted
                     * for threads created outside the VM.  There's no actual
                     * call so there's no object.  If we changed the fake
                     * entry method to be declared "static" then this
                     * situation should never occur.
                     */
                } else {
                    assert(dvmIsValidObject(obj));
                    pinObject(obj);
                }
            }
            shorty = method->shorty+1;      // skip return value
            for (i = method->registersSize - 1; i >= 0; i--, framePtr++) {
                switch (*shorty++) {
                case 'L':
                    obj = (Object *)*framePtr;
                    if (obj != NULL) {
                        assert(dvmIsValidObject(obj));
                        pinObject(obj);
                    }
                    break;
                case 'D':
                case 'J':
                    framePtr++;
                    break;
                default:
                    /* 32-bit non-reference value */
                    obj = (Object *)*framePtr;  // debug, remove
                    if (dvmIsValidObject(obj)) {  // debug, remove
                        /* if we see a lot of these, our scan might be off */
                        LOG_PIN("+++ did NOT pin obj %p\n", obj);
                    }
                    break;
                }
            }
        } else if (method != NULL && !dvmIsNativeMethod(method)) {
            const RegisterMap* pMap = dvmGetExpandedRegisterMap(method);
            const u1* regVector = NULL;

            LOGI("conservative : %s.%s\n", method->clazz->descriptor, method->name);

            if (pMap != NULL) {
                int addr = saveArea->xtra.currentPc - method->insns;
                regVector = dvmRegisterMapGetLine(pMap, addr);
            }
            if (regVector == NULL) {
                /*
                 * No register info for this frame; conservatively pin.
                 */
                for (i = 0; i < method->registersSize; ++i) {
                    u4 regValue = framePtr[i];
                    if (regValue != 0 && (regValue & 0x3) == 0 && dvmIsValidObject((Object *)regValue)) {
                        pinObject((Object *)regValue);
                    }
                }
            }
        }
        /*
         * Don't fall into an infinite loop if things get corrupted.
         */
        assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
               saveArea->prevFrame == NULL);
    }
}
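
/*
 * Illustrative sketch (not part of the collector): how the shorty
 * walk above maps a native method's signature onto its argument
 * slots.  For "long f(Object o, int i, double d)" the shorty is
 * "JLID"; skipping the return type leaves "LID", so the scan pins
 * the object in the first slot, steps one slot for the int, and two
 * for the double.  This is simplified: it iterates the shorty rather
 * than the register count and omits the implicit "this" of non-static
 * methods.  exampleScanShorty is hypothetical.  Compiled out.
 */
#if 0
static void exampleScanShorty(const Method *method, const u4 *framePtr)
{
    /* skip the return type; the remainder describes the "ins" */
    const char *shorty = method->shorty + 1;
    char c;

    while ((c = *shorty++) != '\0') {
        switch (c) {
        case 'L':                   /* reference: keep it stationary */
            if (*framePtr != 0)
                pinObject((Object *)*framePtr);
            framePtr++;
            break;
        case 'J': case 'D':         /* longs and doubles take two slots */
            framePtr += 2;
            break;
        default:                    /* other 32-bit scalars take one */
            framePtr++;
            break;
        }
    }
}
#endif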

static void pinThread(const Thread *thread)
{
    assert(thread != NULL);
    LOG_PIN("pinThread(thread=%p)", thread);

    LOG_PIN("Pin native method arguments");
    pinThreadStack(thread);

    LOG_PIN("Pin internalLocalRefTable");
    pinReferenceTable(&thread->internalLocalRefTable);

    LOG_PIN("Pin jniLocalRefTable");
    pinReferenceTable(&thread->jniLocalRefTable);

    /* Can the check be pushed into the promote routine? */
    if (thread->jniMonitorRefTable.table) {
        LOG_PIN("Pin jniMonitorRefTable");
        pinReferenceTable(&thread->jniMonitorRefTable);
    }
}

static void pinThreadList(void)
{
    Thread *thread;

    dvmLockThreadList(dvmThreadSelf());
    thread = gDvm.threadList;
    while (thread) {
        pinThread(thread);
        thread = thread->next;
    }
    dvmUnlockThreadList();
}

/*
 * Heap block scavenging.
 */

/*
 * Scavenge objects in the current block.  Scavenging terminates when
 * the pointer reaches the highest address in the block or when a run
 * of zero words that continues to the highest address is reached.
 */
static void scavengeBlock(HeapSource *heapSource, size_t block)
{
    u1 *cursor;
    u1 *end;
    size_t size;

    LOG_SCAV("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);

    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);

    cursor = blockToAddress(heapSource, block);
    end = cursor + BLOCK_SIZE;
    LOG_SCAV("scavengeBlock start=%p, end=%p", cursor, end);

    /* Parse and scavenge the current block. */
    size = 0;
    while (cursor < end) {
        u4 word = *(u4 *)cursor;
        if (word != 0) {
            scavengeObject((Object *)cursor);
            size = objectSize((Object *)cursor);
            size = alignUp(size, ALLOC_ALIGNMENT);
            cursor += size;
        } else {
            /* Check for padding. */
            while (*(u4 *)cursor == 0) {
                cursor += 4;
                if (cursor == end) break;
            }
            /* Punt if something went wrong. */
            assert(cursor == end);
        }
    }
}

static size_t objectSize(const Object *obj)
{
    size_t size;

    assert(obj != NULL);
    assert(obj->clazz != NULL);
    if (obj->clazz == gDvm.classJavaLangClass) {
        size = dvmClassObjectSize((ClassObject *)obj);
    } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
        size = dvmArrayObjectSize((ArrayObject *)obj);
    } else {
        assert(obj->clazz->objectSize != 0);
        size = obj->clazz->objectSize;
    }
    if (LW_HASH_STATE(obj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
        size += sizeof(u4);
    }
    return size;
}
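
/*
 * Worked example (an annotation, not from the original source): for
 * an instance whose class reports objectSize == 16 and whose lock
 * word is in the LW_HASH_STATE_HASHED_AND_MOVED state, objectSize()
 * returns 16 + sizeof(u4) == 20.  The extra word accounts for the
 * identity hash value appended to the object when it was relocated
 * after having been hashed.
 */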

static void verifyBlock(HeapSource *heapSource, size_t block)
{
    u1 *cursor;
    u1 *end;
    size_t size;

    // LOG_VER("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);

    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);

    cursor = blockToAddress(heapSource, block);
    end = cursor + BLOCK_SIZE;
    // LOG_VER("verifyBlock start=%p, end=%p", cursor, end);

    /* Parse and verify the current block. */
    size = 0;
    while (cursor < end) {
        u4 word = *(u4 *)cursor;
        if (word != 0) {
            dvmVerifyObject((Object *)cursor);
            size = objectSize((Object *)cursor);
            size = alignUp(size, ALLOC_ALIGNMENT);
            cursor += size;
        } else {
            /* Check for padding. */
            while (*(u4 *)cursor == 0) {
                cursor += 4;
                if (cursor == end) break;
            }
            /* Punt if something went wrong. */
            assert(cursor == end);
        }
    }
}

static void describeBlockQueue(const HeapSource *heapSource)
{
    size_t block, count;
    char space;

    block = heapSource->queueHead;
    count = 0;
    LOG_SCAV(">>> describeBlockQueue(heapSource=%p)", heapSource);
    /* Count the number of blocks enqueued. */
    while (block != QUEUE_TAIL) {
        block = heapSource->blockQueue[block];
        ++count;
    }
    LOG_SCAV("blockQueue %zu elements, enqueued %zu",
             count, heapSource->queueSize);
    block = heapSource->queueHead;
    while (block != QUEUE_TAIL) {
        space = heapSource->blockSpace[block];
        LOG_SCAV("block=%zu@%p,space=%d", block, blockToAddress(heapSource,block), space);
        block = heapSource->blockQueue[block];
    }

    LOG_SCAV("<<< describeBlockQueue(heapSource=%p)", heapSource);
}

/*
 * Blackens promoted objects by scavenging every block on the queue.
 */
static void scavengeBlockQueue(void)
{
    HeapSource *heapSource;
    size_t block;

    LOG_SCAV(">>> scavengeBlockQueue()");
    heapSource = gDvm.gcHeap->heapSource;
    describeBlockQueue(heapSource);
    while (heapSource->queueHead != QUEUE_TAIL) {
        block = heapSource->queueHead;
        LOG_SCAV("Dequeueing block %zu\n", block);
        scavengeBlock(heapSource, block);
        heapSource->queueHead = heapSource->blockQueue[block];
        LOG_SCAV("New queue head is %zu\n", heapSource->queueHead);
    }
    LOG_SCAV("<<< scavengeBlockQueue()");
}

/*
 * Scan the block list and verify all blocks that are marked as being
 * in new space.  This should be parametrized so we can invoke this
 * routine outside of the context of a collection.
 */
static void verifyNewSpace(void)
{
    HeapSource *heapSource;
    size_t i;
    size_t c0, c1, c2, c7;

    c0 = c1 = c2 = c7 = 0;
    heapSource = gDvm.gcHeap->heapSource;
    for (i = 0; i < heapSource->totalBlocks; ++i) {
        switch (heapSource->blockSpace[i]) {
        case BLOCK_FREE: ++c0; break;
        case BLOCK_TO_SPACE: ++c1; break;
        case BLOCK_FROM_SPACE: ++c2; break;
        case BLOCK_CONTINUED: ++c7; break;
        default: assert(!"reached");
        }
    }
    LOG_VER("Block Demographics: "
            "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu",
            c0, c1, c2, c7);
    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) {
            continue;
        }
        verifyBlock(heapSource, i);
    }
}

static void scavengeGlobals(void)
{
    scavengeReference((Object **)(void *)&gDvm.classJavaLangClass);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangClassArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangError);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangObject);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangObjectArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangRuntimeException);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangString);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangThread);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangVMThread);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangThreadGroup);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangThrowable);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElement);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElementArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArrayArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectAccessibleObject);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructor);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructorArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectField);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectFieldArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethod);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethodArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectProxy);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangExceptionInInitializerError);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangRefReference);
    scavengeReference((Object **)(void *)&gDvm.classJavaNioReadWriteDirectByteBuffer);
    scavengeReference((Object **)(void *)&gDvm.classJavaSecurityAccessController);
    scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
    scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
    scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray);
    scavengeReference((Object **)(void *)&gDvm.classArrayBoolean);
    scavengeReference((Object **)(void *)&gDvm.classArrayChar);
    scavengeReference((Object **)(void *)&gDvm.classArrayFloat);
    scavengeReference((Object **)(void *)&gDvm.classArrayDouble);
    scavengeReference((Object **)(void *)&gDvm.classArrayByte);
    scavengeReference((Object **)(void *)&gDvm.classArrayShort);
    scavengeReference((Object **)(void *)&gDvm.classArrayInt);
    scavengeReference((Object **)(void *)&gDvm.classArrayLong);
}

void describeHeap(void)
{
    HeapSource *heapSource;

    heapSource = gDvm.gcHeap->heapSource;
    describeBlocks(heapSource);
}

/*
 * The collection interface.  Collection has a few distinct phases.
 * The first is flipping, AKA condemning or whitening, the heap.  The
 * second is promoting all objects which are pointed to by pinned or
 * ambiguous references.  The third is tracing from the stacks,
 * registers, and various globals.  Lastly, a verification of the
 * heap is performed.  The last phase should be optional.
 */
void dvmScavengeRoots(void)   /* Needs a new name badly */
{
    GcHeap *gcHeap;

    {
        size_t alloc, unused, total;

        room(&alloc, &unused, &total);
        LOG_SCAV("BEFORE GC: %zu alloc, %zu free, %zu total.",
                 alloc, unused, total);
    }

    gcHeap = gDvm.gcHeap;
    dvmHeapSourceFlip();

    /*
     * Promote blocks with stationary objects.
     */
    pinThreadList();
    pinReferenceTable(&gDvm.jniGlobalRefTable);
    pinReferenceTable(&gDvm.jniPinRefTable);
    pinHashTableEntries(gDvm.loadedClasses);
    pinHashTableEntries(gDvm.dbgRegistry);
    pinPrimitiveClasses();
    pinInternedStrings();

    // describeBlocks(gcHeap->heapSource);

    /*
     * Create first, open new-space page right here.
     */

    /* Reset allocation to an unallocated block. */
    gDvm.gcHeap->heapSource->allocPtr = allocateBlocks(gDvm.gcHeap->heapSource, 1);
    gDvm.gcHeap->heapSource->allocLimit = gDvm.gcHeap->heapSource->allocPtr + BLOCK_SIZE;
    /*
     * Hack: promote the empty block allocated above.  If the
     * promotions that occurred above did not actually gray any
     * objects, the block queue may be empty.  We must force a
     * promotion to be safe.
     */
    promoteBlockByAddr(gDvm.gcHeap->heapSource, gDvm.gcHeap->heapSource->allocPtr);

    /*
     * Scavenge blocks and relocate movable objects.
     */

    LOG_SCAV("Scavenging gDvm.threadList");
    scavengeThreadList();

    LOG_SCAV("Scavenging gDvm.gcHeap->referenceOperations");
    scavengeLargeHeapRefTable(gcHeap->referenceOperations);

    LOG_SCAV("Scavenging gDvm.gcHeap->pendingFinalizationRefs");
    scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs);

    LOG_SCAV("Scavenging random global stuff");
    scavengeReference(&gDvm.outOfMemoryObj);
    scavengeReference(&gDvm.internalErrorObj);
    scavengeReference(&gDvm.noClassDefFoundErrorObj);

    // LOG_SCAV("Scavenging gDvm.internedString");
    scavengeInternedStrings();

    LOG_SCAV("Root scavenge has completed.");

    scavengeBlockQueue();

    LOG_SCAV("Re-snap global class pointers.");
    scavengeGlobals();

    LOG_SCAV("New space scavenge has completed.");

    /*
     * Process reference objects in strength order.
     */

    LOG_REF("Processing soft references...");
    preserveSoftReferences(&gDvm.gcHeap->softReferences);
    clearWhiteReferences(&gDvm.gcHeap->softReferences);

    LOG_REF("Processing weak references...");
    clearWhiteReferences(&gDvm.gcHeap->weakReferences);

    LOG_REF("Finding finalizations...");
    processFinalizableReferences();

    LOG_REF("Processing f-reachable soft references...");
    clearWhiteReferences(&gDvm.gcHeap->softReferences);

    LOG_REF("Processing f-reachable weak references...");
    clearWhiteReferences(&gDvm.gcHeap->weakReferences);

    LOG_REF("Processing phantom references...");
    clearWhiteReferences(&gDvm.gcHeap->phantomReferences);

    /*
     * Verify the stack and heap.
     */
    dvmVerifyRoots();
    verifyNewSpace();

    //describeBlocks(gcHeap->heapSource);

    clearFromSpace(gcHeap->heapSource);

    {
        size_t alloc, rem, total;

        room(&alloc, &rem, &total);
        LOG_SCAV("AFTER GC: %zu alloc, %zu free, %zu total.", alloc, rem, total);
    }
}

/*
 * Interface compatibility routines.
 */

void dvmClearWhiteRefs(Object **list)
{
    /* do nothing */
    assert(*list == NULL);
}

void dvmHandleSoftRefs(Object **list)
{
    /* do nothing */
    assert(*list == NULL);
}

bool dvmHeapBeginMarkStep(GcMode mode)
{
    /* do nothing */
    return true;
}

void dvmHeapFinishMarkStep(void)
{
    /* do nothing */
}

void dvmHeapMarkRootSet(void)
{
    /* do nothing */
}

void dvmHeapScanMarkedObjects(void)
{
    dvmScavengeRoots();
}

void dvmHeapScheduleFinalizations(void)
{
    /* do nothing */
}

void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
{
    *numFreed = 0;
    *sizeFreed = 0;
    /* do nothing */
}

void dvmMarkDirtyObjects(void)
{
    assert(!"implemented");
}

void dvmHeapSourceThreadShutdown(void)
{
    /* do nothing */
}