/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <limits.h>
#include <sys/mman.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/Verify.h"
#include "alloc/clz.h"

/*
 * A "mostly copying", generational, garbage collector.
 *
 * TODO: we allocate our own contiguous tract of page frames to back
 * object allocations.  To cooperate with other heaps active in the
 * virtual machine we need to move the responsibility of allocating
 * pages someplace outside of this code.
 *
 * The other major data structures that maintain the state of the heap
 * are the block space table and the block queue.
 *
 * The block space table records the state of a block.  We must track
 * whether a block is:
 *
 * - Free or allocated in some space.
 *
 * - If the block holds part of a large object allocation, whether the
 *   block is the initial or a continued block of the allocation.
 *
 * - Whether the block is pinned, that is to say whether at least one
 *   object in the block must remain stationary.  Only needed during a
 *   GC.
 *
 * - Which space the object belongs to.  At present this means
 *   from-space or to-space.
 *
 * The block queue is used during garbage collection.  Unlike Cheney's
 * algorithm, from-space and to-space are not contiguous.  Therefore,
 * one cannot maintain the state of the copy with just two pointers.
 * The block queue exists to thread lists of blocks from the various
 * spaces together.
 *
 * Additionally, we record the free space frontier of the heap, as
 * well as the address of the first object within a block, which is
 * required to copy objects following a large object (not currently
 * implemented).  This is stored in the heap source structure.  This
 * should be moved elsewhere to support in-line allocations from Java
 * threads.
 *
 * Allocation requests are satisfied by reserving storage from one or
 * more contiguous blocks.  Objects that are small enough to fit
 * inside a block are packed together within a block.  Objects that
 * are larger than a block are allocated from contiguous sequences of
 * blocks.  When half the available blocks are filled, a garbage
 * collection occurs.  We "flip" spaces (exchange from- and to-space),
 * copy live objects into to-space, and perform pointer adjustment.
 *
 * Copying is made more complicated by the requirement that some
 * objects must not be moved.  This property is known as "pinning".
 * These objects must be dealt with specially.  We use Bartlett's
 * scheme; blocks containing such objects are grayed (promoted) at the
 * start of a garbage collection.  By virtue of this trick, tracing
 * from the roots proceeds as usual but all objects on those pages are
 * considered promoted and therefore not moved.
 *
 * TODO: there is sufficient information within the garbage collector
 * to implement Attardi's scheme for evacuating unpinned objects from
 * a page that is otherwise pinned.  This would eliminate false
 * retention caused by the large pinning granularity.
 *
 * We need a scheme for medium and large objects.  Ignore that for
 * now, we can return to this later.
 *
 * Eventually we need to worry about promoting objects out of the
 * copy-collected heap (tenuring) into a less volatile space.  Copying
 * may not always be the best policy for such spaces.  We should
 * consider a variant of mark, sweep, compact.
 *
 * The block scheme allows us to use VM page faults to maintain a
 * write barrier.  Consider having a special leaf state for a page.
 *
 * Bibliography:
 *
 * C. J. Cheney. 1970. A non-recursive list compacting
 * algorithm. CACM 13(11), 677--678.
 *
 * Joel F. Bartlett. 1988. Compacting Garbage Collection with
 * Ambiguous Roots. Digital Equipment Corporation.
 *
 * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up
 * Generations and C++. Digital Equipment Corporation.
 *
 * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage
 * Collection in Uncooperative Environments. Digital Equipment
 * Corporation.
 *
 * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory
 * Management Framework. TR-94-010.
 *
 * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable
 * memory management framework for C++. Software -- Practice and
 * Experience. 28(11), 1143--1183.
 */
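
/*
 * A worked example of the geometry (illustrative numbers only, not a
 * requirement of this code): with the 512-byte blocks defined below, a
 * 16 MiB heap spans 32768 blocks, and because a collection is triggered
 * once half of the blocks are in use, at most 8 MiB of storage can be
 * reserved between collections.  The actual heap bounds come from the
 * -Xms and -Xmx values passed to dvmHeapSourceStartup().
 */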

#define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0]))

#if 0
#define LOG_ALLOC LOGI
#define LOG_PIN LOGI
#define LOG_PROM LOGI
#define LOG_REF LOGI
#define LOG_SCAV LOGI
#define LOG_TRAN LOGI
#define LOG_VER LOGI
#else
#define LOG_ALLOC(...) ((void)0)
#define LOG_PIN(...) ((void)0)
#define LOG_PROM(...) ((void)0)
#define LOG_REF(...) ((void)0)
#define LOG_SCAV(...) ((void)0)
#define LOG_TRAN(...) ((void)0)
#define LOG_VER(...) ((void)0)
#endif

static void enqueueBlock(HeapSource *heapSource, size_t block);
static void scavengeReference(Object **obj);
static bool toSpaceContains(const void *addr);
static bool fromSpaceContains(const void *addr);
static size_t sumHeapBitmap(const HeapBitmap *bitmap);
static size_t objectSize(const Object *obj);
static void scavengeDataObject(Object *obj);
static void scavengeBlockQueue(void);

/*
 * We use 512-byte blocks.
 */
enum { BLOCK_SHIFT = 9 };
enum { BLOCK_SIZE = 1 << BLOCK_SHIFT };

/*
 * Space identifiers, stored into the blockSpace array.
 */
enum {
    BLOCK_FREE = 0,
    BLOCK_FROM_SPACE = 1,
    BLOCK_TO_SPACE = 2,
    BLOCK_CONTINUED = 7
};

/*
 * Alignment for all allocations, in bytes.
 */
enum { ALLOC_ALIGNMENT = 8 };

/*
 * Sentinel value for the queue end.
 */
#define QUEUE_TAIL (~(size_t)0)

struct HeapSource {

    /* The base address of backing store. */
    u1 *blockBase;

    /* Total number of blocks available for allocation. */
    size_t totalBlocks;
    size_t allocBlocks;

    /*
     * The scavenger work queue.  Implemented as an array of index
     * values into the queue.
     */
    size_t *blockQueue;

    /*
     * Base and limit blocks.  Basically the shifted start address of
     * the block.  We convert blocks to a relative number when
     * indexing in the block queue.  TODO: make the block queue base
     * relative rather than the index into the block queue.
     */
    size_t baseBlock, limitBlock;

    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* The space of the current block: 0 (free), 1, or 2. */
    char *blockSpace;

    /* Start of free space in the current block. */
    u1 *allocPtr;
    /* Exclusive limit of free space in the current block. */
    u1 *allocLimit;

    HeapBitmap allocBits;

    /*
     * The starting size of the heap.  This value is the same as the
     * value provided to the -Xms flag.
     */
    size_t minimumSize;

    /*
     * The maximum size of the heap.  This value is the same as the
     * -Xmx flag.
     */
    size_t maximumSize;

    /*
     * The current, committed size of the heap.  At present, this is
     * equivalent to the maximumSize.
     */
    size_t currentSize;

    size_t bytesAllocated;
};

static unsigned long alignDown(unsigned long x, unsigned long n)
{
    return x & -n;
}

static unsigned long alignUp(unsigned long x, unsigned long n)
{
    return alignDown(x + (n - 1), n);
}
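
/*
 * These helpers assume n is a power of two: -n is then a mask with the
 * low log2(n) bits clear, so, for example, alignDown(13, 8) == 8 and
 * alignUp(13, 8) == 16.
 */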

static void describeBlocks(const HeapSource *heapSource)
{
    size_t i;

    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if ((i % 32) == 0) putchar('\n');
        printf("%d ", heapSource->blockSpace[i]);
    }
    putchar('\n');
}

/*
 * Virtual memory interface.
 */

static void *virtualAlloc(size_t length)
{
    void *addr;
    int flags, prot;

    flags = MAP_PRIVATE | MAP_ANONYMOUS;
    prot = PROT_READ | PROT_WRITE;
    addr = mmap(NULL, length, prot, flags, -1, 0);
    if (addr == MAP_FAILED) {
        LOGE_HEAP("mmap: %s", strerror(errno));
        addr = NULL;
    }
    return addr;
}

static void virtualFree(void *addr, size_t length)
{
    int res;

    assert(addr != NULL);
    assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0);
    res = munmap(addr, length);
    if (res == -1) {
        LOGE_HEAP("munmap: %s", strerror(errno));
    }
}

static int isValidAddress(const HeapSource *heapSource, const u1 *addr)
{
    size_t block;

    block = (uintptr_t)addr >> BLOCK_SHIFT;
    return heapSource->baseBlock <= block &&
           heapSource->limitBlock > block;
}

/*
 * Iterate over the block map looking for a contiguous run of free
 * blocks.
 */
static void *allocateBlocks(HeapSource *heapSource, size_t blocks)
{
    void *addr;
    size_t allocBlocks, totalBlocks;
    size_t i, j;

    allocBlocks = heapSource->allocBlocks;
    totalBlocks = heapSource->totalBlocks;
    /* Check underflow. */
    assert(blocks != 0);
    /* Check overflow. */
    if (allocBlocks + blocks > totalBlocks / 2) {
        return NULL;
    }
    /* Scan block map. */
    for (i = 0; i < totalBlocks; ++i) {
        /* Check fit. */
        for (j = 0; j < blocks; ++j) { /* runs over totalBlocks */
            if (heapSource->blockSpace[i+j] != BLOCK_FREE) {
                break;
            }
        }
        /* No fit? */
        if (j != blocks) {
            i += j;
            continue;
        }
        /* Fit, allocate. */
        heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */
        for (j = 1; j < blocks; ++j) {
            heapSource->blockSpace[i+j] = BLOCK_CONTINUED;
        }
        heapSource->allocBlocks += blocks;
        addr = &heapSource->blockBase[i*BLOCK_SIZE];
        memset(addr, 0, blocks*BLOCK_SIZE);
        /* Collecting? */
        if (heapSource->queueHead != QUEUE_TAIL) {
            LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
            /*
             * This allocation was made on behalf of the transporter when it
             * shaded a white object gray.  We enqueue the block so
             * the scavenger can further shade the gray objects black.
             */
            enqueueBlock(heapSource, i);
        }

        return addr;
    }
    /* Insufficient space, fail. */
    LOGE("Insufficient space, %zu blocks, %zu blocks allocated and %zu bytes allocated",
         heapSource->totalBlocks,
         heapSource->allocBlocks,
         heapSource->bytesAllocated);
    return NULL;
}

/* Converts an absolute address to a relative block number. */
static size_t addressToBlock(const HeapSource *heapSource, const void *addr)
{
    assert(heapSource != NULL);
    assert(isValidAddress(heapSource, addr));
    return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock;
}

/* Converts a relative block number to an absolute address. */
static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
{
    u1 *addr;

    addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
    assert(isValidAddress(heapSource, addr));
    return addr;
}
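
/*
 * The two conversions above are inverses up to the offset within a
 * block: an address 700 bytes past blockBase lies in relative block 1,
 * and block 1 maps back to blockBase + 512.  This relies on the
 * mmap()ed blockBase being BLOCK_SIZE aligned.
 */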

static void clearBlock(HeapSource *heapSource, size_t block)
{
    u1 *addr;
    size_t i;

    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    addr = heapSource->blockBase + block*BLOCK_SIZE;
    memset(addr, 0xCC, BLOCK_SIZE);
    for (i = 0; i < BLOCK_SIZE; i += 8) {
        dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i);
    }
}

static void clearFromSpace(HeapSource *heapSource)
{
    size_t i, count;

    assert(heapSource != NULL);
    i = count = 0;
    while (i < heapSource->totalBlocks) {
        if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) {
            ++i;
            continue;
        }
        heapSource->blockSpace[i] = BLOCK_FREE;
        clearBlock(heapSource, i);
        ++i;
        ++count;
        while (i < heapSource->totalBlocks &&
               heapSource->blockSpace[i] == BLOCK_CONTINUED) {
            heapSource->blockSpace[i] = BLOCK_FREE;
            clearBlock(heapSource, i);
            ++i;
            ++count;
        }
    }
    LOG_SCAV("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE);
}

/*
 * Appends the given block to the block queue.  The block queue is
 * processed in-order by the scavenger.
 */
static void enqueueBlock(HeapSource *heapSource, size_t block)
{
    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    if (heapSource->queueHead != QUEUE_TAIL) {
        heapSource->blockQueue[heapSource->queueTail] = block;
    } else {
        heapSource->queueHead = block;
    }
    heapSource->blockQueue[block] = QUEUE_TAIL;
    heapSource->queueTail = block;
    ++heapSource->queueSize;
}
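
/*
 * The queue is threaded through the blockQueue array itself: the entry
 * for a block holds the index of the next block in the queue and
 * QUEUE_TAIL marks the end, so enqueueing is O(1) and requires no
 * separate node storage.
 */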

/*
 * Grays all objects within the block corresponding to the given
 * address.
 */
static void promoteBlockByAddr(HeapSource *heapSource, const void *addr)
{
    size_t block;

    block = addressToBlock(heapSource, (const u1 *)addr);
    if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
        // LOG_PROM("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
        heapSource->blockSpace[block] = BLOCK_TO_SPACE;
        enqueueBlock(heapSource, block);
        /* TODO(cshapiro): count continued blocks? */
        heapSource->allocBlocks += 1;
    } else {
        // LOG_PROM("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
    }
}

GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
{
    GcHeap* gcHeap;
    HeapSource *heapSource;

    assert(startSize <= absoluteMaxSize);

    heapSource = malloc(sizeof(*heapSource));
    assert(heapSource != NULL);
    memset(heapSource, 0, sizeof(*heapSource));

    heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE);
    heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE);

    heapSource->currentSize = heapSource->maximumSize;

    /* Allocate underlying storage for blocks. */
    heapSource->blockBase = virtualAlloc(heapSource->maximumSize);
    assert(heapSource->blockBase != NULL);
    heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT;
    heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT;

    heapSource->allocBlocks = 0;
    heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock);

    assert(heapSource->totalBlocks == heapSource->maximumSize / BLOCK_SIZE);

    {
        size_t size = sizeof(heapSource->blockQueue[0]);
        heapSource->blockQueue = malloc(heapSource->totalBlocks*size);
        assert(heapSource->blockQueue != NULL);
        memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size);
        heapSource->queueHead = QUEUE_TAIL;
    }

    /* Byte indicating space residence or free status of block. */
    {
        size_t size = sizeof(heapSource->blockSpace[0]);
        heapSource->blockSpace = malloc(heapSource->totalBlocks*size);
        assert(heapSource->blockSpace != NULL);
        memset(heapSource->blockSpace, 0, heapSource->totalBlocks*size);
    }

    dvmHeapBitmapInit(&heapSource->allocBits,
                      heapSource->blockBase,
                      heapSource->maximumSize,
                      "blockBase");

    /* Initialize allocation pointers. */
    heapSource->allocPtr = allocateBlocks(heapSource, 1);
    heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;

    gcHeap = malloc(sizeof(*gcHeap));
    assert(gcHeap != NULL);
    memset(gcHeap, 0, sizeof(*gcHeap));
    gcHeap->heapSource = heapSource;

    return gcHeap;
}

/*
 * Perform any required heap initializations after forking from the
 * zygote process.  This is a no-op for the time being.  Eventually
 * this will demarcate the shared region of the heap.
 */
bool dvmHeapSourceStartupAfterZygote(void)
{
    return true;
}

bool dvmHeapSourceStartupBeforeFork(void)
{
    assert(!"implemented");
    return false;
}

void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL)
        return;
    free((*gcHeap)->heapSource->blockQueue);
    free((*gcHeap)->heapSource->blockSpace);
    virtualFree((*gcHeap)->heapSource->blockBase,
                (*gcHeap)->heapSource->maximumSize);
    free((*gcHeap)->heapSource);
    (*gcHeap)->heapSource = NULL;
    free(*gcHeap);
    *gcHeap = NULL;
}

size_t dvmHeapSourceGetValue(enum HeapSourceValueSpec spec,
                             size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *heapSource;
    size_t value;

    heapSource = gDvm.gcHeap->heapSource;
    switch (spec) {
    case HS_EXTERNAL_BYTES_ALLOCATED:
        value = 0;
        break;
    case HS_EXTERNAL_LIMIT:
        value = 0;
        break;
    case HS_FOOTPRINT:
        value = heapSource->maximumSize;
        break;
    case HS_ALLOWED_FOOTPRINT:
        value = heapSource->maximumSize;
        break;
    case HS_BYTES_ALLOCATED:
        value = heapSource->bytesAllocated;
        break;
    case HS_OBJECTS_ALLOCATED:
        value = sumHeapBitmap(&heapSource->allocBits);
        break;
    default:
        assert(!"implemented");
        value = 0;
    }
    if (perHeapStats) {
        *perHeapStats = value;
    }
    return value;
}

/*
 * Performs a shallow copy of the allocation bitmap into the given
 * vector of heap bitmaps.
 */
void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
                                   size_t numHeaps)
{
    assert(!"implemented");
}

HeapBitmap *dvmHeapSourceGetLiveBits(void)
{
    return &gDvm.gcHeap->heapSource->allocBits;
}

/*
 * Allocate the specified number of bytes from the heap.  The
 * allocation cursor points into a block of free storage.  If the
 * given allocation fits in the remaining space of the block, we
 * advance the cursor and return a pointer to the free storage.  If
 * the allocation cannot fit in the current block but is smaller than
 * a block we request a new block and allocate from it instead.  If
 * the allocation is larger than a block we must allocate from a span
 * of contiguous blocks.
 */
void *dvmHeapSourceAlloc(size_t length)
{
    HeapSource *heapSource;
    unsigned char *addr;
    size_t aligned, available, blocks;

    heapSource = gDvm.gcHeap->heapSource;
    assert(heapSource != NULL);
    assert(heapSource->allocPtr != NULL);
    assert(heapSource->allocLimit != NULL);

    aligned = alignUp(length, ALLOC_ALIGNMENT);
    available = heapSource->allocLimit - heapSource->allocPtr;

    /* Try allocating inside the current block. */
    if (aligned <= available) {
        addr = heapSource->allocPtr;
        heapSource->allocPtr += aligned;
        heapSource->bytesAllocated += aligned;
        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
        return addr;
    }

    /* Try allocating in a new block. */
    if (aligned <= BLOCK_SIZE) {
        addr = allocateBlocks(heapSource, 1);
        if (addr != NULL) {
            heapSource->allocLimit = addr + BLOCK_SIZE;
            heapSource->allocPtr = addr + aligned;
            heapSource->bytesAllocated += aligned;
            dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
            /* TODO(cshapiro): pad out the current block. */
        }
        return addr;
    }

    /* Try allocating in a span of blocks. */
    blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE;

    addr = allocateBlocks(heapSource, blocks);
    /* Propagate failure upward. */
    if (addr != NULL) {
        heapSource->bytesAllocated += aligned;
        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
        /* TODO(cshapiro): pad out free space in the last block. */
    }
    return addr;
}
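
/*
 * For example, with the 8-byte ALLOC_ALIGNMENT above, a 20-byte request
 * is rounded up to 24 bytes and bump-allocated from the current block,
 * while a 600-byte request exceeds BLOCK_SIZE and is satisfied from a
 * run of two contiguous blocks.
 */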

void *dvmHeapSourceAllocAndGrow(size_t size)
{
    return dvmHeapSourceAlloc(size);
}

/* TODO: refactor along with dvmHeapSourceAlloc */
void *allocateGray(size_t size)
{
    HeapSource *heapSource;
    void *addr;
    size_t block;

    /* TODO: add a check that we are in a GC. */
    heapSource = gDvm.gcHeap->heapSource;
    addr = dvmHeapSourceAlloc(size);
    assert(addr != NULL);
    block = addressToBlock(heapSource, (const u1 *)addr);
    if (heapSource->queueHead == QUEUE_TAIL) {
        /*
         * Forcibly append the underlying block to the queue.  This
         * condition occurs when referents are transported following
         * the initial trace.
         */
        enqueueBlock(heapSource, block);
        LOG_PROM("forced promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
    }
    return addr;
}

/*
 * Returns true if the given address is within the heap and points to
 * the header of a live object.
 */
bool dvmHeapSourceContains(const void *addr)
{
    HeapSource *heapSource;
    HeapBitmap *bitmap;

    heapSource = gDvm.gcHeap->heapSource;
    bitmap = &heapSource->allocBits;
    if (!dvmHeapBitmapCoversAddress(bitmap, addr)) {
        return false;
    } else {
        return dvmHeapBitmapIsObjectBitSet(bitmap, addr);
    }
}

bool dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
{
    assert(!"implemented");
    return false;
}

size_t dvmHeapSourceChunkSize(const void *ptr)
{
    assert(!"implemented");
    return 0;
}

size_t dvmHeapSourceFootprint(void)
{
    assert(!"implemented");
    return 0;
}

/*
 * Returns the "ideal footprint" which appears to be the number of
 * bytes currently committed to the heap.  This starts out at the
 * start size of the heap and grows toward the maximum size.
 */
size_t dvmHeapSourceGetIdealFootprint(void)
{
    return gDvm.gcHeap->heapSource->currentSize;
}

float dvmGetTargetHeapUtilization(void)
{
    return 0.5f;
}

void dvmSetTargetHeapUtilization(float newTarget)
{
    assert(newTarget > 0.0f && newTarget < 1.0f);
}

size_t dvmMinimumHeapSize(size_t size, bool set)
{
    return gDvm.gcHeap->heapSource->minimumSize;
}

/*
 * Expands the size of the heap after a collection.  At present we
 * commit the pages for the maximum size of the heap, so this routine
 * is just a no-op.  Eventually, we will either allocate or commit
 * pages on an as-needed basis.
 */
void dvmHeapSourceGrowForUtilization(void)
{
    /* do nothing */
}

void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    /* do nothing */
}

void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen,
                                        const void *userptr, size_t userlen,
                                        void *arg),
                       void *arg)
{
    assert(!"implemented");
}

size_t dvmHeapSourceGetNumHeaps(void)
{
    return 1;
}

bool dvmTrackExternalAllocation(size_t n)
{
    /* do nothing */
    return true;
}

void dvmTrackExternalFree(size_t n)
{
    /* do nothing */
}

size_t dvmGetExternalBytesAllocated(void)
{
    assert(!"implemented");
    return 0;
}

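/*
 * Flips the spaces: every block the last cycle allocated into
 * (to-space) is relabeled as from-space, and the bump pointer and block
 * queue are reset so that subsequent allocations and promotions rebuild
 * to-space.
 */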
void dvmHeapSourceFlip(void)
{
    HeapSource *heapSource;
    size_t i;

    heapSource = gDvm.gcHeap->heapSource;

    /* Reset the block queue. */
    heapSource->allocBlocks = 0;
    heapSource->queueSize = 0;
    heapSource->queueHead = QUEUE_TAIL;

    /* TODO(cshapiro): pad the current (prev) block. */

    heapSource->allocPtr = NULL;
    heapSource->allocLimit = NULL;

    /* Whiten all allocated blocks. */
    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) {
            heapSource->blockSpace[i] = BLOCK_FROM_SPACE;
        }
    }
}

static void room(size_t *alloc, size_t *avail, size_t *total)
{
    HeapSource *heapSource;

    heapSource = gDvm.gcHeap->heapSource;
    *total = heapSource->totalBlocks*BLOCK_SIZE;
    *alloc = heapSource->allocBlocks*BLOCK_SIZE;
    *avail = *total - *alloc;
}

static bool isSpaceInternal(u1 *addr, int space)
{
    HeapSource *heapSource;
    u1 *base, *limit;
    size_t offset;
    char space2;

    heapSource = gDvm.gcHeap->heapSource;
    base = heapSource->blockBase;
    assert(addr >= base);
    limit = heapSource->blockBase + heapSource->maximumSize;
    assert(addr < limit);
    offset = addr - base;
    space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT];
    return space == space2;
}

static bool fromSpaceContains(const void *addr)
{
    return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE);
}

static bool toSpaceContains(const void *addr)
{
    return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE);
}

/*
 * Notifies the collector that the object at the given address must
 * remain stationary during the current collection.
 */
static void pinObject(const Object *obj)
{
    promoteBlockByAddr(gDvm.gcHeap->heapSource, obj);
}

static size_t sumHeapBitmap(const HeapBitmap *bitmap)
{
    size_t i, sum;

    sum = 0;
    for (i = 0; i < bitmap->bitsLen >> 2; ++i) {
        sum += CLZ(bitmap->bits[i]);
    }
    return sum;
}

/*
 * Miscellaneous functionality.
 */

static int isForward(const void *addr)
{
    return (uintptr_t)addr & 0x1;
}

static void setForward(const void *toObj, void *fromObj)
{
    *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1;
}

static void* getForward(const void *fromObj)
{
    return (void *)((uintptr_t)fromObj & ~0x1);
}
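
/*
 * A forwarding address overwrites the clazz word of a from-space
 * object.  Objects are at least 8-byte aligned, so the low bit of a
 * real class pointer is always zero; a set low bit therefore
 * unambiguously marks a forwarded object, and getForward() simply
 * clears it again.
 */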

/* Beware, uses the same encoding as a forwarding pointer! */
static int isPermanentString(const StringObject *obj) {
    return (uintptr_t)obj & 0x1;
}

static void* getPermanentString(const StringObject *obj)
{
    return (void *)((uintptr_t)obj & ~0x1);
}


/*
 * Scavenging and transporting routines follow.  A transporter grays
 * an object.  A scavenger blackens an object.  We define these
 * routines for each fundamental object type.  Dispatch is performed
 * in scavengeObject.
 */

/*
 * Class object scavenging.
 */
static void scavengeClassObject(ClassObject *obj)
{
    int i;

    LOG_SCAV("scavengeClassObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->obj.clazz != NULL);
    assert(obj->obj.clazz->descriptor != NULL);
    assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;"));
    assert(obj->descriptor != NULL);
    LOG_SCAV("scavengeClassObject: descriptor='%s',vtableCount=%zu",
             obj->descriptor, obj->vtableCount);
    /* Delegate class object and instance field scavenging. */
    scavengeDataObject((Object *)obj);
    /* Scavenge the array element class object. */
    if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) {
        scavengeReference((Object **)(void *)&obj->elementClass);
    }
    /* Scavenge the superclass. */
    scavengeReference((Object **)(void *)&obj->super);
    /* Scavenge the class loader. */
    scavengeReference(&obj->classLoader);
    /* Scavenge static fields. */
    for (i = 0; i < obj->sfieldCount; ++i) {
        char ch = obj->sfields[i].field.signature[0];
        if (ch == '[' || ch == 'L') {
            scavengeReference((Object **)(void *)&obj->sfields[i].value.l);
        }
    }
    /* Scavenge interface class objects. */
    for (i = 0; i < obj->interfaceCount; ++i) {
        scavengeReference((Object **) &obj->interfaces[i]);
    }
}

/*
 * Array object scavenging.
 */
static size_t scavengeArrayObject(ArrayObject *array)
{
    size_t i, length;

    LOG_SCAV("scavengeArrayObject(array=%p)", array);
    /* Scavenge the class object. */
    assert(toSpaceContains(array));
    assert(array != NULL);
    assert(array->obj.clazz != NULL);
    scavengeReference((Object **) array);
    length = dvmArrayObjectSize(array);
    /* Scavenge the array contents. */
    if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) {
        Object **contents = (Object **)array->contents;
        for (i = 0; i < array->length; ++i) {
            scavengeReference(&contents[i]);
        }
    }
    return length;
}

/*
 * Reference object scavenging.
 */

static int getReferenceFlags(const Object *obj)
{
    int flags;

    flags = CLASS_ISREFERENCE |
            CLASS_ISWEAKREFERENCE |
            CLASS_ISPHANTOMREFERENCE;
    return GET_CLASS_FLAG_GROUP(obj->clazz, flags);
}

static int isSoftReference(const Object *obj)
{
    return getReferenceFlags(obj) == CLASS_ISREFERENCE;
}

static int isWeakReference(const Object *obj)
{
    return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE;
}

static bool isPhantomReference(const Object *obj)
{
    return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE;
}

/*
 * Returns true if the reference was registered with a reference queue
 * but has not yet been appended to it.
 */
static bool isReferenceEnqueuable(const Object *ref)
{
    Object *queue, *queueNext;

    queue = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue);
    queueNext = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext);
    if (queue == NULL || queueNext != NULL) {
        /*
         * There is no queue, or the reference has already
         * been enqueued.  The Reference.enqueue() method
         * will do nothing even if we call it.
         */
        return false;
    }

    /*
     * We need to call enqueue(), but if we called it from
     * here we'd probably deadlock.  Schedule a call.
     */
    return true;
}

/*
 * Schedules a reference to be appended to its reference queue.
 */
static void enqueueReference(const Object *ref)
{
    LargeHeapRefTable **table;
    Object *op;

    assert(((uintptr_t)ref & 3) == 0);
    assert((WORKER_ENQUEUE & ~3) == 0);
    assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue) != NULL);
    assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext) == NULL);
    /*
     * Set the enqueue bit in the bottom of the pointer.  Assumes that
     * objects are 8-byte aligned.
     *
     * Note that we are adding the *Reference* (which is by definition
     * already black at this point) to this list; we're not adding the
     * referent (which has already been cleared).
     */
    table = &gDvm.gcHeap->referenceOperations;
    op = (Object *)((uintptr_t)ref | WORKER_ENQUEUE);
    if (!dvmHeapAddRefToLargeTable(table, op)) {
        LOGE("no room for any more reference operations");
        dvmAbort();
    }
}

/*
 * Sets the referent field of a reference object to NULL.
 */
static void clearReference(Object *obj)
{
    dvmSetFieldObject(obj, gDvm.offJavaLangRefReference_referent, NULL);
}

/*
 * Clears reference objects with white referents.
 */
void clearWhiteReferences(Object **list)
{
    size_t referentOffset, vmDataOffset;
    bool doSignal;

    vmDataOffset = gDvm.offJavaLangRefReference_vmData;
    referentOffset = gDvm.offJavaLangRefReference_referent;
    doSignal = false;
    while (*list != NULL) {
        Object *ref = *list;
        JValue *field = dvmFieldPtr(ref, referentOffset);
        Object *referent = field->l;
        *list = dvmGetFieldObject(ref, vmDataOffset);
        assert(referent != NULL);
        if (isForward(referent->clazz)) {
            field->l = referent = getForward(referent->clazz);
            continue;
        }
        if (fromSpaceContains(referent)) {
            /* Referent is white, clear it. */
            clearReference(ref);
            if (isReferenceEnqueuable(ref)) {
                enqueueReference(ref);
                doSignal = true;
            }
        }
    }
    /*
     * If we cleared a reference with a reference queue we must notify
     * the heap worker to append the reference.
     */
    if (doSignal) {
        dvmSignalHeapWorker(false);
    }
    assert(*list == NULL);
}

/*
 * Blackens referents subject to the soft reference preservation
 * policy.
 */
void preserveSoftReferences(Object **list)
{
    Object *ref;
    Object *prev, *next;
    size_t referentOffset, vmDataOffset;
    unsigned counter;
    bool white;

    vmDataOffset = gDvm.offJavaLangRefReference_vmData;
    referentOffset = gDvm.offJavaLangRefReference_referent;
    counter = 0;
    prev = next = NULL;
    ref = *list;
    while (ref != NULL) {
        JValue *field = dvmFieldPtr(ref, referentOffset);
        Object *referent = field->l;
        next = dvmGetFieldObject(ref, vmDataOffset);
        assert(referent != NULL);
        if (isForward(referent->clazz)) {
            /* Referent is black. */
            field->l = referent = getForward(referent->clazz);
            white = false;
        } else {
            white = fromSpaceContains(referent);
        }
        if (!white && ((++counter) & 1)) {
            /* Referent is white and biased toward saving, gray it. */
            scavengeReference((Object **)(void *)&field->l);
            white = true;
        }
        if (white) {
            /* Referent is black, unlink it. */
            if (prev != NULL) {
                dvmSetFieldObject(ref, vmDataOffset, NULL);
                dvmSetFieldObject(prev, vmDataOffset, next);
            }
        } else {
            /* Referent is white, skip over it. */
            prev = ref;
        }
        ref = next;
    }
    /*
     * Restart the trace with the newly gray references added to the
     * root set.
     */
    scavengeBlockQueue();
}

void processFinalizableReferences(void)
{
    HeapRefTable newPendingRefs;
    LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
    Object **ref;
    Object **lastRef;
    size_t totalPendCount;

    /*
     * All strongly reachable objects are black.
     * Any white finalizable objects need to be finalized.
     */

    /* Create a table that the new pending refs will
     * be added to.
     */
    if (!dvmHeapInitHeapRefTable(&newPendingRefs, 128)) {
        //TODO: mark all finalizable refs and hope that
        //      we can schedule them next time.  Watch out,
        //      because we may be expecting to free up space
        //      by calling finalizers.
        LOG_REF("no room for pending finalizations\n");
        dvmAbort();
    }

    /*
     * Walk through finalizableRefs and move any white references to
     * the list of new pending refs.
     */
    totalPendCount = 0;
    while (finRefs != NULL) {
        Object **gapRef;
        size_t newPendCount = 0;

        gapRef = ref = finRefs->refs.table;
        lastRef = finRefs->refs.nextEntry;
        while (ref < lastRef) {
            if (fromSpaceContains(*ref)) {
                if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
                    //TODO: add the current table and allocate
                    //      a new, smaller one.
                    LOG_REF("no room for any more pending finalizations: %zd\n",
                            dvmHeapNumHeapRefTableEntries(&newPendingRefs));
                    dvmAbort();
                }
                newPendCount++;
            } else {
                /* This ref is black, so will remain on finalizableRefs.
                 */
                if (newPendCount > 0) {
                    /* Copy it up to fill the holes.
                     */
                    *gapRef++ = *ref;
                } else {
                    /* No holes yet; don't bother copying.
                     */
                    gapRef++;
                }
            }
            ref++;
        }
        finRefs->refs.nextEntry = gapRef;
        //TODO: if the table is empty when we're done, free it.
        totalPendCount += newPendCount;
        finRefs = finRefs->next;
    }
    LOG_REF("%zd finalizers triggered.\n", totalPendCount);
    if (totalPendCount == 0) {
        /* No objects required finalization.
         * Free the empty temporary table.
         */
        dvmClearReferenceTable(&newPendingRefs);
        return;
    }

    /* Add the new pending refs to the main list.
     */
    if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
                                     &newPendingRefs))
    {
        LOG_REF("can't insert new pending finalizations\n");
        dvmAbort();
    }

    //TODO: try compacting the main list with a memcpy loop

    /* Blacken the refs we just moved; we don't want them or their
     * children to get swept yet.
     */
    ref = newPendingRefs.table;
    lastRef = newPendingRefs.nextEntry;
    assert(ref < lastRef);
    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
    while (ref < lastRef) {
        scavengeReference(ref);
        ref++;
    }
    HPROF_CLEAR_GC_SCAN_STATE();
    scavengeBlockQueue();
    dvmSignalHeapWorker(false);
}

/*
 * If a reference points to from-space and has been forwarded, we snap
 * the pointer to its new to-space address.  If the reference points
 * to an unforwarded from-space address we must enqueue the reference
 * for later processing.  TODO: implement proper reference processing
 * and move the referent scavenging elsewhere.
 */
static void scavengeReferenceObject(Object *obj)
{
    Object *referent;
    Object **queue;
    size_t referentOffset, vmDataOffset;

    assert(obj != NULL);
    LOG_SCAV("scavengeReferenceObject(obj=%p),'%s'", obj, obj->clazz->descriptor);
    scavengeDataObject(obj);
    referentOffset = gDvm.offJavaLangRefReference_referent;
    referent = dvmGetFieldObject(obj, referentOffset);
    if (referent == NULL || toSpaceContains(referent)) {
        return;
    }
    if (isSoftReference(obj)) {
        queue = &gDvm.gcHeap->softReferences;
    } else if (isWeakReference(obj)) {
        queue = &gDvm.gcHeap->weakReferences;
    } else {
        assert(isPhantomReference(obj));
        queue = &gDvm.gcHeap->phantomReferences;
    }
    vmDataOffset = gDvm.offJavaLangRefReference_vmData;
    dvmSetFieldObject(obj, vmDataOffset, *queue);
    *queue = obj;
    LOG_SCAV("scavengeReferenceObject: enqueueing %p", obj);
}

/*
 * Data object scavenging.
 */
static void scavengeDataObject(Object *obj)
{
    ClassObject *clazz;
    int i;

    // LOG_SCAV("scavengeDataObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->clazz != NULL);
    assert(obj->clazz->objectSize != 0);
    assert(toSpaceContains(obj));
    /* Scavenge the class object. */
    clazz = obj->clazz;
    scavengeReference((Object **) obj);
    /* Scavenge instance fields. */
    if (clazz->refOffsets != CLASS_WALK_SUPER) {
        size_t refOffsets = clazz->refOffsets;
        while (refOffsets != 0) {
            size_t rshift = CLZ(refOffsets);
            size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
            Object **ref = (Object **)((u1 *)obj + offset);
            scavengeReference(ref);
            refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
        }
    } else {
        for (; clazz != NULL; clazz = clazz->super) {
            InstField *field = clazz->ifields;
            for (i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
                size_t offset = field->byteOffset;
                Object **ref = (Object **)((u1 *)obj + offset);
                scavengeReference(ref);
            }
        }
    }
}

static Object *transportObject(const Object *fromObj)
{
    Object *toObj;
    size_t allocSize, copySize;

    LOG_TRAN("transportObject(fromObj=%p) allocBlocks=%zu",
             fromObj,
             gDvm.gcHeap->heapSource->allocBlocks);
    assert(fromObj != NULL);
    assert(fromSpaceContains(fromObj));
    allocSize = copySize = objectSize(fromObj);
    if (LW_HASH_STATE(fromObj->lock) != LW_HASH_STATE_UNHASHED) {
        /*
         * The object has been hashed or hashed and moved.  We must
         * reserve an additional word for a hash code.
         */
        allocSize += sizeof(u4);
    }
    if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
        /*
         * The object has its hash code allocated.  Ensure the hash
         * code is copied along with the instance data.
         */
        copySize += sizeof(u4);
    }
    /* TODO(cshapiro): don't copy, re-map large data objects. */
    assert(copySize <= allocSize);
    toObj = allocateGray(allocSize);
    assert(toObj != NULL);
    assert(toSpaceContains(toObj));
    memcpy(toObj, fromObj, copySize);
    if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED) {
        /*
         * The object has had its hash code exposed.  Append it to the
         * instance and set a bit so we know to look for it there.
         */
        *(u4 *)(((char *)toObj) + copySize) = (u4)fromObj >> 3;
        toObj->lock |= LW_HASH_STATE_HASHED_AND_MOVED << LW_HASH_STATE_SHIFT;
    }
    LOG_TRAN("transportObject: from %p/%zu to %p/%zu (%zu,%zu) %s",
             fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj),
             toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj),
             copySize, allocSize, copySize < allocSize ? "DIFFERENT" : "");
    return toObj;
}
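
/*
 * Hash state transitions handled above: an unhashed object is copied
 * verbatim; an object whose identity hash was observed in from-space
 * gets an extra word appended in to-space holding its old address
 * (shifted right by three), and is marked HASHED_AND_MOVED so that
 * later copies carry the stored hash along.
 */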

/*
 * Generic reference scavenging.
 */

/*
 * Given a reference to an object, the scavenge routine will gray the
 * reference.  Any objects pointed to by the scavenged object will be
 * transported to new space and a forwarding pointer will be installed
 * in the header of the object.
 */

/*
 * Blacken the given pointer.  If the pointer is in from-space, it is
 * transported to new space.  If the object has a forwarding pointer
 * installed it has already been transported and the referent is
 * snapped to the new address.
 */
static void scavengeReference(Object **obj)
{
    ClassObject *clazz;
    Object *fromObj, *toObj;

    assert(obj);

    if (*obj == NULL) return;

    assert(dvmIsValidObject(*obj));

    /* The entire block is black. */
    if (toSpaceContains(*obj)) {
        LOG_SCAV("scavengeReference skipping pinned object @ %p", *obj);
        return;
    }
    LOG_SCAV("scavengeReference(*obj=%p)", *obj);

    assert(fromSpaceContains(*obj));

    clazz = (*obj)->clazz;

    if (isForward(clazz)) {
        // LOG_SCAV("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1));
        *obj = (Object *)getForward(clazz);
        return;
    }
    fromObj = *obj;
    if (clazz == NULL) {
        // LOG_SCAV("scavengeReference %p has a NULL class object", fromObj);
        assert(!"implemented");
        toObj = NULL;
    } else if (clazz == gDvm.unlinkedJavaLangClass) {
        // LOG_SCAV("scavengeReference %p is an unlinked class object", fromObj);
        assert(!"implemented");
        toObj = NULL;
    } else {
        toObj = transportObject(fromObj);
    }
    setForward(toObj, fromObj);
    *obj = (Object *)toObj;
}
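
/*
 * In tri-color terms: a reference already in to-space is left alone, a
 * forwarded from-space object has the reference snapped to its new
 * address, and any other from-space object is transported (grayed) and
 * left behind a forwarding pointer for later references to follow.
 */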

/*
 * Generic object scavenging.
 */
static void scavengeObject(Object *obj)
{
    ClassObject *clazz;

    assert(obj != NULL);
    assert(obj->clazz != NULL);
    assert(!((uintptr_t)obj->clazz & 0x1));
    assert(obj->clazz != gDvm.unlinkedJavaLangClass);
    clazz = obj->clazz;
    if (clazz == gDvm.classJavaLangClass) {
        scavengeClassObject((ClassObject *)obj);
    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
        scavengeArrayObject((ArrayObject *)obj);
    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
        scavengeReferenceObject(obj);
    } else {
        scavengeDataObject(obj);
    }
}

/*
 * External root scavenging routines.
 */

static void scavengeHashTable(HashTable *table)
{
    HashEntry *entry;
    void *obj;
    int i;

    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        }
        scavengeReference((Object **)(void *)&entry->data);
    }
    dvmHashTableUnlock(table);
}

static void pinHashTableEntries(HashTable *table)
{
    HashEntry *entry;
    void *obj;
    int i;

    LOG_PIN(">>> pinHashTableEntries(table=%p)", table);
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        }
        pinObject(entry->data);
    }
    dvmHashTableUnlock(table);
    LOG_PIN("<<< pinHashTableEntries(table=%p)", table);
}

static void pinPrimitiveClasses(void)
{
    size_t length;
    size_t i;

    length = ARRAYSIZE(gDvm.primitiveClass);
    for (i = 0; i < length; i++) {
        if (gDvm.primitiveClass[i] != NULL) {
            pinObject((Object *)gDvm.primitiveClass[i]);
        }
    }
}

/*
 * Scavenge interned strings.  Permanent interned strings will have
 * been pinned and are therefore ignored.  Non-permanent strings that
 * have been forwarded are snapped.  All other entries are removed.
 */
static void scavengeInternedStrings(void)
{
    HashTable *table;
    HashEntry *entry;
    Object *obj;
    int i;

    table = gDvm.internedStrings;
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = (Object *)entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        } else if (!isPermanentString((StringObject *)obj)) {
            // LOG_SCAV("entry->data=%p", entry->data);
            LOG_SCAV(">>> string obj=%p", entry->data);
            /* TODO(cshapiro): detach white string objects */
            scavengeReference((Object **)(void *)&entry->data);
            LOG_SCAV("<<< string obj=%p", entry->data);
        }
    }
    dvmHashTableUnlock(table);
}

static void pinInternedStrings(void)
{
    HashTable *table;
    HashEntry *entry;
    Object *obj;
    int i;

    table = gDvm.internedStrings;
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = (Object *)entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        } else if (isPermanentString((StringObject *)obj)) {
            obj = (Object *)getPermanentString((StringObject*)obj);
            LOG_PROM(">>> pin string obj=%p", obj);
            pinObject(obj);
            LOG_PROM("<<< pin string obj=%p", obj);
        }
    }
    dvmHashTableUnlock(table);
}

/*
 * At present, reference tables contain references that must not be
 * moved by the collector.  Instead of scavenging each reference in
 * the table we pin each referenced object.
 */
static void pinReferenceTable(const ReferenceTable *table)
{
    Object **entry;

    assert(table != NULL);
    assert(table->table != NULL);
    assert(table->nextEntry != NULL);
    for (entry = table->table; entry < table->nextEntry; ++entry) {
        assert(entry != NULL);
        assert(!isForward(*entry));
        pinObject(*entry);
    }
}

static void scavengeLargeHeapRefTable(LargeHeapRefTable *table, bool stripLowBits)
{
    for (; table != NULL; table = table->next) {
        Object **ref = table->refs.table;
        for (; ref < table->refs.nextEntry; ++ref) {
            if (stripLowBits) {
                Object *obj = (Object *)((uintptr_t)*ref & ~3);
                scavengeReference(&obj);
                *ref = (Object *)((uintptr_t)obj | ((uintptr_t)*ref & 3));
            } else {
                scavengeReference(ref);
            }
        }
    }
}
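
/*
 * When stripLowBits is set the table holds reference-operation entries
 * whose two low bits carry worker flags (see enqueueReference()); the
 * flags are masked off before scavenging and reapplied to the updated
 * pointer.
 */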

/* This code was copied from Thread.c */
static void scavengeThreadStack(Thread *thread)
{
    const u4 *framePtr;
#if WITH_EXTRA_GC_CHECKS > 1
    bool first = true;
#endif

    framePtr = (const u4 *)thread->curFrame;
    while (framePtr != NULL) {
        const StackSaveArea *saveArea;
        const Method *method;

        saveArea = SAVEAREA_FROM_FP(framePtr);
        method = saveArea->method;
        if (method != NULL && !dvmIsNativeMethod(method)) {
#ifdef COUNT_PRECISE_METHODS
            /* the GC is running, so no lock required */
            if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
                LOG_SCAV("PGC: added %s.%s %p\n",
                         method->clazz->descriptor, method->name, method);
#endif
#if WITH_EXTRA_GC_CHECKS > 1
            /*
             * May also want to enable the memset() in the "invokeMethod"
             * goto target in the portable interpreter.  That sets the stack
             * to a pattern that makes referring to uninitialized data
             * very obvious.
             */

            if (first) {
                /*
                 * First frame, isn't native, check the "alternate" saved PC
                 * as a sanity check.
                 *
                 * It seems like we could check the second frame if the first
                 * is native, since the PCs should be the same.  It turns out
                 * this doesn't always work.  The problem is that we could
                 * have calls in the sequence:
                 *   interp method #2
                 *   native method
                 *   interp method #1
                 *
                 * and then GC while in the native method after returning
                 * from interp method #2.  The currentPc on the stack is
                 * for interp method #1, but thread->currentPc2 is still
                 * set for the last thing interp method #2 did.
                 *
                 * This can also happen in normal execution:
                 * - sget-object on not-yet-loaded class
                 * - class init updates currentPc2
                 * - static field init is handled by parsing annotations;
                 *   static String init requires creation of a String object,
                 *   which can cause a GC
                 *
                 * Essentially, any pattern that involves executing
                 * interpreted code and then causes an allocation without
                 * executing instructions in the original method will hit
                 * this.  These are rare enough that the test still has
                 * some value.
                 */
                if (saveArea->xtra.currentPc != thread->currentPc2) {
                    LOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p\n",
                        saveArea->xtra.currentPc, thread->currentPc2,
                        method->clazz->descriptor, method->name, method->insns);
                    if (saveArea->xtra.currentPc != NULL)
                        LOGE("  pc inst = 0x%04x\n", *saveArea->xtra.currentPc);
                    if (thread->currentPc2 != NULL)
                        LOGE("  pc2 inst = 0x%04x\n", *thread->currentPc2);
                    dvmDumpThread(thread, false);
                }
            } else {
                /*
                 * It's unusual, but not impossible, for a non-first frame
                 * to be at something other than a method invocation.  For
                 * example, if we do a new-instance on a nonexistent class,
                 * we'll have a lot of class loader activity on the stack
                 * above the frame with the "new" operation.  Could also
                 * happen while we initialize a Throwable when an instruction
                 * fails.
                 *
                 * So there's not much we can do here to verify the PC,
                 * except to verify that it's a GC point.
                 */
            }
            assert(saveArea->xtra.currentPc != NULL);
#endif

            const RegisterMap* pMap;
            const u1* regVector;
            int i;

            Method* nonConstMethod = (Method*) method;  // quiet gcc
            pMap = dvmGetExpandedRegisterMap(nonConstMethod);

            //LOG_SCAV("PGC: %s.%s\n", method->clazz->descriptor, method->name);

            if (pMap != NULL) {
                /* found map, get registers for this address */
                int addr = saveArea->xtra.currentPc - method->insns;
                regVector = dvmRegisterMapGetLine(pMap, addr);
                /*
                if (regVector == NULL) {
                    LOG_SCAV("PGC: map but no entry for %s.%s addr=0x%04x\n",
                             method->clazz->descriptor, method->name, addr);
                } else {
                    LOG_SCAV("PGC: found map for %s.%s 0x%04x (t=%d)\n",
1723 method->clazz->descriptor, method->name, addr,
1724 thread->threadId);
Carl Shapirod28668c2010-04-15 16:10:00 -07001725 }
1726 */
1727 } else {
1728 /*
1729 * No map found. If precise GC is disabled this is
1730 * expected -- we don't create pointers to the map data even
1731 * if it's present -- but if it's enabled it means we're
1732 * unexpectedly falling back on a conservative scan, so it's
1733 * worth yelling a little.
1734 */
1735 if (gDvm.preciseGc) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001736 LOG_SCAV("PGC: no map for %s.%s\n", method->clazz->descriptor, method->name);
Carl Shapirod28668c2010-04-15 16:10:00 -07001737 }
1738 regVector = NULL;
1739 }
Carl Shapirod28668c2010-04-15 16:10:00 -07001740 if (regVector == NULL) {
Carl Shapiro88b00352010-05-19 17:38:33 -07001741 /*
1742 * There are no roots to scavenge. Skip over the entire frame.
1743 */
1744 framePtr += method->registersSize;
Carl Shapirod28668c2010-04-15 16:10:00 -07001745 } else {
1746 /*
1747 * Precise scan. v0 is at the lowest address on the
1748 * interpreted stack, and is the first bit in the register
1749 * vector, so we can walk through the register map and
1750 * memory in the same direction.
1751 *
1752 * A '1' bit indicates a live reference.
1753 */
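                /*
                 * Start with a sentinel value so the first pass through
                 * the loop immediately loads a byte from the register map.
                 */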
1754 u2 bits = 1 << 1;
1755 for (i = method->registersSize - 1; i >= 0; i--) {
Carl Shapirod28668c2010-04-15 16:10:00 -07001756 u4 rval = *framePtr;
1757
1758 bits >>= 1;
1759 if (bits == 1) {
1760 /* set bit 9 so we can tell when we're empty */
1761 bits = *regVector++ | 0x0100;
1762 LOGVV("loaded bits: 0x%02x\n", bits & 0xff);
1763 }
1764
1765 if (rval != 0 && (bits & 0x01) != 0) {
1766 /*
1767 * Non-null, register marked as live reference. This
1768 * should always be a valid object.
1769 */
1770#if WITH_EXTRA_GC_CHECKS > 0
1771 if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) {
1772 /* this is very bad */
1773 LOGE("PGC: invalid ref in reg %d: 0x%08x\n",
1774 method->registersSize-1 - i, rval);
1775 } else
1776#endif
1777 {
1778
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001779 // LOG_SCAV("stack reference %u@%p", *framePtr, framePtr);
Carl Shapirod28668c2010-04-15 16:10:00 -07001780 /* dvmMarkObjectNonNull((Object *)rval); */
1781 scavengeReference((Object **) framePtr);
1782 }
1783 } else {
1784 /*
1785 * Null or non-reference, do nothing at all.
1786 */
1787#if WITH_EXTRA_GC_CHECKS > 1
1788 if (dvmIsValidObject((Object*) rval)) {
1789 /* this is normal, but we feel chatty */
1790 LOGD("PGC: ignoring valid ref in reg %d: 0x%08x\n",
1791 method->registersSize-1 - i, rval);
1792 }
1793#endif
1794 }
1795 ++framePtr;
1796 }
1797 dvmReleaseRegisterMapLine(pMap, regVector);
1798 }
1799 }
Carl Shapiro952e84a2010-05-06 14:35:29 -07001800 /* else this is a break frame and there is nothing to gray, or
Carl Shapirod28668c2010-04-15 16:10:00 -07001801 * this is a native method and the registers are just the "ins",
1802 * copied from various registers in the caller's set.
1803 */
1804
1805#if WITH_EXTRA_GC_CHECKS > 1
1806 first = false;
1807#endif
1808
1809 /* Don't fall into an infinite loop if things get corrupted.
1810 */
1811 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1812 saveArea->prevFrame == NULL);
1813 framePtr = saveArea->prevFrame;
1814 }
1815}
1816
1817static void scavengeThread(Thread *thread)
1818{
1819 assert(thread->status != THREAD_RUNNING ||
1820 thread->isSuspended ||
1821 thread == dvmThreadSelf());
1822
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001823 // LOG_SCAV("scavengeThread(thread=%p)", thread);
Carl Shapirod28668c2010-04-15 16:10:00 -07001824
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001825 // LOG_SCAV("Scavenging threadObj=%p", thread->threadObj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001826 scavengeReference(&thread->threadObj);
1827
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001828 // LOG_SCAV("Scavenging exception=%p", thread->exception);
Carl Shapirod28668c2010-04-15 16:10:00 -07001829 scavengeReference(&thread->exception);
1830
1831 scavengeThreadStack(thread);
1832}
1833
1834static void scavengeThreadList(void)
1835{
1836 Thread *thread;
1837
1838 dvmLockThreadList(dvmThreadSelf());
1839 thread = gDvm.threadList;
1840 while (thread) {
1841 scavengeThread(thread);
1842 thread = thread->next;
1843 }
1844 dvmUnlockThreadList();
1845}
1846
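/*
 * Pins objects referenced from a thread's stack frames.  Native
 * frames are scanned precisely using the method's shorty to pin the
 * "this" pointer and any reference arguments.  Interpreted frames
 * without register map data are scanned conservatively, pinning any
 * word that looks like a valid object pointer.
 */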
Carl Shapiro88b00352010-05-19 17:38:33 -07001847static void pinThreadStack(const Thread *thread)
Carl Shapiro583d64c2010-05-04 10:44:47 -07001848{
1849 const u4 *framePtr;
1850 const StackSaveArea *saveArea;
Carl Shapiro88b00352010-05-19 17:38:33 -07001851 Method *method;
Carl Shapiro583d64c2010-05-04 10:44:47 -07001852 const char *shorty;
1853 Object *obj;
1854 int i;
1855
1856 saveArea = NULL;
1857 framePtr = (const u4 *)thread->curFrame;
1858 for (; framePtr != NULL; framePtr = saveArea->prevFrame) {
1859 saveArea = SAVEAREA_FROM_FP(framePtr);
Carl Shapiro88b00352010-05-19 17:38:33 -07001860 method = (Method *)saveArea->method;
Carl Shapiro583d64c2010-05-04 10:44:47 -07001861 if (method != NULL && dvmIsNativeMethod(method)) {
1862 /*
Carl Shapiro88b00352010-05-19 17:38:33 -07001863         * This is a native method; pin its arguments.
1864 *
Carl Shapiro952e84a2010-05-06 14:35:29 -07001865 * For purposes of graying references, we don't need to do
Carl Shapiro583d64c2010-05-04 10:44:47 -07001866 * anything here, because all of the native "ins" were copied
1867 * from registers in the caller's stack frame and won't be
1868 * changed (an interpreted method can freely use registers
1869 * with parameters like any other register, but natives don't
1870 * work that way).
1871 *
1872 * However, we need to ensure that references visible to
1873 * native methods don't move around. We can do a precise scan
1874 * of the arguments by examining the method signature.
1875 */
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001876 LOG_PIN("+++ native scan %s.%s\n",
1877 method->clazz->descriptor, method->name);
Carl Shapiro583d64c2010-05-04 10:44:47 -07001878 assert(method->registersSize == method->insSize);
1879 if (!dvmIsStaticMethod(method)) {
1880 /* grab the "this" pointer */
1881 obj = (Object *)*framePtr++;
1882 if (obj == NULL) {
1883 /*
1884 * This can happen for the "fake" entry frame inserted
1885 * for threads created outside the VM. There's no actual
1886 * call so there's no object. If we changed the fake
1887 * entry method to be declared "static" then this
1888 * situation should never occur.
1889 */
1890 } else {
1891 assert(dvmIsValidObject(obj));
1892 pinObject(obj);
1893 }
1894 }
1895 shorty = method->shorty+1; // skip return value
1896 for (i = method->registersSize - 1; i >= 0; i--, framePtr++) {
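                /*
                 * Walk the shorty in step with the ins: 'L' marks a
                 * reference, while 'D' and 'J' are wide values that
                 * occupy two register slots.
                 */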
1897 switch (*shorty++) {
1898 case 'L':
1899 obj = (Object *)*framePtr;
1900 if (obj != NULL) {
1901 assert(dvmIsValidObject(obj));
1902 pinObject(obj);
1903 }
1904 break;
1905 case 'D':
1906 case 'J':
1907 framePtr++;
1908 break;
1909 default:
1910 /* 32-bit non-reference value */
1911 obj = (Object *)*framePtr; // debug, remove
1912 if (dvmIsValidObject(obj)) { // debug, remove
1913 /* if we see a lot of these, our scan might be off */
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001914 LOG_PIN("+++ did NOT pin obj %p\n", obj);
Carl Shapiro583d64c2010-05-04 10:44:47 -07001915 }
1916 break;
1917 }
1918 }
Carl Shapiro88b00352010-05-19 17:38:33 -07001919 } else if (method != NULL && !dvmIsNativeMethod(method)) {
1920 const RegisterMap* pMap = dvmGetExpandedRegisterMap(method);
1921 const u1* regVector = NULL;
1922
1923 LOGI("conservative : %s.%s\n", method->clazz->descriptor, method->name);
1924
1925 if (pMap != NULL) {
1926 int addr = saveArea->xtra.currentPc - method->insns;
1927 regVector = dvmRegisterMapGetLine(pMap, addr);
1928 }
1929 if (regVector == NULL) {
1930 /*
1931 * No register info for this frame, conservatively pin.
1932 */
1933 for (i = 0; i < method->registersSize; ++i) {
1934 u4 regValue = framePtr[i];
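                    /*
                     * Conservatively pin any word-aligned value that
                     * passes the heap's object validity check.
                     */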
1935 if (regValue != 0 && (regValue & 0x3) == 0 && dvmIsValidObject((Object *)regValue)) {
1936 pinObject((Object *)regValue);
1937 }
1938 }
1939 }
Carl Shapiro583d64c2010-05-04 10:44:47 -07001940 }
1941 /*
1942 * Don't fall into an infinite loop if things get corrupted.
1943 */
1944 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1945 saveArea->prevFrame == NULL);
1946 }
1947}
1948
1949static void pinThread(const Thread *thread)
Carl Shapirod28668c2010-04-15 16:10:00 -07001950{
1951 assert(thread != NULL);
1952 assert(thread->status != THREAD_RUNNING ||
1953 thread->isSuspended ||
1954 thread == dvmThreadSelf());
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001955 LOG_PIN("pinThread(thread=%p)", thread);
Carl Shapirod28668c2010-04-15 16:10:00 -07001956
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001957 LOG_PIN("Pin native method arguments");
Carl Shapiro88b00352010-05-19 17:38:33 -07001958 pinThreadStack(thread);
Carl Shapiro583d64c2010-05-04 10:44:47 -07001959
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001960 LOG_PIN("Pin internalLocalRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07001961 pinReferenceTable(&thread->internalLocalRefTable);
1962
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001963 LOG_PIN("Pin jniLocalRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07001964 pinReferenceTable(&thread->jniLocalRefTable);
1965
1966 /* Can the check be pushed into the promote routine? */
1967 if (thread->jniMonitorRefTable.table) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001968 LOG_PIN("Pin jniMonitorRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07001969 pinReferenceTable(&thread->jniMonitorRefTable);
1970 }
1971}
1972
1973static void pinThreadList(void)
1974{
1975 Thread *thread;
1976
1977 dvmLockThreadList(dvmThreadSelf());
1978 thread = gDvm.threadList;
1979 while (thread) {
1980 pinThread(thread);
1981 thread = thread->next;
1982 }
1983 dvmUnlockThreadList();
1984}
1985
1986/*
1987 * Heap block scavenging.
1988 */
1989
1990/*
1991 * Scavenge objects in the current block. Scavenging terminates when
1992 * the pointer reaches the highest address in the block or when a run
1993 * of zero words that continues to the highest address is reached.
1994 */
1995static void scavengeBlock(HeapSource *heapSource, size_t block)
1996{
1997 u1 *cursor;
1998 u1 *end;
1999 size_t size;
2000
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002001 LOG_SCAV("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);
Carl Shapirod28668c2010-04-15 16:10:00 -07002002
2003 assert(heapSource != NULL);
2004 assert(block < heapSource->totalBlocks);
2005 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
2006
2007 cursor = blockToAddress(heapSource, block);
2008 end = cursor + BLOCK_SIZE;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002009 LOG_SCAV("scavengeBlock start=%p, end=%p", cursor, end);
Carl Shapirod28668c2010-04-15 16:10:00 -07002010
2011 /* Parse and scavenge the current block. */
2012 size = 0;
2013 while (cursor < end) {
2014 u4 word = *(u4 *)cursor;
2015 if (word != 0) {
Carl Shapiro2396fda2010-05-03 20:14:14 -07002016 scavengeObject((Object *)cursor);
2017 size = objectSize((Object *)cursor);
Carl Shapirod28668c2010-04-15 16:10:00 -07002018 size = alignUp(size, ALLOC_ALIGNMENT);
2019 cursor += size;
2020 } else if (word == 0 && cursor == (u1 *)gDvm.unlinkedJavaLangClass) {
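            /*
             * A zero word normally marks padding, but the still-unlinked
             * java.lang.Class object has a NULL class pointer; step over
             * it as a plain ClassObject.
             */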
2021 size = sizeof(ClassObject);
2022 cursor += size;
2023 } else {
2024 /* Check for padding. */
2025 while (*(u4 *)cursor == 0) {
2026 cursor += 4;
2027 if (cursor == end) break;
2028 }
2029 /* Punt if something went wrong. */
2030 assert(cursor == end);
2031 }
2032 }
2033}
2034
Carl Shapiro2396fda2010-05-03 20:14:14 -07002035static size_t objectSize(const Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07002036{
2037 size_t size;
2038
Carl Shapiro2396fda2010-05-03 20:14:14 -07002039 assert(obj != NULL);
2040 assert(obj->clazz != NULL);
Carl Shapirod28668c2010-04-15 16:10:00 -07002041 if (obj->clazz == gDvm.classJavaLangClass ||
2042 obj->clazz == gDvm.unlinkedJavaLangClass) {
2043 size = dvmClassObjectSize((ClassObject *)obj);
2044 } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
2045 size = dvmArrayObjectSize((ArrayObject *)obj);
2046 } else {
Carl Shapiro2396fda2010-05-03 20:14:14 -07002047 assert(obj->clazz->objectSize != 0);
Carl Shapirod28668c2010-04-15 16:10:00 -07002048 size = obj->clazz->objectSize;
2049 }
Carl Shapiro2396fda2010-05-03 20:14:14 -07002050 if (LW_HASH_STATE(obj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
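        /*
         * An object that was hashed and subsequently moved carries an
         * extra trailing word holding its identity hash; account for
         * it here.
         */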
2051 size += sizeof(u4);
2052 }
Carl Shapirod28668c2010-04-15 16:10:00 -07002053 return size;
2054}
2055
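/*
 * Walks a to-space block and verifies each object in place.  The
 * parsing mirrors scavengeBlock above.
 */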
2056static void verifyBlock(HeapSource *heapSource, size_t block)
2057{
2058 u1 *cursor;
2059 u1 *end;
2060 size_t size;
2061
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002062 // LOG_VER("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);
Carl Shapirod28668c2010-04-15 16:10:00 -07002063
2064 assert(heapSource != NULL);
2065 assert(block < heapSource->totalBlocks);
2066 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
2067
2068 cursor = blockToAddress(heapSource, block);
2069 end = cursor + BLOCK_SIZE;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002070 // LOG_VER("verifyBlock start=%p, end=%p", cursor, end);
Carl Shapirod28668c2010-04-15 16:10:00 -07002071
2072    /* Parse and verify the current block. */
2073 size = 0;
2074 while (cursor < end) {
2075 u4 word = *(u4 *)cursor;
2076 if (word != 0) {
2077 dvmVerifyObject((Object *)cursor);
2078 size = objectSize((Object *)cursor);
2079 size = alignUp(size, ALLOC_ALIGNMENT);
2080 cursor += size;
2081 } else if (word == 0 && cursor == (u1 *)gDvm.unlinkedJavaLangClass) {
2082 size = sizeof(ClassObject);
2083 cursor += size;
2084 } else {
2085 /* Check for padding. */
2086            while (*(u4 *)cursor == 0) {
2087 cursor += 4;
2088 if (cursor == end) break;
2089 }
2090 /* Punt if something went wrong. */
2091 assert(cursor == end);
2092 }
2093 }
2094}
2095
2096static void describeBlockQueue(const HeapSource *heapSource)
2097{
2098 size_t block, count;
2099 char space;
2100
2101 block = heapSource->queueHead;
2102 count = 0;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002103 LOG_SCAV(">>> describeBlockQueue(heapSource=%p)", heapSource);
Carl Shapirod28668c2010-04-15 16:10:00 -07002104 /* Count the number of blocks enqueued. */
2105 while (block != QUEUE_TAIL) {
2106 block = heapSource->blockQueue[block];
2107 ++count;
2108 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002109 LOG_SCAV("blockQueue %zu elements, enqueued %zu",
Carl Shapirod28668c2010-04-15 16:10:00 -07002110 count, heapSource->queueSize);
2111 block = heapSource->queueHead;
2112 while (block != QUEUE_TAIL) {
2113 space = heapSource->blockSpace[block];
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002114        LOG_SCAV("block=%zu@%p,space=%d", block, blockToAddress(heapSource,block), space);
Carl Shapirod28668c2010-04-15 16:10:00 -07002115 block = heapSource->blockQueue[block];
2116 }
2117
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002118 LOG_SCAV("<<< describeBlockQueue(heapSource=%p)", heapSource);
Carl Shapirod28668c2010-04-15 16:10:00 -07002119}
2120
2121/*
2122 * Blackens promoted objects.
2123 */
2124static void scavengeBlockQueue(void)
2125{
2126 HeapSource *heapSource;
2127 size_t block;
2128
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002129 LOG_SCAV(">>> scavengeBlockQueue()");
Carl Shapirod28668c2010-04-15 16:10:00 -07002130 heapSource = gDvm.gcHeap->heapSource;
2131 describeBlockQueue(heapSource);
2132 while (heapSource->queueHead != QUEUE_TAIL) {
2133 block = heapSource->queueHead;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002134 LOG_SCAV("Dequeueing block %zu\n", block);
Carl Shapirod28668c2010-04-15 16:10:00 -07002135 scavengeBlock(heapSource, block);
2136 heapSource->queueHead = heapSource->blockQueue[block];
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002137 LOG_SCAV("New queue head is %zu\n", heapSource->queueHead);
Carl Shapirod28668c2010-04-15 16:10:00 -07002138 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002139 LOG_SCAV("<<< scavengeBlockQueue()");
Carl Shapirod28668c2010-04-15 16:10:00 -07002140}
2141
2142/*
2143 * Scan the block list and verify all blocks that are marked as being
2144 * in new space. This should be parametrized so we can invoke this
2145 * routine outside of the context of a collection.
2146 */
2147static void verifyNewSpace(void)
2148{
2149 HeapSource *heapSource;
2150 size_t i;
2151 size_t c0, c1, c2, c7;
2152
2153 c0 = c1 = c2 = c7 = 0;
2154 heapSource = gDvm.gcHeap->heapSource;
2155 for (i = 0; i < heapSource->totalBlocks; ++i) {
2156 switch (heapSource->blockSpace[i]) {
2157 case BLOCK_FREE: ++c0; break;
2158 case BLOCK_TO_SPACE: ++c1; break;
2159 case BLOCK_FROM_SPACE: ++c2; break;
2160 case BLOCK_CONTINUED: ++c7; break;
2161 default: assert(!"reached");
2162 }
2163 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002164 LOG_VER("Block Demographics: "
2165 "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu",
2166 c0, c1, c2, c7);
Carl Shapirod28668c2010-04-15 16:10:00 -07002167 for (i = 0; i < heapSource->totalBlocks; ++i) {
2168 if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) {
2169 continue;
2170 }
2171 verifyBlock(heapSource, i);
2172 }
2173}
2174
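/*
 * Re-snaps the well-known class pointers cached in gDvm so they refer
 * to the to-space copies.
 */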
2175static void scavengeGlobals(void)
2176{
2177 scavengeReference((Object **)(void *)&gDvm.classJavaLangClass);
2178 scavengeReference((Object **)(void *)&gDvm.classJavaLangClassArray);
2179 scavengeReference((Object **)(void *)&gDvm.classJavaLangError);
2180 scavengeReference((Object **)(void *)&gDvm.classJavaLangObject);
2181 scavengeReference((Object **)(void *)&gDvm.classJavaLangObjectArray);
2182 scavengeReference((Object **)(void *)&gDvm.classJavaLangRuntimeException);
2183 scavengeReference((Object **)(void *)&gDvm.classJavaLangString);
2184 scavengeReference((Object **)(void *)&gDvm.classJavaLangThread);
2185 scavengeReference((Object **)(void *)&gDvm.classJavaLangVMThread);
2186 scavengeReference((Object **)(void *)&gDvm.classJavaLangThreadGroup);
2187 scavengeReference((Object **)(void *)&gDvm.classJavaLangThrowable);
2188 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElement);
2189 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElementArray);
2190 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArray);
2191 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArrayArray);
2192 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectAccessibleObject);
2193 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructor);
2194 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructorArray);
2195 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectField);
2196 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectFieldArray);
2197 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethod);
2198 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethodArray);
2199 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectProxy);
2200 scavengeReference((Object **)(void *)&gDvm.classJavaLangExceptionInInitializerError);
2201 scavengeReference((Object **)(void *)&gDvm.classJavaLangRefReference);
2202 scavengeReference((Object **)(void *)&gDvm.classJavaNioReadWriteDirectByteBuffer);
2203 scavengeReference((Object **)(void *)&gDvm.classJavaSecurityAccessController);
2204 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
2205 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
2206 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray);
2207 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyNioInternalDirectBuffer);
2208 scavengeReference((Object **)(void *)&gDvm.classArrayBoolean);
2209 scavengeReference((Object **)(void *)&gDvm.classArrayChar);
2210 scavengeReference((Object **)(void *)&gDvm.classArrayFloat);
2211 scavengeReference((Object **)(void *)&gDvm.classArrayDouble);
2212 scavengeReference((Object **)(void *)&gDvm.classArrayByte);
2213 scavengeReference((Object **)(void *)&gDvm.classArrayShort);
2214 scavengeReference((Object **)(void *)&gDvm.classArrayInt);
2215 scavengeReference((Object **)(void *)&gDvm.classArrayLong);
2216}
2217
2218void describeHeap(void)
2219{
2220 HeapSource *heapSource;
2221
2222 heapSource = gDvm.gcHeap->heapSource;
2223 describeBlocks(heapSource);
2224}
2225
2226/*
2227 * The collection interface. Collection has a few distinct phases.
2228 * The first is flipping, also known as condemning or whitening, the
2229 * heap. The second is to promote all objects that are pointed to by
2230 * pinned or ambiguous references. The third phase is tracing from
2231 * the stacks, registers, and various globals. Lastly, a verification
2232 * of the heap is performed. The last phase should be optional.
2233 */
2234void dvmScavengeRoots(void) /* Needs a new name badly */
2235{
Carl Shapirod28668c2010-04-15 16:10:00 -07002236 GcHeap *gcHeap;
2237
2238 {
2239 size_t alloc, unused, total;
2240
2241 room(&alloc, &unused, &total);
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002242 LOG_SCAV("BEFORE GC: %zu alloc, %zu free, %zu total.",
2243 alloc, unused, total);
Carl Shapirod28668c2010-04-15 16:10:00 -07002244 }
2245
2246 gcHeap = gDvm.gcHeap;
2247 dvmHeapSourceFlip();
2248
2249 /*
2250 * Promote blocks with stationary objects.
2251 */
Carl Shapirod28668c2010-04-15 16:10:00 -07002252 pinThreadList();
Carl Shapirod28668c2010-04-15 16:10:00 -07002253 pinReferenceTable(&gDvm.jniGlobalRefTable);
Carl Shapirod28668c2010-04-15 16:10:00 -07002254 pinReferenceTable(&gDvm.jniPinRefTable);
Carl Shapirod28668c2010-04-15 16:10:00 -07002255 pinReferenceTable(&gcHeap->nonCollectableRefs);
Carl Shapirod28668c2010-04-15 16:10:00 -07002256 pinHashTableEntries(gDvm.loadedClasses);
Carl Shapirod28668c2010-04-15 16:10:00 -07002257 pinPrimitiveClasses();
Carl Shapirod28668c2010-04-15 16:10:00 -07002258 pinInternedStrings();
2259
2260 // describeBlocks(gcHeap->heapSource);
2261
2262 /*
2263     * Create the first open new-space page right here.
2264 */
2265
2266 /* Reset allocation to an unallocated block. */
2267 gDvm.gcHeap->heapSource->allocPtr = allocateBlocks(gDvm.gcHeap->heapSource, 1);
2268 gDvm.gcHeap->heapSource->allocLimit = gDvm.gcHeap->heapSource->allocPtr + BLOCK_SIZE;
2269 /*
2270 * Hack: promote the empty block allocated above. If the
2271 * promotions that occurred above did not actually gray any
2272 * objects, the block queue may be empty. We must force a
2273 * promotion to be safe.
2274 */
2275 promoteBlockByAddr(gDvm.gcHeap->heapSource, gDvm.gcHeap->heapSource->allocPtr);
2276
2277 /*
2278 * Scavenge blocks and relocate movable objects.
2279 */
2280
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002281 LOG_SCAV("Scavenging gDvm.threadList");
Carl Shapirod28668c2010-04-15 16:10:00 -07002282 scavengeThreadList();
2283
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002284 LOG_SCAV("Scavenging gDvm.gcHeap->referenceOperations");
Carl Shapiro7800c092010-05-11 13:46:29 -07002285 scavengeLargeHeapRefTable(gcHeap->referenceOperations, true);
Carl Shapirod28668c2010-04-15 16:10:00 -07002286
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002287 LOG_SCAV("Scavenging gDvm.gcHeap->pendingFinalizationRefs");
Carl Shapiro7800c092010-05-11 13:46:29 -07002288 scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs, false);
Carl Shapirod28668c2010-04-15 16:10:00 -07002289
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002290 LOG_SCAV("Scavenging random global stuff");
Carl Shapirod28668c2010-04-15 16:10:00 -07002291 scavengeReference(&gDvm.outOfMemoryObj);
2292 scavengeReference(&gDvm.internalErrorObj);
2293 scavengeReference(&gDvm.noClassDefFoundErrorObj);
2294
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002295 LOG_SCAV("Scavenging gDvm.dbgRegistry");
Carl Shapirod28668c2010-04-15 16:10:00 -07002296 scavengeHashTable(gDvm.dbgRegistry);
2297
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002298 // LOG_SCAV("Scavenging gDvm.internedString");
Carl Shapirod28668c2010-04-15 16:10:00 -07002299 scavengeInternedStrings();
2300
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002301 LOG_SCAV("Root scavenge has completed.");
Carl Shapirod28668c2010-04-15 16:10:00 -07002302
2303 scavengeBlockQueue();
2304
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002305 LOG_SCAV("Re-snap global class pointers.");
Carl Shapirod28668c2010-04-15 16:10:00 -07002306 scavengeGlobals();
2307
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002308 LOG_SCAV("New space scavenge has completed.");
Carl Shapirod28668c2010-04-15 16:10:00 -07002309
2310 /*
Carl Shapiro952e84a2010-05-06 14:35:29 -07002311 * Process reference objects in strength order.
2312 */
2313
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002314 LOG_REF("Processing soft references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002315 preserveSoftReferences(&gDvm.gcHeap->softReferences);
2316 clearWhiteReferences(&gDvm.gcHeap->softReferences);
2317
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002318 LOG_REF("Processing weak references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002319 clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2320
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002321 LOG_REF("Finding finalizations...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002322 processFinalizableReferences();
2323
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002324 LOG_REF("Processing f-reachable soft references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002325 clearWhiteReferences(&gDvm.gcHeap->softReferences);
2326
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002327 LOG_REF("Processing f-reachable weak references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002328 clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2329
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002330 LOG_REF("Processing phantom references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002331 clearWhiteReferences(&gDvm.gcHeap->phantomReferences);
2332
2333 /*
Carl Shapirod28668c2010-04-15 16:10:00 -07002334 * Verify the stack and heap.
2335 */
Carl Shapirof5718252010-05-11 20:55:13 -07002336 dvmVerifyRoots();
Carl Shapirod28668c2010-04-15 16:10:00 -07002337 verifyNewSpace();
2338
Carl Shapirod28668c2010-04-15 16:10:00 -07002339 //describeBlocks(gcHeap->heapSource);
2340
2341 clearFromSpace(gcHeap->heapSource);
2342
2343 {
2344 size_t alloc, rem, total;
2345
2346 room(&alloc, &rem, &total);
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002347 LOG_SCAV("AFTER GC: %zu alloc, %zu free, %zu total.", alloc, rem, total);
Carl Shapirod28668c2010-04-15 16:10:00 -07002348 }
2349}
2350
2351/*
2352 * Interface compatibility routines.
2353 */
2354
2355void dvmClearWhiteRefs(Object **list)
2356{
Carl Shapiro952e84a2010-05-06 14:35:29 -07002357 /* do nothing */
Carl Shapirod28668c2010-04-15 16:10:00 -07002358 assert(*list == NULL);
2359}
2360
2361void dvmHandleSoftRefs(Object **list)
2362{
Carl Shapiro952e84a2010-05-06 14:35:29 -07002363 /* do nothing */
Carl Shapirod28668c2010-04-15 16:10:00 -07002364 assert(*list == NULL);
2365}
2366
2367bool dvmHeapBeginMarkStep(GcMode mode)
2368{
2369 /* do nothing */
2370 return true;
2371}
2372
2373void dvmHeapFinishMarkStep(void)
2374{
2375 /* do nothing */
2376}
2377
2378void dvmHeapMarkRootSet(void)
2379{
2380 /* do nothing */
2381}
2382
2383void dvmHeapScanMarkedObjects(void)
2384{
2385 dvmScavengeRoots();
2386}
2387
2388void dvmHeapScheduleFinalizations(void)
2389{
2390 /* do nothing */
2391}
2392
2393void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
2394{
Carl Shapiro703a2f32010-05-12 23:11:37 -07002395 *numFreed = 0;
2396 *sizeFreed = 0;
Carl Shapirod28668c2010-04-15 16:10:00 -07002397 /* do nothing */
2398}
2399
2400void dvmMarkObjectNonNull(const Object *obj)
2401{
2402 assert(!"implemented");
2403}