/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <limits.h>
#include <sys/mman.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/Verify.h"
#include "alloc/clz.h"

/*
 * A "mostly copying", generational garbage collector.
 *
 * TODO: we allocate our own contiguous tract of page frames to back
 * object allocations. To cooperate with other heaps active in the
 * virtual machine we need to move the responsibility for allocating
 * pages somewhere outside of this code.
 *
 * The other major data structures that maintain the state of the heap
 * are the block space table and the block queue.
 *
 * The block space table records the state of a block. We must track
 * whether a block is:
 *
 * - Free or allocated in some space.
 *
 * - If the block holds part of a large object allocation, whether the
 *   block is the initial or a continued block of the allocation.
 *
 * - Whether the block is pinned, that is to say whether at least one
 *   object in the block must remain stationary. Only needed during a
 *   GC.
 *
 * - Which space the block belongs to. At present this means
 *   from-space or to-space.
 *
 * The block queue is used during garbage collection. Unlike Cheney's
 * algorithm, from-space and to-space are not contiguous. Therefore,
 * one cannot maintain the state of the copy with just two pointers.
 * The block queue exists to thread lists of blocks from the various
 * spaces together.
 *
 * Additionally, we record the free space frontier of the heap, as
 * well as the address of the first object within a block, which is
 * required to copy objects following a large object (not currently
 * implemented). This is stored in the heap source structure. This
 * should be moved elsewhere to support in-line allocations from Java
 * threads.
 *
 * Allocation requests are satisfied by reserving storage from one or
 * more contiguous blocks. Objects that are small enough to fit
 * inside a block are packed together within a block. Objects that
 * are larger than a block are allocated from contiguous sequences of
 * blocks. When half the available blocks are filled, a garbage
 * collection occurs. We "flip" spaces (exchange from- and to-space),
 * copy live objects into to-space, and perform pointer adjustment.
 *
 * Copying is made more complicated by the requirement that some
 * objects must not be moved. This property is known as "pinning".
 * These objects must be dealt with specially. We use Bartlett's
 * scheme; blocks containing such objects are grayed (promoted) at the
 * start of a garbage collection. By virtue of this trick, tracing
 * from the roots proceeds as usual but all objects on those pages are
 * considered promoted and therefore not moved.
 *
 * TODO: there is sufficient information within the garbage collector
 * to implement Attardi's scheme for evacuating unpinned objects from
 * a page that is otherwise pinned. This would eliminate false
 * retention caused by the large pinning granularity.
 *
 * We need a scheme for medium and large objects. Ignore that for
 * now, we can return to this later.
 *
 * Eventually we need to worry about promoting objects out of the
 * copy-collected heap (tenuring) into a less volatile space. Copying
 * may not always be the best policy for such spaces. We should
 * consider a variant of mark, sweep, compact.
 *
 * The block scheme allows us to use VM page faults to maintain a
 * write barrier. Consider having a special leaf state for a page.
 *
 * Bibliography:
 *
 * C. J. Cheney. 1970. A non-recursive list compacting
 * algorithm. CACM. 13-11 pp677--678.
 *
 * Joel F. Bartlett. 1988. Compacting Garbage Collection with
 * Ambiguous Roots. Digital Equipment Corporation.
 *
 * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up
 * Generations and C++. Digital Equipment Corporation.
 *
 * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage
 * Collection in Uncooperative Environments. Digital Equipment
 * Corporation.
 *
 * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory
 * Management Framework. TR-94-010
 *
 * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable
 * memory management framework for C++. Software -- Practice and
 * Experience. 28(11), 1143-1183.
 *
 */

#define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0]))

#if 0
#define LOG_ALLOC LOGI
#define LOG_PIN LOGI
#define LOG_PROM LOGI
#define LOG_REF LOGI
#define LOG_SCAV LOGI
#define LOG_TRAN LOGI
#define LOG_VER LOGI
#else
#define LOG_ALLOC(...) ((void)0)
#define LOG_PIN(...) ((void)0)
#define LOG_PROM(...) ((void)0)
#define LOG_REF(...) ((void)0)
#define LOG_SCAV(...) ((void)0)
#define LOG_TRAN(...) ((void)0)
#define LOG_VER(...) ((void)0)
#endif

static void enqueueBlock(HeapSource *heapSource, size_t block);
static void scavengeReference(Object **obj);
static bool toSpaceContains(const void *addr);
static bool fromSpaceContains(const void *addr);
static size_t sumHeapBitmap(const HeapBitmap *bitmap);
static size_t objectSize(const Object *obj);
static void scavengeDataObject(Object *obj);
static void scavengeBlockQueue(void);

/*
 * We use 512-byte blocks.
 */
enum { BLOCK_SHIFT = 9 };
enum { BLOCK_SIZE = 1 << BLOCK_SHIFT };

/*
 * Space identifiers, stored into the blockSpace array.
 */
enum {
    BLOCK_FREE = 0,
    BLOCK_FROM_SPACE = 1,
    BLOCK_TO_SPACE = 2,
    BLOCK_CONTINUED = 7
};

/*
 * Alignment for all allocations, in bytes.
 */
enum { ALLOC_ALIGNMENT = 8 };

/*
 * Sentinel value for the queue end.
 */
#define QUEUE_TAIL (~(size_t)0)
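
/*
 * A worked example of how these constants size an allocation
 * (illustrative numbers only). With 512-byte blocks and 8-byte
 * alignment, a 1500-byte request is first rounded up to 1504 bytes,
 * which then needs alignUp(1504, 512) / 512 = 3 contiguous blocks:
 * one block marked BLOCK_TO_SPACE followed by two BLOCK_CONTINUED
 * blocks. Per the scheme described above, a collection is triggered
 * once allocated blocks exceed half of the total, leaving the other
 * half available to serve as to-space during the copy.
 */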

struct HeapSource {

    /* The base address of backing store. */
    u1 *blockBase;

    /* Total number of blocks available for allocation. */
    size_t totalBlocks;
    size_t allocBlocks;

    /*
     * The scavenger work queue. Implemented as an array of index
     * values into the queue.
     */
    size_t *blockQueue;

    /*
     * Base and limit blocks. Basically the shifted start address of
     * the block. We convert blocks to a relative number when
     * indexing in the block queue. TODO: make the block queue base
     * relative rather than the index into the block queue.
     */
    size_t baseBlock, limitBlock;

    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* The space of each block: 0 (free), 1 (from-space), or 2 (to-space). */
    char *blockSpace;

    /* Start of free space in the current block. */
    u1 *allocPtr;
    /* Exclusive limit of free space in the current block. */
    u1 *allocLimit;

    HeapBitmap allocBits;

    /*
     * The starting size of the heap. This value is the same as the
     * value provided to the -Xms flag.
     */
    size_t minimumSize;

    /*
     * The maximum size of the heap. This value is the same as the
     * -Xmx flag.
     */
    size_t maximumSize;

    /*
     * The current, committed size of the heap. At present, this is
     * equivalent to the maximumSize.
     */
    size_t currentSize;

    size_t bytesAllocated;
};

static unsigned long alignDown(unsigned long x, unsigned long n)
{
    return x & -n;
}

static unsigned long alignUp(unsigned long x, unsigned long n)
{
    return alignDown(x + (n - 1), n);
}
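
/*
 * Both helpers assume n is a power of two: x & -n clears the low
 * log2(n) bits. Every caller in this file passes ALLOC_ALIGNMENT or
 * BLOCK_SIZE, which satisfy that assumption. A few illustrative
 * values, never compiled:
 */
#if 0
static void alignmentExamples(void)
{
    assert(alignDown(1500, 512) == 1024);           /* low 9 bits cleared */
    assert(alignUp(1500, 512) == 1536);             /* rounded to next block */
    assert(alignUp(1504, ALLOC_ALIGNMENT) == 1504); /* already 8-byte aligned */
}
#endif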

static void describeBlocks(const HeapSource *heapSource)
{
    size_t i;

    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if ((i % 32) == 0) putchar('\n');
        printf("%d ", heapSource->blockSpace[i]);
    }
    putchar('\n');
}

/*
 * Virtual memory interface.
 */

static void *virtualAlloc(size_t length)
{
    void *addr;
    int flags, prot;

    flags = MAP_PRIVATE | MAP_ANONYMOUS;
    prot = PROT_READ | PROT_WRITE;
    addr = mmap(NULL, length, prot, flags, -1, 0);
    if (addr == MAP_FAILED) {
        LOGE_HEAP("mmap: %s", strerror(errno));
        addr = NULL;
    }
    return addr;
}

static void virtualFree(void *addr, size_t length)
{
    int res;

    assert(addr != NULL);
    assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0);
    res = munmap(addr, length);
    if (res == -1) {
        LOGE_HEAP("munmap: %s", strerror(errno));
    }
}

#ifndef NDEBUG
static int isValidAddress(const HeapSource *heapSource, const u1 *addr)
{
    size_t block;

    block = (uintptr_t)addr >> BLOCK_SHIFT;
    return heapSource->baseBlock <= block &&
           heapSource->limitBlock > block;
}
#endif

/*
 * Iterate over the block map looking for a contiguous run of free
 * blocks.
 */
static void *allocateBlocks(HeapSource *heapSource, size_t blocks)
{
    void *addr;
    size_t allocBlocks, totalBlocks;
    size_t i, j;

    allocBlocks = heapSource->allocBlocks;
    totalBlocks = heapSource->totalBlocks;
    /* Check underflow. */
    assert(blocks != 0);
    /* Check overflow. */
    if (allocBlocks + blocks > totalBlocks / 2) {
        return NULL;
    }
    /* Scan block map. */
    for (i = 0; i < totalBlocks; ++i) {
        /* Check fit. */
        for (j = 0; j < blocks; ++j) { /* runs over totalBlocks */
            if (heapSource->blockSpace[i+j] != BLOCK_FREE) {
                break;
            }
        }
        /* No fit? */
        if (j != blocks) {
            i += j;
            continue;
        }
        /* Fit, allocate. */
        heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */
        for (j = 1; j < blocks; ++j) {
            heapSource->blockSpace[i+j] = BLOCK_CONTINUED;
        }
        heapSource->allocBlocks += blocks;
        addr = &heapSource->blockBase[i*BLOCK_SIZE];
        memset(addr, 0, blocks*BLOCK_SIZE);
        /* Collecting? */
        if (heapSource->queueHead != QUEUE_TAIL) {
            LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
            /*
             * This allocation was made on behalf of the transporter
             * when it shaded a white object gray. We enqueue the
             * block so the scavenger can further shade the gray
             * objects black.
             */
            enqueueBlock(heapSource, i);
        }

        return addr;
    }
    /* Insufficient space, fail. */
    LOGE("Insufficient space: %zu total blocks, %zu blocks allocated, %zu bytes allocated",
         heapSource->totalBlocks,
         heapSource->allocBlocks,
         heapSource->bytesAllocated);
    return NULL;
}

/* Converts an absolute address to a relative block number. */
static size_t addressToBlock(const HeapSource *heapSource, const void *addr)
{
    assert(heapSource != NULL);
    assert(isValidAddress(heapSource, addr));
    return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock;
}

/* Converts a relative block number to an absolute address. */
static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
{
    u1 *addr;

    addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
    assert(isValidAddress(heapSource, addr));
    return addr;
}
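
/*
 * A small worked example of the block arithmetic above; the addresses
 * are made up. If blockBase were 0x40000000, baseBlock would be
 * 0x40000000 >> 9 = 0x200000. An object at 0x40000a10 lies in
 * absolute block 0x200005, so addressToBlock() returns relative block
 * 5, and blockToAddress(heapSource, 5) recovers the block start
 * (0x200000 + 5) * 512 = 0x40000a00. The round trip only works
 * because blockBase is BLOCK_SIZE-aligned (mmap returns page-aligned
 * storage, and pages are larger than blocks).
 */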

static void clearBlock(HeapSource *heapSource, size_t block)
{
    u1 *addr;
    size_t i;

    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    addr = heapSource->blockBase + block*BLOCK_SIZE;
    memset(addr, 0xCC, BLOCK_SIZE);
    for (i = 0; i < BLOCK_SIZE; i += 8) {
        dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i);
    }
}

static void clearFromSpace(HeapSource *heapSource)
{
    size_t i, count;

    assert(heapSource != NULL);
    i = count = 0;
    while (i < heapSource->totalBlocks) {
        if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) {
            ++i;
            continue;
        }
        heapSource->blockSpace[i] = BLOCK_FREE;
        clearBlock(heapSource, i);
        ++i;
        ++count;
        while (i < heapSource->totalBlocks &&
               heapSource->blockSpace[i] == BLOCK_CONTINUED) {
            heapSource->blockSpace[i] = BLOCK_FREE;
            clearBlock(heapSource, i);
            ++i;
            ++count;
        }
    }
    LOG_SCAV("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE);
}

/*
 * Appends the given block to the block queue. The block queue is
 * processed in-order by the scavenger.
 */
static void enqueueBlock(HeapSource *heapSource, size_t block)
{
    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    if (heapSource->queueHead != QUEUE_TAIL) {
        heapSource->blockQueue[heapSource->queueTail] = block;
    } else {
        heapSource->queueHead = block;
    }
    heapSource->blockQueue[block] = QUEUE_TAIL;
    heapSource->queueTail = block;
    ++heapSource->queueSize;
}
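
/*
 * Sketch of the queue layout (example block numbers invented):
 * blockQueue is an intrusive singly-linked list indexed by block
 * number; blockQueue[b] holds the next block in the queue, or
 * QUEUE_TAIL if b is last. Enqueueing block 7 and then block 3 onto
 * an empty queue leaves queueHead == 7, blockQueue[7] == 3,
 * blockQueue[3] == QUEUE_TAIL, and queueTail == 3. The scavenger
 * walks from queueHead following blockQueue[] until it reads
 * QUEUE_TAIL.
 */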

/*
 * Grays all objects within the block corresponding to the given
 * address.
 */
static void promoteBlockByAddr(HeapSource *heapSource, const void *addr)
{
    size_t block;

    block = addressToBlock(heapSource, (const u1 *)addr);
    if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
        // LOG_PROM("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
        heapSource->blockSpace[block] = BLOCK_TO_SPACE;
        enqueueBlock(heapSource, block);
        /* TODO(cshapiro): count continued blocks? */
        heapSource->allocBlocks += 1;
    } else {
        // LOG_PROM("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
    }
}

GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
{
    GcHeap* gcHeap;
    HeapSource *heapSource;

    assert(startSize <= absoluteMaxSize);

    heapSource = malloc(sizeof(*heapSource));
    assert(heapSource != NULL);
    memset(heapSource, 0, sizeof(*heapSource));

    heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE);
    heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE);

    heapSource->currentSize = heapSource->maximumSize;

    /* Allocate underlying storage for blocks. */
    heapSource->blockBase = virtualAlloc(heapSource->maximumSize);
    assert(heapSource->blockBase != NULL);
    heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT;
    heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT;

    heapSource->allocBlocks = 0;
    heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock);

    assert(heapSource->totalBlocks == heapSource->maximumSize / BLOCK_SIZE);

    {
        size_t size = sizeof(heapSource->blockQueue[0]);
        heapSource->blockQueue = malloc(heapSource->totalBlocks*size);
        assert(heapSource->blockQueue != NULL);
        memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size);
        heapSource->queueHead = QUEUE_TAIL;
    }

    /* Byte indicating space residence or free status of block. */
    {
        size_t size = sizeof(heapSource->blockSpace[0]);
        heapSource->blockSpace = malloc(heapSource->totalBlocks*size);
        assert(heapSource->blockSpace != NULL);
        memset(heapSource->blockSpace, 0, heapSource->totalBlocks*size);
    }

    dvmHeapBitmapInit(&heapSource->allocBits,
                      heapSource->blockBase,
                      heapSource->maximumSize,
                      "blockBase");

    /* Initialize allocation pointers. */
    heapSource->allocPtr = allocateBlocks(heapSource, 1);
    heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;

    gcHeap = malloc(sizeof(*gcHeap));
    assert(gcHeap != NULL);
    memset(gcHeap, 0, sizeof(*gcHeap));
    gcHeap->heapSource = heapSource;

    return gcHeap;
}

/*
 * Perform any required heap initializations after forking from the
 * zygote process. This is a no-op for the time being. Eventually
 * this will demarcate the shared region of the heap.
 */
bool dvmHeapSourceStartupAfterZygote(void)
{
    return true;
}

bool dvmHeapSourceStartupBeforeFork(void)
{
    assert(!"implemented");
    return false;
}

void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL)
        return;
    free((*gcHeap)->heapSource->blockQueue);
    free((*gcHeap)->heapSource->blockSpace);
    virtualFree((*gcHeap)->heapSource->blockBase,
                (*gcHeap)->heapSource->maximumSize);
    free((*gcHeap)->heapSource);
    (*gcHeap)->heapSource = NULL;
    free(*gcHeap);
    *gcHeap = NULL;
}

size_t dvmHeapSourceGetValue(enum HeapSourceValueSpec spec,
                             size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *heapSource;
    size_t value;

    heapSource = gDvm.gcHeap->heapSource;
    switch (spec) {
    case HS_EXTERNAL_BYTES_ALLOCATED:
        value = 0;
        break;
    case HS_EXTERNAL_LIMIT:
        value = 0;
        break;
    case HS_FOOTPRINT:
        value = heapSource->maximumSize;
        break;
    case HS_ALLOWED_FOOTPRINT:
        value = heapSource->maximumSize;
        break;
    case HS_BYTES_ALLOCATED:
        value = heapSource->bytesAllocated;
        break;
    case HS_OBJECTS_ALLOCATED:
        value = sumHeapBitmap(&heapSource->allocBits);
        break;
    default:
        assert(!"implemented");
        value = 0;
    }
    if (perHeapStats) {
        *perHeapStats = value;
    }
    return value;
}

/*
 * Performs a shallow copy of the allocation bitmap into the given
 * vector of heap bitmaps.
 */
void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
                                   size_t numHeaps)
{
    assert(!"implemented");
}

HeapBitmap *dvmHeapSourceGetLiveBits(void)
{
    return &gDvm.gcHeap->heapSource->allocBits;
}

/*
 * Allocate the specified number of bytes from the heap. The
 * allocation cursor points into a block of free storage. If the
 * given allocation fits in the remaining space of the block, we
 * advance the cursor and return a pointer to the free storage. If
 * the allocation cannot fit in the current block but is smaller than
 * a block we request a new block and allocate from it instead. If
 * the allocation is larger than a block we must allocate from a span
 * of contiguous blocks.
 */
void *dvmHeapSourceAlloc(size_t length)
{
    HeapSource *heapSource;
    unsigned char *addr;
    size_t aligned, available, blocks;

    heapSource = gDvm.gcHeap->heapSource;
    assert(heapSource != NULL);
    assert(heapSource->allocPtr != NULL);
    assert(heapSource->allocLimit != NULL);

    aligned = alignUp(length, ALLOC_ALIGNMENT);
    available = heapSource->allocLimit - heapSource->allocPtr;

    /* Try allocating inside the current block. */
    if (aligned <= available) {
        addr = heapSource->allocPtr;
        heapSource->allocPtr += aligned;
        heapSource->bytesAllocated += aligned;
        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
        return addr;
    }

    /* Try allocating in a new block. */
    if (aligned <= BLOCK_SIZE) {
        addr = allocateBlocks(heapSource, 1);
        if (addr != NULL) {
            heapSource->allocLimit = addr + BLOCK_SIZE;
            heapSource->allocPtr = addr + aligned;
            heapSource->bytesAllocated += aligned;
            dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
            /* TODO(cshapiro): pad out the current block. */
        }
        return addr;
    }

    /* Try allocating in a span of blocks. */
    blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE;

    addr = allocateBlocks(heapSource, blocks);
    /* Propagate failure upward. */
    if (addr != NULL) {
        heapSource->bytesAllocated += aligned;
        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
        /* TODO(cshapiro): pad out free space in the last block. */
    }
    return addr;
}
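
/*
 * A brief worked example of the three paths above (illustrative
 * numbers only). With 40 bytes left in the current block, a 20-byte
 * request rounds up to 24 bytes and is bump-allocated at allocPtr. A
 * 100-byte request (104 after alignment) no longer fits, so a fresh
 * 512-byte block becomes the current block and the request is carved
 * from its start. A 2000-byte request exceeds BLOCK_SIZE and claims
 * alignUp(2000, 512) / 512 = 4 contiguous blocks without disturbing
 * the current block's bump pointer.
 */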

void *dvmHeapSourceAllocAndGrow(size_t size)
{
    return dvmHeapSourceAlloc(size);
}

/* TODO: refactor along with dvmHeapSourceAlloc */
void *allocateGray(size_t size)
{
    HeapSource *heapSource;
    void *addr;
    size_t block;

    /* TODO: add a check that we are in a GC. */
    heapSource = gDvm.gcHeap->heapSource;
    addr = dvmHeapSourceAlloc(size);
    assert(addr != NULL);
    block = addressToBlock(heapSource, (const u1 *)addr);
    if (heapSource->queueHead == QUEUE_TAIL) {
        /*
         * Forcibly append the underlying block to the queue. This
         * condition occurs when referents are transported following
         * the initial trace.
         */
        enqueueBlock(heapSource, block);
        LOG_PROM("forced promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
    }
    return addr;
}

bool dvmHeapSourceContainsAddress(const void *ptr)
{
    HeapSource *heapSource = gDvm.gcHeap->heapSource;
    return dvmHeapBitmapCoversAddress(&heapSource->allocBits, ptr);
}

/*
 * Returns true if the given address is within the heap and points to
 * the header of a live object.
 */
bool dvmHeapSourceContains(const void *addr)
{
    HeapSource *heapSource;
    HeapBitmap *bitmap;

    heapSource = gDvm.gcHeap->heapSource;
    bitmap = &heapSource->allocBits;
    if (!dvmHeapBitmapCoversAddress(bitmap, addr)) {
        return false;
    } else {
        return dvmHeapBitmapIsObjectBitSet(bitmap, addr);
    }
}

bool dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
{
    assert(!"implemented");
    return false;
}

size_t dvmHeapSourceChunkSize(const void *ptr)
{
    assert(!"implemented");
    return 0;
}

size_t dvmHeapSourceFootprint(void)
{
    assert(!"implemented");
    return 0;
}

/*
 * Returns the "ideal footprint" which appears to be the number of
 * bytes currently committed to the heap. This starts out at the
 * start size of the heap and grows toward the maximum size.
 */
size_t dvmHeapSourceGetIdealFootprint(void)
{
    return gDvm.gcHeap->heapSource->currentSize;
}

float dvmGetTargetHeapUtilization(void)
{
    return 0.5f;
}

void dvmSetTargetHeapUtilization(float newTarget)
{
    assert(newTarget > 0.0f && newTarget < 1.0f);
}

size_t dvmMinimumHeapSize(size_t size, bool set)
{
    return gDvm.gcHeap->heapSource->minimumSize;
}

/*
 * Expands the size of the heap after a collection. At present we
 * commit the pages for the maximum size of the heap so this routine
 * is just a no-op. Eventually, we will either allocate or commit
 * pages on an as-needed basis.
 */
void dvmHeapSourceGrowForUtilization(void)
{
    /* do nothing */
}

void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    /* do nothing */
}

void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen,
                                        const void *userptr, size_t userlen,
                                        void *arg),
                       void *arg)
{
    assert(!"implemented");
}

size_t dvmHeapSourceGetNumHeaps(void)
{
    return 1;
}

bool dvmTrackExternalAllocation(size_t n)
{
    /* do nothing */
    return true;
}

void dvmTrackExternalFree(size_t n)
{
    /* do nothing */
}

size_t dvmGetExternalBytesAllocated(void)
{
    assert(!"implemented");
    return 0;
}

void dvmHeapSourceFlip(void)
{
    HeapSource *heapSource;
    size_t i;

    heapSource = gDvm.gcHeap->heapSource;

    /* Reset the block queue. */
    heapSource->allocBlocks = 0;
    heapSource->queueSize = 0;
    heapSource->queueHead = QUEUE_TAIL;

    /* TODO(cshapiro): pad the current (prev) block. */

    heapSource->allocPtr = NULL;
    heapSource->allocLimit = NULL;

    /* Whiten all allocated blocks. */
    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) {
            heapSource->blockSpace[i] = BLOCK_FROM_SPACE;
        }
    }
}

static void room(size_t *alloc, size_t *avail, size_t *total)
{
    HeapSource *heapSource;

    heapSource = gDvm.gcHeap->heapSource;
    *total = heapSource->totalBlocks*BLOCK_SIZE;
    *alloc = heapSource->allocBlocks*BLOCK_SIZE;
    *avail = *total - *alloc;
}

static bool isSpaceInternal(u1 *addr, int space)
{
    HeapSource *heapSource;
    u1 *base, *limit;
    size_t offset;
    char space2;

    heapSource = gDvm.gcHeap->heapSource;
    base = heapSource->blockBase;
    assert(addr >= base);
    limit = heapSource->blockBase + heapSource->maximumSize;
    assert(addr < limit);
    offset = addr - base;
    space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT];
    return space == space2;
}

static bool fromSpaceContains(const void *addr)
{
    return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE);
}

static bool toSpaceContains(const void *addr)
{
    return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE);
}

/*
 * Notifies the collector that the object at the given address must
 * remain stationary during the current collection.
 */
static void pinObject(const Object *obj)
{
    promoteBlockByAddr(gDvm.gcHeap->heapSource, obj);
}

static size_t sumHeapBitmap(const HeapBitmap *bitmap)
{
    size_t i, sum;

    sum = 0;
    for (i = 0; i < bitmap->bitsLen >> 2; ++i) {
        sum += CLZ(bitmap->bits[i]);
    }
    return sum;
}

/*
 * Miscellaneous functionality.
 */

static int isForward(const void *addr)
{
    return (uintptr_t)addr & 0x1;
}

static void setForward(const void *toObj, void *fromObj)
{
    *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1;
}

static void* getForward(const void *fromObj)
{
    return (void *)((uintptr_t)fromObj & ~0x1);
}
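
/*
 * Sketch of the forwarding encoding. Objects are ALLOC_ALIGNMENT
 * (8-byte) aligned, so the low bit of a class pointer is normally
 * clear; after transport, the first word of the from-space copy (its
 * clazz slot) is overwritten with the to-space address tagged with
 * bit 0. The snippet below is illustrative only and never compiled.
 */
#if 0
static void forwardingExample(Object *fromObj, Object *toObj)
{
    setForward(toObj, fromObj);             /* fromObj->clazz becomes toObj|1 */
    assert(isForward(fromObj->clazz));      /* low bit set: already copied */
    assert(getForward(fromObj->clazz) == (void *)toObj); /* mask recovers toObj */
}
#endif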

/* Beware: uses the same encoding as a forwarding pointer! */
static int isPermanentString(const StringObject *obj) {
    return (uintptr_t)obj & 0x1;
}

static void* getPermanentString(const StringObject *obj)
{
    return (void *)((uintptr_t)obj & ~0x1);
}


/*
 * Scavenging and transporting routines follow. A transporter grays
 * an object. A scavenger blackens an object. We define these
 * routines for each fundamental object type. Dispatch is performed
 * in scavengeObject.
 */

/*
 * Class object scavenging.
 */
static void scavengeClassObject(ClassObject *obj)
{
    int i;

    LOG_SCAV("scavengeClassObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->obj.clazz != NULL);
    assert(obj->obj.clazz->descriptor != NULL);
    assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;"));
    assert(obj->descriptor != NULL);
    LOG_SCAV("scavengeClassObject: descriptor='%s',vtableCount=%zu",
             obj->descriptor, obj->vtableCount);
    /* Delegate class object and instance field scavenging. */
    scavengeDataObject((Object *)obj);
    /* Scavenge the array element class object. */
    if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) {
        scavengeReference((Object **)(void *)&obj->elementClass);
    }
    /* Scavenge the superclass. */
    scavengeReference((Object **)(void *)&obj->super);
    /* Scavenge the class loader. */
    scavengeReference(&obj->classLoader);
    /* Scavenge static fields. */
    for (i = 0; i < obj->sfieldCount; ++i) {
        char ch = obj->sfields[i].field.signature[0];
        if (ch == '[' || ch == 'L') {
            scavengeReference((Object **)(void *)&obj->sfields[i].value.l);
        }
    }
    /* Scavenge interface class objects. */
    for (i = 0; i < obj->interfaceCount; ++i) {
        scavengeReference((Object **) &obj->interfaces[i]);
    }
}

/*
 * Array object scavenging.
 */
static size_t scavengeArrayObject(ArrayObject *array)
{
    size_t i, length;

    LOG_SCAV("scavengeArrayObject(array=%p)", array);
    /* Scavenge the class object. */
    assert(toSpaceContains(array));
    assert(array != NULL);
    assert(array->obj.clazz != NULL);
    scavengeReference((Object **) array);
    length = dvmArrayObjectSize(array);
    /* Scavenge the array contents. */
    if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) {
        Object **contents = (Object **)array->contents;
        for (i = 0; i < array->length; ++i) {
            scavengeReference(&contents[i]);
        }
    }
    return length;
}

/*
 * Reference object scavenging.
 */

static int getReferenceFlags(const Object *obj)
{
    int flags;

    flags = CLASS_ISREFERENCE |
            CLASS_ISWEAKREFERENCE |
            CLASS_ISPHANTOMREFERENCE;
    return GET_CLASS_FLAG_GROUP(obj->clazz, flags);
}

static int isSoftReference(const Object *obj)
{
    return getReferenceFlags(obj) == CLASS_ISREFERENCE;
}

static int isWeakReference(const Object *obj)
{
    return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE;
}

#ifndef NDEBUG
static bool isPhantomReference(const Object *obj)
{
    return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE;
}
#endif

/*
 * Returns true if the reference was registered with a reference queue
 * but has not yet been appended to it.
 */
static bool isReferenceEnqueuable(const Object *ref)
{
    Object *queue, *queueNext;

    queue = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue);
    queueNext = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext);
    if (queue == NULL || queueNext != NULL) {
        /*
         * There is no queue, or the reference has already
         * been enqueued. The Reference.enqueue() method
         * will do nothing even if we call it.
         */
        return false;
    }

    /*
     * We need to call enqueue(), but if we called it from
     * here we'd probably deadlock. Schedule a call.
     */
    return true;
}

/*
 * Schedules a reference to be appended to its reference queue.
 */
static void enqueueReference(Object *ref)
{
    assert(ref != NULL);
    assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue) != NULL);
    assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext) == NULL);
    if (!dvmHeapAddRefToLargeTable(&gDvm.gcHeap->referenceOperations, ref)) {
        LOGE("no room for any more reference operations");
        dvmAbort();
    }
}

/*
 * Sets the referent field of a reference object to NULL.
 */
static void clearReference(Object *obj)
{
    dvmSetFieldObject(obj, gDvm.offJavaLangRefReference_referent, NULL);
}

/*
 * Clears reference objects with white referents.
 */
void clearWhiteReferences(Object **list)
{
    size_t referentOffset, vmDataOffset;
    bool doSignal;

    vmDataOffset = gDvm.offJavaLangRefReference_vmData;
    referentOffset = gDvm.offJavaLangRefReference_referent;
    doSignal = false;
    while (*list != NULL) {
        Object *ref = *list;
        JValue *field = dvmFieldPtr(ref, referentOffset);
        Object *referent = field->l;
        *list = dvmGetFieldObject(ref, vmDataOffset);
        assert(referent != NULL);
        if (isForward(referent->clazz)) {
            field->l = referent = getForward(referent->clazz);
            continue;
        }
        if (fromSpaceContains(referent)) {
            /* Referent is white, clear it. */
            clearReference(ref);
            if (isReferenceEnqueuable(ref)) {
                enqueueReference(ref);
                doSignal = true;
            }
        }
    }
    /*
     * If we cleared a reference with a reference queue we must notify
     * the heap worker to append the reference.
     */
    if (doSignal) {
        dvmSignalHeapWorker(false);
    }
    assert(*list == NULL);
}
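
/*
 * Shape of the lists consumed here and in preserveSoftReferences
 * (sketch; reference names invented): during the trace,
 * scavengeReferenceObject() below pushes each Reference whose
 * referent is still white onto a per-type list
 * (gDvm.gcHeap->softReferences, weakReferences, or phantomReferences),
 * using the Reference's own vmData slot as the "next" link. Adding
 * soft references r1, r2, r3 in that order yields
 * softReferences -> r3, r3.vmData -> r2, r2.vmData -> r1,
 * r1.vmData -> NULL; the loops above and below simply walk that chain.
 */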

/*
 * Blackens referents subject to the soft reference preservation
 * policy.
 */
void preserveSoftReferences(Object **list)
{
    Object *ref;
    Object *prev, *next;
    size_t referentOffset, vmDataOffset;
    unsigned counter;
    bool white;

    vmDataOffset = gDvm.offJavaLangRefReference_vmData;
    referentOffset = gDvm.offJavaLangRefReference_referent;
    counter = 0;
    prev = next = NULL;
    ref = *list;
    while (ref != NULL) {
        JValue *field = dvmFieldPtr(ref, referentOffset);
        Object *referent = field->l;
        next = dvmGetFieldObject(ref, vmDataOffset);
        assert(referent != NULL);
        if (isForward(referent->clazz)) {
            /* Referent is black. */
            field->l = referent = getForward(referent->clazz);
            white = false;
        } else {
            white = fromSpaceContains(referent);
        }
        if (!white && ((++counter) & 1)) {
            /* Referent is white and biased toward saving, gray it. */
            scavengeReference((Object **)(void *)&field->l);
            white = true;
        }
        if (white) {
            /* Referent is black, unlink it. */
            if (prev != NULL) {
                dvmSetFieldObject(ref, vmDataOffset, NULL);
                dvmSetFieldObject(prev, vmDataOffset, next);
            }
        } else {
            /* Referent is white, skip over it. */
            prev = ref;
        }
        ref = next;
    }
    /*
     * Restart the trace with the newly gray references added to the
     * root set.
     */
    scavengeBlockQueue();
}

void processFinalizableReferences(void)
{
    HeapRefTable newPendingRefs;
    LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
    Object **ref;
    Object **lastRef;
    size_t totalPendCount;

    /*
     * All strongly reachable objects are black.
     * Any white finalizable objects need to be finalized.
     */

    /* Create a table that the new pending refs will
     * be added to.
     */
    if (!dvmHeapInitHeapRefTable(&newPendingRefs)) {
        //TODO: mark all finalizable refs and hope that
        //      we can schedule them next time.  Watch out,
        //      because we may be expecting to free up space
        //      by calling finalizers.
        LOG_REF("no room for pending finalizations\n");
        dvmAbort();
    }

    /*
     * Walk through finalizableRefs and move any white references to
     * the list of new pending refs.
     */
    totalPendCount = 0;
    while (finRefs != NULL) {
        Object **gapRef;
        size_t newPendCount = 0;

        gapRef = ref = finRefs->refs.table;
        lastRef = finRefs->refs.nextEntry;
        while (ref < lastRef) {
            if (fromSpaceContains(*ref)) {
                if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
                    //TODO: add the current table and allocate
                    //      a new, smaller one.
                    LOG_REF("no room for any more pending finalizations: %zd\n",
                            dvmHeapNumHeapRefTableEntries(&newPendingRefs));
                    dvmAbort();
                }
                newPendCount++;
            } else {
                /* This ref is black, so will remain on finalizableRefs.
                 */
                if (newPendCount > 0) {
                    /* Copy it up to fill the holes.
                     */
                    *gapRef++ = *ref;
                } else {
                    /* No holes yet; don't bother copying.
                     */
                    gapRef++;
                }
            }
            ref++;
        }
        finRefs->refs.nextEntry = gapRef;
        //TODO: if the table is empty when we're done, free it.
        totalPendCount += newPendCount;
        finRefs = finRefs->next;
    }
    LOG_REF("%zd finalizers triggered.\n", totalPendCount);
    if (totalPendCount == 0) {
        /* No objects required finalization.
         * Free the empty temporary table.
         */
        dvmClearReferenceTable(&newPendingRefs);
        return;
    }

    /* Add the new pending refs to the main list.
     */
    if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
                                     &newPendingRefs))
    {
        LOG_REF("can't insert new pending finalizations\n");
        dvmAbort();
    }

    //TODO: try compacting the main list with a memcpy loop

    /* Blacken the refs we just moved; we don't want them or their
     * children to get swept yet.
     */
    ref = newPendingRefs.table;
    lastRef = newPendingRefs.nextEntry;
    assert(ref < lastRef);
    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
    while (ref < lastRef) {
        scavengeReference(ref);
        ref++;
    }
    HPROF_CLEAR_GC_SCAN_STATE();
    scavengeBlockQueue();
    dvmSignalHeapWorker(false);
}

/*
 * If a reference points to from-space and has been forwarded, we snap
 * the pointer to its new to-space address. If the reference points
 * to an unforwarded from-space address we must enqueue the reference
 * for later processing. TODO: implement proper reference processing
 * and move the referent scavenging elsewhere.
 */
static void scavengeReferenceObject(Object *obj)
{
    Object *referent;
    Object **queue;
    size_t referentOffset, vmDataOffset;

    assert(obj != NULL);
    LOG_SCAV("scavengeReferenceObject(obj=%p),'%s'", obj, obj->clazz->descriptor);
    scavengeDataObject(obj);
    referentOffset = gDvm.offJavaLangRefReference_referent;
    referent = dvmGetFieldObject(obj, referentOffset);
    if (referent == NULL || toSpaceContains(referent)) {
        return;
    }
    if (isSoftReference(obj)) {
        queue = &gDvm.gcHeap->softReferences;
    } else if (isWeakReference(obj)) {
        queue = &gDvm.gcHeap->weakReferences;
    } else {
        assert(isPhantomReference(obj));
        queue = &gDvm.gcHeap->phantomReferences;
    }
    vmDataOffset = gDvm.offJavaLangRefReference_vmData;
    dvmSetFieldObject(obj, vmDataOffset, *queue);
    *queue = obj;
    LOG_SCAV("scavengeReferenceObject: enqueueing %p", obj);
}

/*
 * Data object scavenging.
 */
static void scavengeDataObject(Object *obj)
{
    ClassObject *clazz;
    int i;

    // LOG_SCAV("scavengeDataObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->clazz != NULL);
    assert(obj->clazz->objectSize != 0);
    assert(toSpaceContains(obj));
    /* Scavenge the class object. */
    clazz = obj->clazz;
    scavengeReference((Object **) obj);
    /* Scavenge instance fields. */
    if (clazz->refOffsets != CLASS_WALK_SUPER) {
        size_t refOffsets = clazz->refOffsets;
        while (refOffsets != 0) {
            size_t rshift = CLZ(refOffsets);
            size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
            Object **ref = (Object **)((u1 *)obj + offset);
            scavengeReference(ref);
            refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
        }
    } else {
        for (; clazz != NULL; clazz = clazz->super) {
            InstField *field = clazz->ifields;
            for (i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
                size_t offset = field->byteOffset;
                Object **ref = (Object **)((u1 *)obj + offset);
                scavengeReference(ref);
            }
        }
    }
}
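
/*
 * Note on the fast path above (sketch): each set bit in
 * clazz->refOffsets encodes one reference-holding instance field.
 * CLZ locates the highest set bit, CLASS_OFFSET_FROM_CLZ (defined
 * elsewhere in the VM) converts that bit position to the field's byte
 * offset, and the bit is then cleared, so an object with three
 * reference fields triggers exactly three scavengeReference() calls
 * without walking the superclass chain. Classes whose reference
 * fields do not fit this encoding carry CLASS_WALK_SUPER and fall
 * back to the explicit ifields walk.
 */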

static Object *transportObject(const Object *fromObj)
{
    Object *toObj;
    size_t allocSize, copySize;

    LOG_TRAN("transportObject(fromObj=%p) allocBlocks=%zu",
             fromObj,
             gDvm.gcHeap->heapSource->allocBlocks);
    assert(fromObj != NULL);
    assert(fromSpaceContains(fromObj));
    allocSize = copySize = objectSize(fromObj);
    if (LW_HASH_STATE(fromObj->lock) != LW_HASH_STATE_UNHASHED) {
        /*
         * The object has been hashed or hashed and moved. We must
         * reserve an additional word for a hash code.
         */
        allocSize += sizeof(u4);
    }
    if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
        /*
         * The object has its hash code allocated. Ensure the hash
         * code is copied along with the instance data.
         */
        copySize += sizeof(u4);
    }
    /* TODO(cshapiro): don't copy, re-map large data objects. */
    assert(copySize <= allocSize);
    toObj = allocateGray(allocSize);
    assert(toObj != NULL);
    assert(toSpaceContains(toObj));
    memcpy(toObj, fromObj, copySize);
    if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED) {
        /*
         * The object has had its hash code exposed. Append it to the
         * instance and set a bit so we know to look for it there.
         */
        *(u4 *)(((char *)toObj) + copySize) = (u4)fromObj >> 3;
        toObj->lock |= LW_HASH_STATE_HASHED_AND_MOVED << LW_HASH_STATE_SHIFT;
    }
    LOG_TRAN("transportObject: from %p/%zu to %p/%zu (%zu,%zu) %s",
             fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj),
             toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj),
             copySize, allocSize, copySize < allocSize ? "DIFFERENT" : "");
    return toObj;
}
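
/*
 * Worked example of the hash-state arithmetic above (sizes invented):
 * take a 24-byte object whose identity hash code has been exposed but
 * that has never moved (LW_HASH_STATE_HASHED). allocSize becomes 28
 * while copySize stays 24: the 24 instance bytes are copied, the old
 * from-space address (shifted right by 3) is stored in the extra
 * trailing word as the hash code, and the lock word is flipped to
 * LW_HASH_STATE_HASHED_AND_MOVED. On a later collection both
 * copySize and allocSize come out to 28, so the appended hash word
 * travels with the object.
 */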

/*
 * Generic reference scavenging.
 */

/*
 * Given a reference to an object, the scavenge routine will gray the
 * reference. Any objects pointed to by the scavenged object will be
 * transported to new space and a forwarding pointer will be installed
 * in the header of the object.
 */

/*
 * Blacken the given pointer. If the pointer is in from-space, it is
 * transported to new space. If the object has a forwarding pointer
 * installed it has already been transported and the referent is
 * snapped to the new address.
 */
static void scavengeReference(Object **obj)
{
    ClassObject *clazz;
    Object *fromObj, *toObj;

    assert(obj);

    if (*obj == NULL) return;

    assert(dvmIsValidObject(*obj));

    /* The entire block is black. */
    if (toSpaceContains(*obj)) {
        LOG_SCAV("scavengeReference skipping pinned object @ %p", *obj);
        return;
    }
    LOG_SCAV("scavengeReference(*obj=%p)", *obj);

    assert(fromSpaceContains(*obj));

    clazz = (*obj)->clazz;

    if (isForward(clazz)) {
        // LOG_SCAV("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1));
        *obj = (Object *)getForward(clazz);
        return;
    }
    fromObj = *obj;
    if (clazz == NULL) {
        // LOG_SCAV("scavengeReference %p has a NULL class object", fromObj);
        assert(!"implemented");
        toObj = NULL;
    } else {
        toObj = transportObject(fromObj);
    }
    setForward(toObj, fromObj);
    *obj = (Object *)toObj;
}
1428
Carl Shapirod28668c2010-04-15 16:10:00 -07001429/*
1430 * Generic object scavenging.
1431 */
Carl Shapiro2396fda2010-05-03 20:14:14 -07001432static void scavengeObject(Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07001433{
1434 ClassObject *clazz;
Carl Shapirod28668c2010-04-15 16:10:00 -07001435
1436 assert(obj != NULL);
Carl Shapiro952e84a2010-05-06 14:35:29 -07001437 assert(obj->clazz != NULL);
1438 assert(!((uintptr_t)obj->clazz & 0x1));
Carl Shapirod28668c2010-04-15 16:10:00 -07001439 clazz = obj->clazz;
Carl Shapirod28668c2010-04-15 16:10:00 -07001440 if (clazz == gDvm.classJavaLangClass) {
Carl Shapiro2396fda2010-05-03 20:14:14 -07001441 scavengeClassObject((ClassObject *)obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001442 } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
Carl Shapiro2396fda2010-05-03 20:14:14 -07001443 scavengeArrayObject((ArrayObject *)obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001444 } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
Carl Shapiro952e84a2010-05-06 14:35:29 -07001445 scavengeReferenceObject(obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001446 } else {
Carl Shapiro952e84a2010-05-06 14:35:29 -07001447 scavengeDataObject(obj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001448 }
Carl Shapirod28668c2010-04-15 16:10:00 -07001449}

/*
 * External root scavenging routines.
 */

static void pinHashTableEntries(HashTable *table)
{
    HashEntry *entry;
    void *obj;
    int i;

    LOG_PIN(">>> pinHashTableEntries(table=%p)", table);
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        }
        pinObject(entry->data);
    }
    dvmHashTableUnlock(table);
    LOG_PIN("<<< pinHashTableEntries(table=%p)", table);
}

static void pinPrimitiveClasses(void)
{
    size_t length;
    size_t i;

    length = ARRAYSIZE(gDvm.primitiveClass);
    for (i = 0; i < length; i++) {
        if (gDvm.primitiveClass[i] != NULL) {
            pinObject((Object *)gDvm.primitiveClass[i]);
        }
    }
}

/*
 * Scavenge interned strings. Permanent interned strings will have
 * been pinned and are therefore ignored. Non-permanent strings that
 * have been forwarded are snapped. All other entries are removed.
 */
static void scavengeInternedStrings(void)
{
    HashTable *table;
    HashEntry *entry;
    Object *obj;
    int i;

    table = gDvm.internedStrings;
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = (Object *)entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        } else if (!isPermanentString((StringObject *)obj)) {
            // LOG_SCAV("entry->data=%p", entry->data);
            LOG_SCAV(">>> string obj=%p", entry->data);
            /* TODO(cshapiro): detach white string objects */
            scavengeReference((Object **)(void *)&entry->data);
            LOG_SCAV("<<< string obj=%p", entry->data);
        }
    }
    dvmHashTableUnlock(table);
}

static void pinInternedStrings(void)
{
    HashTable *table;
    HashEntry *entry;
    Object *obj;
    int i;

    table = gDvm.internedStrings;
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = (Object *)entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        } else if (isPermanentString((StringObject *)obj)) {
            obj = (Object *)getPermanentString((StringObject*)obj);
            LOG_PROM(">>> pin string obj=%p", obj);
            pinObject(obj);
            LOG_PROM("<<< pin string obj=%p", obj);
        }
    }
    dvmHashTableUnlock(table);
}

/*
 * At present, reference tables contain references that must not be
 * moved by the collector. Instead of scavenging each reference in
 * the table we pin each referenced object.
 */
static void pinReferenceTable(const ReferenceTable *table)
{
    Object **entry;

    assert(table != NULL);
    assert(table->table != NULL);
    assert(table->nextEntry != NULL);
    for (entry = table->table; entry < table->nextEntry; ++entry) {
        assert(entry != NULL);
        assert(!isForward(*entry));
        pinObject(*entry);
    }
}

static void scavengeLargeHeapRefTable(LargeHeapRefTable *table)
{
    for (; table != NULL; table = table->next) {
        Object **ref = table->refs.table;
        for (; ref < table->refs.nextEntry; ++ref) {
            scavengeReference(ref);
        }
    }
}

/* This code was copied from Thread.c */
static void scavengeThreadStack(Thread *thread)
{
    const u4 *framePtr;
#if WITH_EXTRA_GC_CHECKS > 1
    bool first = true;
#endif

    framePtr = (const u4 *)thread->curFrame;
    while (framePtr != NULL) {
        const StackSaveArea *saveArea;
        const Method *method;

        saveArea = SAVEAREA_FROM_FP(framePtr);
        method = saveArea->method;
        if (method != NULL && !dvmIsNativeMethod(method)) {
#ifdef COUNT_PRECISE_METHODS
            /* the GC is running, so no lock required */
            if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
                LOG_SCAV("PGC: added %s.%s %p\n",
                         method->clazz->descriptor, method->name, method);
#endif
#if WITH_EXTRA_GC_CHECKS > 1
            /*
             * May also want to enable the memset() in the "invokeMethod"
             * goto target in the portable interpreter. That sets the stack
             * to a pattern that makes referring to uninitialized data
             * very obvious.
             */

            if (first) {
                /*
                 * First frame, isn't native, check the "alternate" saved PC
                 * as a sanity check.
                 *
                 * It seems like we could check the second frame if the first
                 * is native, since the PCs should be the same. It turns out
                 * this doesn't always work. The problem is that we could
                 * have calls in the sequence:
                 *   interp method #2
                 *   native method
                 *   interp method #1
                 *
                 * and then GC while in the native method after returning
                 * from interp method #2. The currentPc on the stack is
                 * for interp method #1, but thread->currentPc2 is still
                 * set for the last thing interp method #2 did.
                 *
                 * This can also happen in normal execution:
                 * - sget-object on not-yet-loaded class
                 * - class init updates currentPc2
                 * - static field init is handled by parsing annotations;
                 *   static String init requires creation of a String object,
                 *   which can cause a GC
                 *
                 * Essentially, any pattern that involves executing
                 * interpreted code and then causes an allocation without
                 * executing instructions in the original method will hit
                 * this. These are rare enough that the test still has
                 * some value.
                 */
                if (saveArea->xtra.currentPc != thread->currentPc2) {
                    LOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p\n",
                         saveArea->xtra.currentPc, thread->currentPc2,
                         method->clazz->descriptor, method->name, method->insns);
                    if (saveArea->xtra.currentPc != NULL)
                        LOGE("  pc inst = 0x%04x\n", *saveArea->xtra.currentPc);
                    if (thread->currentPc2 != NULL)
                        LOGE("  pc2 inst = 0x%04x\n", *thread->currentPc2);
                    dvmDumpThread(thread, false);
                }
            } else {
                /*
                 * It's unusual, but not impossible, for a non-first frame
                 * to be at something other than a method invocation. For
                 * example, if we do a new-instance on a nonexistent class,
                 * we'll have a lot of class loader activity on the stack
                 * above the frame with the "new" operation. Could also
                 * happen while we initialize a Throwable when an instruction
                 * fails.
                 *
                 * So there's not much we can do here to verify the PC,
                 * except to verify that it's a GC point.
                 */
            }
            assert(saveArea->xtra.currentPc != NULL);
#endif

            const RegisterMap* pMap;
            const u1* regVector;
            int i;

            Method* nonConstMethod = (Method*) method;  // quiet gcc
            pMap = dvmGetExpandedRegisterMap(nonConstMethod);

            //LOG_SCAV("PGC: %s.%s\n", method->clazz->descriptor, method->name);

            if (pMap != NULL) {
                /* found map, get registers for this address */
                int addr = saveArea->xtra.currentPc - method->insns;
                regVector = dvmRegisterMapGetLine(pMap, addr);
                /*
                if (regVector == NULL) {
                    LOG_SCAV("PGC: map but no entry for %s.%s addr=0x%04x\n",
                             method->clazz->descriptor, method->name, addr);
                } else {
                    LOG_SCAV("PGC: found map for %s.%s 0x%04x (t=%d)\n",
                             method->clazz->descriptor, method->name, addr,
                             thread->threadId);
                }
                */
            } else {
                /*
                 * No map found. If precise GC is disabled this is
                 * expected -- we don't create pointers to the map data even
                 * if it's present -- but if it's enabled it means we're
                 * unexpectedly falling back on a conservative scan, so it's
                 * worth yelling a little.
1698 */
1699 if (gDvm.preciseGc) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001700 LOG_SCAV("PGC: no map for %s.%s\n", method->clazz->descriptor, method->name);
Carl Shapirod28668c2010-04-15 16:10:00 -07001701 }
1702 regVector = NULL;
1703 }
Carl Shapirod28668c2010-04-15 16:10:00 -07001704 if (regVector == NULL) {
Carl Shapiro88b00352010-05-19 17:38:33 -07001705 /*
1706 * There are no roots to scavenge. Skip over the entire frame.
1707 */
1708 framePtr += method->registersSize;
Carl Shapirod28668c2010-04-15 16:10:00 -07001709 } else {
1710 /*
1711 * Precise scan. v0 is at the lowest address on the
1712 * interpreted stack, and is the first bit in the register
1713 * vector, so we can walk through the register map and
1714 * memory in the same direction.
1715 *
1716 * A '1' bit indicates a live reference.
1717 */
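                /*
                 * Example: with registersSize == 3 and a register map
                 * byte of 0x05 (binary 101), v0 and v2 are treated as
                 * live references and v1 is skipped.
                 */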
1718 u2 bits = 1 << 1;
1719 for (i = method->registersSize - 1; i >= 0; i--) {
Carl Shapirod28668c2010-04-15 16:10:00 -07001720 u4 rval = *framePtr;
1721
1722 bits >>= 1;
1723 if (bits == 1) {
1724 /* set bit 9 so we can tell when we're empty */
1725 bits = *regVector++ | 0x0100;
Carl Shapirod28668c2010-04-15 16:10:00 -07001726 }
1727
1728 if (rval != 0 && (bits & 0x01) != 0) {
1729 /*
1730 * Non-null, register marked as live reference. This
1731 * should always be a valid object.
1732 */
1733#if WITH_EXTRA_GC_CHECKS > 0
1734 if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) {
1735 /* this is very bad */
1736 LOGE("PGC: invalid ref in reg %d: 0x%08x\n",
1737 method->registersSize-1 - i, rval);
1738 } else
1739#endif
1740 {
1741
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001742 // LOG_SCAV("stack reference %u@%p", *framePtr, framePtr);
Carl Shapirod28668c2010-04-15 16:10:00 -07001743 /* dvmMarkObjectNonNull((Object *)rval); */
1744 scavengeReference((Object **) framePtr);
1745 }
1746 } else {
1747 /*
1748 * Null or non-reference, do nothing at all.
1749 */
1750#if WITH_EXTRA_GC_CHECKS > 1
1751 if (dvmIsValidObject((Object*) rval)) {
1752 /* this is normal, but we feel chatty */
1753 LOGD("PGC: ignoring valid ref in reg %d: 0x%08x\n",
1754 method->registersSize-1 - i, rval);
1755 }
1756#endif
1757 }
1758 ++framePtr;
1759 }
1760 dvmReleaseRegisterMapLine(pMap, regVector);
1761 }
1762 }
Carl Shapiro952e84a2010-05-06 14:35:29 -07001763 /* else this is a break frame and there is nothing to gray, or
Carl Shapirod28668c2010-04-15 16:10:00 -07001764 * this is a native method and the registers are just the "ins",
1765 * copied from various registers in the caller's set.
1766 */
1767
1768#if WITH_EXTRA_GC_CHECKS > 1
1769 first = false;
1770#endif
1771
1772 /* Don't fall into an infinite loop if things get corrupted.
1773 */
1774 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1775 saveArea->prevFrame == NULL);
1776 framePtr = saveArea->prevFrame;
1777 }
1778}
1779
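/*
 * Scavenges the roots of a single thread: the thread object, the
 * pending exception, and the interpreted stack.
 */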
1780static void scavengeThread(Thread *thread)
1781{
1782 assert(thread->status != THREAD_RUNNING ||
1783 thread->isSuspended ||
1784 thread == dvmThreadSelf());
1785
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001786 // LOG_SCAV("scavengeThread(thread=%p)", thread);
Carl Shapirod28668c2010-04-15 16:10:00 -07001787
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001788 // LOG_SCAV("Scavenging threadObj=%p", thread->threadObj);
Carl Shapirod28668c2010-04-15 16:10:00 -07001789 scavengeReference(&thread->threadObj);
1790
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001791 // LOG_SCAV("Scavenging exception=%p", thread->exception);
Carl Shapirod28668c2010-04-15 16:10:00 -07001792 scavengeReference(&thread->exception);
1793
1794 scavengeThreadStack(thread);
1795}
1796
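/*
 * Scavenges the roots of every thread on the thread list.
 */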
1797static void scavengeThreadList(void)
1798{
1799 Thread *thread;
1800
1801 dvmLockThreadList(dvmThreadSelf());
1802 thread = gDvm.threadList;
1803 while (thread) {
1804 scavengeThread(thread);
1805 thread = thread->next;
1806 }
1807 dvmUnlockThreadList();
1808}
1809
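/*
 * Walks the interpreted stack of a thread and pins objects that must
 * not move: the reference arguments of native frames, located
 * precisely from the method shorty, and, for interpreted frames
 * without a register map, anything that looks like a valid object
 * (a conservative scan).
 */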
Carl Shapiro88b00352010-05-19 17:38:33 -07001810static void pinThreadStack(const Thread *thread)
Carl Shapiro583d64c2010-05-04 10:44:47 -07001811{
1812 const u4 *framePtr;
1813 const StackSaveArea *saveArea;
Carl Shapiro88b00352010-05-19 17:38:33 -07001814 Method *method;
Carl Shapiro583d64c2010-05-04 10:44:47 -07001815 const char *shorty;
1816 Object *obj;
1817 int i;
1818
1819 saveArea = NULL;
1820 framePtr = (const u4 *)thread->curFrame;
1821 for (; framePtr != NULL; framePtr = saveArea->prevFrame) {
1822 saveArea = SAVEAREA_FROM_FP(framePtr);
Carl Shapiro88b00352010-05-19 17:38:33 -07001823 method = (Method *)saveArea->method;
Carl Shapiro583d64c2010-05-04 10:44:47 -07001824 if (method != NULL && dvmIsNativeMethod(method)) {
1825 /*
Carl Shapiro88b00352010-05-19 17:38:33 -07001826             * This is a native method; pin its arguments.
1827 *
Carl Shapiro952e84a2010-05-06 14:35:29 -07001828 * For purposes of graying references, we don't need to do
Carl Shapiro583d64c2010-05-04 10:44:47 -07001829 * anything here, because all of the native "ins" were copied
1830 * from registers in the caller's stack frame and won't be
1831 * changed (an interpreted method can freely use registers
1832 * with parameters like any other register, but natives don't
1833 * work that way).
1834 *
1835 * However, we need to ensure that references visible to
1836 * native methods don't move around. We can do a precise scan
1837 * of the arguments by examining the method signature.
1838 */
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001839 LOG_PIN("+++ native scan %s.%s\n",
1840 method->clazz->descriptor, method->name);
Carl Shapiro583d64c2010-05-04 10:44:47 -07001841 assert(method->registersSize == method->insSize);
1842 if (!dvmIsStaticMethod(method)) {
1843 /* grab the "this" pointer */
1844 obj = (Object *)*framePtr++;
1845 if (obj == NULL) {
1846 /*
1847 * This can happen for the "fake" entry frame inserted
1848 * for threads created outside the VM. There's no actual
1849 * call so there's no object. If we changed the fake
1850 * entry method to be declared "static" then this
1851 * situation should never occur.
1852 */
1853 } else {
1854 assert(dvmIsValidObject(obj));
1855 pinObject(obj);
1856 }
1857 }
1858 shorty = method->shorty+1; // skip return value
1859 for (i = method->registersSize - 1; i >= 0; i--, framePtr++) {
1860 switch (*shorty++) {
1861 case 'L':
1862 obj = (Object *)*framePtr;
1863 if (obj != NULL) {
1864 assert(dvmIsValidObject(obj));
1865 pinObject(obj);
1866 }
1867 break;
1868 case 'D':
1869 case 'J':
1870 framePtr++;
1871 break;
1872 default:
1873 /* 32-bit non-reference value */
1874 obj = (Object *)*framePtr; // debug, remove
1875 if (dvmIsValidObject(obj)) { // debug, remove
1876 /* if we see a lot of these, our scan might be off */
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001877 LOG_PIN("+++ did NOT pin obj %p\n", obj);
Carl Shapiro583d64c2010-05-04 10:44:47 -07001878 }
1879 break;
1880 }
1881 }
Carl Shapiro88b00352010-05-19 17:38:33 -07001882 } else if (method != NULL && !dvmIsNativeMethod(method)) {
1883 const RegisterMap* pMap = dvmGetExpandedRegisterMap(method);
1884 const u1* regVector = NULL;
1885
1886 LOGI("conservative : %s.%s\n", method->clazz->descriptor, method->name);
1887
1888 if (pMap != NULL) {
1889 int addr = saveArea->xtra.currentPc - method->insns;
1890 regVector = dvmRegisterMapGetLine(pMap, addr);
1891 }
1892 if (regVector == NULL) {
1893 /*
1894                 * No register map for this frame; conservatively pin anything that looks like a valid object.
1895 */
1896 for (i = 0; i < method->registersSize; ++i) {
1897 u4 regValue = framePtr[i];
1898 if (regValue != 0 && (regValue & 0x3) == 0 && dvmIsValidObject((Object *)regValue)) {
1899 pinObject((Object *)regValue);
1900 }
1901 }
1902 }
Carl Shapiro583d64c2010-05-04 10:44:47 -07001903 }
1904 /*
1905 * Don't fall into an infinite loop if things get corrupted.
1906 */
1907 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1908 saveArea->prevFrame == NULL);
1909 }
1910}
1911
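/*
 * Pins the per-thread roots that must not move: native stack
 * arguments and the objects referenced from the thread's local and
 * monitor reference tables.
 */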
1912static void pinThread(const Thread *thread)
Carl Shapirod28668c2010-04-15 16:10:00 -07001913{
1914 assert(thread != NULL);
1915 assert(thread->status != THREAD_RUNNING ||
1916 thread->isSuspended ||
1917 thread == dvmThreadSelf());
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001918 LOG_PIN("pinThread(thread=%p)", thread);
Carl Shapirod28668c2010-04-15 16:10:00 -07001919
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001920 LOG_PIN("Pin native method arguments");
Carl Shapiro88b00352010-05-19 17:38:33 -07001921 pinThreadStack(thread);
Carl Shapiro583d64c2010-05-04 10:44:47 -07001922
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001923 LOG_PIN("Pin internalLocalRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07001924 pinReferenceTable(&thread->internalLocalRefTable);
1925
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001926 LOG_PIN("Pin jniLocalRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07001927 pinReferenceTable(&thread->jniLocalRefTable);
1928
1929 /* Can the check be pushed into the promote routine? */
1930 if (thread->jniMonitorRefTable.table) {
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001931 LOG_PIN("Pin jniMonitorRefTable");
Carl Shapirod28668c2010-04-15 16:10:00 -07001932 pinReferenceTable(&thread->jniMonitorRefTable);
1933 }
1934}
1935
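/*
 * Pins the roots of every thread on the thread list.
 */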
1936static void pinThreadList(void)
1937{
1938 Thread *thread;
1939
1940 dvmLockThreadList(dvmThreadSelf());
1941 thread = gDvm.threadList;
1942 while (thread) {
1943 pinThread(thread);
1944 thread = thread->next;
1945 }
1946 dvmUnlockThreadList();
1947}
1948
1949/*
1950 * Heap block scavenging.
1951 */
1952
1953/*
1954 * Scavenge objects in the current block. Scavenging terminates when
1955 * the pointer reaches the highest address in the block or when a run
1956 * of zero words that continues to the highest address is reached.
1957 */
1958static void scavengeBlock(HeapSource *heapSource, size_t block)
1959{
1960 u1 *cursor;
1961 u1 *end;
1962 size_t size;
1963
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001964 LOG_SCAV("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);
Carl Shapirod28668c2010-04-15 16:10:00 -07001965
1966 assert(heapSource != NULL);
1967 assert(block < heapSource->totalBlocks);
1968 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
1969
1970 cursor = blockToAddress(heapSource, block);
1971 end = cursor + BLOCK_SIZE;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07001972 LOG_SCAV("scavengeBlock start=%p, end=%p", cursor, end);
Carl Shapirod28668c2010-04-15 16:10:00 -07001973
1974 /* Parse and scavenge the current block. */
1975 size = 0;
1976 while (cursor < end) {
1977 u4 word = *(u4 *)cursor;
1978 if (word != 0) {
Carl Shapiro2396fda2010-05-03 20:14:14 -07001979 scavengeObject((Object *)cursor);
1980 size = objectSize((Object *)cursor);
Carl Shapirod28668c2010-04-15 16:10:00 -07001981 size = alignUp(size, ALLOC_ALIGNMENT);
1982 cursor += size;
Carl Shapirod28668c2010-04-15 16:10:00 -07001983 } else {
1984 /* Check for padding. */
1985 while (*(u4 *)cursor == 0) {
1986 cursor += 4;
1987 if (cursor == end) break;
1988 }
1989 /* Punt if something went wrong. */
1990 assert(cursor == end);
1991 }
1992 }
1993}
1994
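/*
 * Returns the size of an object in bytes, including the trailing
 * hash word when the object has been hashed and then moved.
 */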
Carl Shapiro2396fda2010-05-03 20:14:14 -07001995static size_t objectSize(const Object *obj)
Carl Shapirod28668c2010-04-15 16:10:00 -07001996{
1997 size_t size;
1998
Carl Shapiro2396fda2010-05-03 20:14:14 -07001999 assert(obj != NULL);
2000 assert(obj->clazz != NULL);
Barry Hayesc49db852010-05-14 13:43:34 -07002001 if (obj->clazz == gDvm.classJavaLangClass) {
Carl Shapirod28668c2010-04-15 16:10:00 -07002002 size = dvmClassObjectSize((ClassObject *)obj);
2003 } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
2004 size = dvmArrayObjectSize((ArrayObject *)obj);
2005 } else {
Carl Shapiro2396fda2010-05-03 20:14:14 -07002006 assert(obj->clazz->objectSize != 0);
Carl Shapirod28668c2010-04-15 16:10:00 -07002007 size = obj->clazz->objectSize;
2008 }
Carl Shapiro2396fda2010-05-03 20:14:14 -07002009 if (LW_HASH_STATE(obj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
2010 size += sizeof(u4);
2011 }
Carl Shapirod28668c2010-04-15 16:10:00 -07002012 return size;
2013}
2014
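/*
 * Verifies each object in a to-space block, walking the block with
 * the same parsing logic as scavengeBlock().
 */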
2015static void verifyBlock(HeapSource *heapSource, size_t block)
2016{
2017 u1 *cursor;
2018 u1 *end;
2019 size_t size;
2020
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002021 // LOG_VER("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);
Carl Shapirod28668c2010-04-15 16:10:00 -07002022
2023 assert(heapSource != NULL);
2024 assert(block < heapSource->totalBlocks);
2025 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
2026
2027 cursor = blockToAddress(heapSource, block);
2028 end = cursor + BLOCK_SIZE;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002029 // LOG_VER("verifyBlock start=%p, end=%p", cursor, end);
Carl Shapirod28668c2010-04-15 16:10:00 -07002030
2031    /* Parse and verify the current block. */
2032 size = 0;
2033 while (cursor < end) {
2034 u4 word = *(u4 *)cursor;
2035 if (word != 0) {
2036 dvmVerifyObject((Object *)cursor);
2037 size = objectSize((Object *)cursor);
2038 size = alignUp(size, ALLOC_ALIGNMENT);
2039 cursor += size;
Carl Shapirod28668c2010-04-15 16:10:00 -07002040 } else {
2041 /* Check for padding. */
2042            while (*(u4 *)cursor == 0) {
2043 cursor += 4;
2044 if (cursor == end) break;
2045 }
2046 /* Punt if something went wrong. */
2047 assert(cursor == end);
2048 }
2049 }
2050}
2051
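/*
 * Logs the length and contents of the block queue.
 */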
2052static void describeBlockQueue(const HeapSource *heapSource)
2053{
2054 size_t block, count;
2055 char space;
2056
2057 block = heapSource->queueHead;
2058 count = 0;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002059 LOG_SCAV(">>> describeBlockQueue(heapSource=%p)", heapSource);
Carl Shapirod28668c2010-04-15 16:10:00 -07002060 /* Count the number of blocks enqueued. */
2061 while (block != QUEUE_TAIL) {
2062 block = heapSource->blockQueue[block];
2063 ++count;
2064 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002065 LOG_SCAV("blockQueue %zu elements, enqueued %zu",
Carl Shapirod28668c2010-04-15 16:10:00 -07002066 count, heapSource->queueSize);
2067 block = heapSource->queueHead;
2068 while (block != QUEUE_TAIL) {
2069 space = heapSource->blockSpace[block];
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002070        LOG_SCAV("block=%zu@%p,space=%d", block, blockToAddress(heapSource,block), space);
Carl Shapirod28668c2010-04-15 16:10:00 -07002071 block = heapSource->blockQueue[block];
2072 }
2073
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002074 LOG_SCAV("<<< describeBlockQueue(heapSource=%p)", heapSource);
Carl Shapirod28668c2010-04-15 16:10:00 -07002075}
2076
2077/*
2078 * Blackens promoted objects by scavenging each block on the block queue.
2079 */
2080static void scavengeBlockQueue(void)
2081{
2082 HeapSource *heapSource;
2083 size_t block;
2084
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002085 LOG_SCAV(">>> scavengeBlockQueue()");
Carl Shapirod28668c2010-04-15 16:10:00 -07002086 heapSource = gDvm.gcHeap->heapSource;
2087 describeBlockQueue(heapSource);
2088 while (heapSource->queueHead != QUEUE_TAIL) {
2089 block = heapSource->queueHead;
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002090 LOG_SCAV("Dequeueing block %zu\n", block);
Carl Shapirod28668c2010-04-15 16:10:00 -07002091 scavengeBlock(heapSource, block);
2092 heapSource->queueHead = heapSource->blockQueue[block];
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002093 LOG_SCAV("New queue head is %zu\n", heapSource->queueHead);
Carl Shapirod28668c2010-04-15 16:10:00 -07002094 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002095 LOG_SCAV("<<< scavengeBlockQueue()");
Carl Shapirod28668c2010-04-15 16:10:00 -07002096}
2097
2098/*
2099 * Scan the block list and verify all blocks that are marked as being
2100 * in new space. This should be parametrized so we can invoke this
2101 * routine outside of the context of a collection.
2102 */
2103static void verifyNewSpace(void)
2104{
2105 HeapSource *heapSource;
2106 size_t i;
2107 size_t c0, c1, c2, c7;
2108
2109 c0 = c1 = c2 = c7 = 0;
2110 heapSource = gDvm.gcHeap->heapSource;
2111 for (i = 0; i < heapSource->totalBlocks; ++i) {
2112 switch (heapSource->blockSpace[i]) {
2113 case BLOCK_FREE: ++c0; break;
2114 case BLOCK_TO_SPACE: ++c1; break;
2115 case BLOCK_FROM_SPACE: ++c2; break;
2116 case BLOCK_CONTINUED: ++c7; break;
2117 default: assert(!"reached");
2118 }
2119 }
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002120 LOG_VER("Block Demographics: "
2121 "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu",
2122 c0, c1, c2, c7);
Carl Shapirod28668c2010-04-15 16:10:00 -07002123 for (i = 0; i < heapSource->totalBlocks; ++i) {
2124 if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) {
2125 continue;
2126 }
2127 verifyBlock(heapSource, i);
2128 }
2129}
2130
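/*
 * Updates the cached global class pointers in gDvm by scavenging
 * each reference.
 */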
2131static void scavengeGlobals(void)
2132{
2133 scavengeReference((Object **)(void *)&gDvm.classJavaLangClass);
2134 scavengeReference((Object **)(void *)&gDvm.classJavaLangClassArray);
2135 scavengeReference((Object **)(void *)&gDvm.classJavaLangError);
2136 scavengeReference((Object **)(void *)&gDvm.classJavaLangObject);
2137 scavengeReference((Object **)(void *)&gDvm.classJavaLangObjectArray);
2138 scavengeReference((Object **)(void *)&gDvm.classJavaLangRuntimeException);
2139 scavengeReference((Object **)(void *)&gDvm.classJavaLangString);
2140 scavengeReference((Object **)(void *)&gDvm.classJavaLangThread);
2141 scavengeReference((Object **)(void *)&gDvm.classJavaLangVMThread);
2142 scavengeReference((Object **)(void *)&gDvm.classJavaLangThreadGroup);
2143 scavengeReference((Object **)(void *)&gDvm.classJavaLangThrowable);
2144 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElement);
2145 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElementArray);
2146 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArray);
2147 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArrayArray);
2148 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectAccessibleObject);
2149 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructor);
2150 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructorArray);
2151 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectField);
2152 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectFieldArray);
2153 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethod);
2154 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethodArray);
2155 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectProxy);
2156 scavengeReference((Object **)(void *)&gDvm.classJavaLangExceptionInInitializerError);
2157 scavengeReference((Object **)(void *)&gDvm.classJavaLangRefReference);
2158 scavengeReference((Object **)(void *)&gDvm.classJavaNioReadWriteDirectByteBuffer);
2159 scavengeReference((Object **)(void *)&gDvm.classJavaSecurityAccessController);
2160 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
2161 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
2162 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray);
2163 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyNioInternalDirectBuffer);
2164 scavengeReference((Object **)(void *)&gDvm.classArrayBoolean);
2165 scavengeReference((Object **)(void *)&gDvm.classArrayChar);
2166 scavengeReference((Object **)(void *)&gDvm.classArrayFloat);
2167 scavengeReference((Object **)(void *)&gDvm.classArrayDouble);
2168 scavengeReference((Object **)(void *)&gDvm.classArrayByte);
2169 scavengeReference((Object **)(void *)&gDvm.classArrayShort);
2170 scavengeReference((Object **)(void *)&gDvm.classArrayInt);
2171 scavengeReference((Object **)(void *)&gDvm.classArrayLong);
2172}
2173
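/*
 * Logs a description of the blocks in the heap source.
 */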
2174void describeHeap(void)
2175{
2176 HeapSource *heapSource;
2177
2178 heapSource = gDvm.gcHeap->heapSource;
2179 describeBlocks(heapSource);
2180}
2181
2182/*
2183 * The collection interface.  Collection has a few distinct phases.
2184 * The first is flipping, AKA condemning or whitening, the heap.  The
2185 * second is promoting all objects that are pointed to by pinned or
2186 * ambiguous references.  The third phase is tracing from the stacks,
2187 * registers, and various globals.  Lastly, a verification of the heap
2188 * is performed.  The verification phase should be optional.
2189 */
2190void dvmScavengeRoots(void) /* Needs a new name badly */
2191{
Carl Shapirod28668c2010-04-15 16:10:00 -07002192 GcHeap *gcHeap;
2193
2194 {
2195 size_t alloc, unused, total;
2196
2197 room(&alloc, &unused, &total);
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002198 LOG_SCAV("BEFORE GC: %zu alloc, %zu free, %zu total.",
2199 alloc, unused, total);
Carl Shapirod28668c2010-04-15 16:10:00 -07002200 }
2201
2202 gcHeap = gDvm.gcHeap;
2203 dvmHeapSourceFlip();
2204
2205 /*
2206 * Promote blocks with stationary objects.
2207 */
Carl Shapirod28668c2010-04-15 16:10:00 -07002208 pinThreadList();
Carl Shapirod28668c2010-04-15 16:10:00 -07002209 pinReferenceTable(&gDvm.jniGlobalRefTable);
Carl Shapirod28668c2010-04-15 16:10:00 -07002210 pinReferenceTable(&gDvm.jniPinRefTable);
Carl Shapirod28668c2010-04-15 16:10:00 -07002211 pinHashTableEntries(gDvm.loadedClasses);
Carl Shapiro427bf462010-06-04 00:03:18 -07002212 pinHashTableEntries(gDvm.dbgRegistry);
Carl Shapirod28668c2010-04-15 16:10:00 -07002213 pinPrimitiveClasses();
Carl Shapirod28668c2010-04-15 16:10:00 -07002214 pinInternedStrings();
2215
2216 // describeBlocks(gcHeap->heapSource);
2217
2218 /*
2219     * Create the first open new-space page right here.
2220 */
2221
2222 /* Reset allocation to an unallocated block. */
2223 gDvm.gcHeap->heapSource->allocPtr = allocateBlocks(gDvm.gcHeap->heapSource, 1);
2224 gDvm.gcHeap->heapSource->allocLimit = gDvm.gcHeap->heapSource->allocPtr + BLOCK_SIZE;
2225 /*
2226 * Hack: promote the empty block allocated above. If the
2227 * promotions that occurred above did not actually gray any
2228 * objects, the block queue may be empty. We must force a
2229 * promotion to be safe.
2230 */
2231 promoteBlockByAddr(gDvm.gcHeap->heapSource, gDvm.gcHeap->heapSource->allocPtr);
2232
2233 /*
2234 * Scavenge blocks and relocate movable objects.
2235 */
2236
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002237 LOG_SCAV("Scavenging gDvm.threadList");
Carl Shapirod28668c2010-04-15 16:10:00 -07002238 scavengeThreadList();
2239
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002240 LOG_SCAV("Scavenging gDvm.gcHeap->referenceOperations");
Carl Shapiro646ba092010-06-10 15:17:00 -07002241 scavengeLargeHeapRefTable(gcHeap->referenceOperations);
Carl Shapirod28668c2010-04-15 16:10:00 -07002242
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002243 LOG_SCAV("Scavenging gDvm.gcHeap->pendingFinalizationRefs");
Carl Shapiro646ba092010-06-10 15:17:00 -07002244 scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs);
Carl Shapirod28668c2010-04-15 16:10:00 -07002245
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002246 LOG_SCAV("Scavenging random global stuff");
Carl Shapirod28668c2010-04-15 16:10:00 -07002247 scavengeReference(&gDvm.outOfMemoryObj);
2248 scavengeReference(&gDvm.internalErrorObj);
2249 scavengeReference(&gDvm.noClassDefFoundErrorObj);
2250
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002251 // LOG_SCAV("Scavenging gDvm.internedString");
Carl Shapirod28668c2010-04-15 16:10:00 -07002252 scavengeInternedStrings();
2253
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002254 LOG_SCAV("Root scavenge has completed.");
Carl Shapirod28668c2010-04-15 16:10:00 -07002255
2256 scavengeBlockQueue();
2257
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002258 LOG_SCAV("Re-snap global class pointers.");
Carl Shapirod28668c2010-04-15 16:10:00 -07002259 scavengeGlobals();
2260
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002261 LOG_SCAV("New space scavenge has completed.");
Carl Shapirod28668c2010-04-15 16:10:00 -07002262
2263 /*
Carl Shapiro952e84a2010-05-06 14:35:29 -07002264 * Process reference objects in strength order.
2265 */
2266
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002267 LOG_REF("Processing soft references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002268 preserveSoftReferences(&gDvm.gcHeap->softReferences);
2269 clearWhiteReferences(&gDvm.gcHeap->softReferences);
2270
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002271 LOG_REF("Processing weak references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002272 clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2273
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002274 LOG_REF("Finding finalizations...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002275 processFinalizableReferences();
2276
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002277 LOG_REF("Processing f-reachable soft references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002278 clearWhiteReferences(&gDvm.gcHeap->softReferences);
2279
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002280 LOG_REF("Processing f-reachable weak references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002281 clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2282
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002283 LOG_REF("Processing phantom references...");
Carl Shapiro952e84a2010-05-06 14:35:29 -07002284 clearWhiteReferences(&gDvm.gcHeap->phantomReferences);
2285
2286 /*
Carl Shapirod28668c2010-04-15 16:10:00 -07002287 * Verify the stack and heap.
2288 */
Carl Shapirof5718252010-05-11 20:55:13 -07002289 dvmVerifyRoots();
Carl Shapirod28668c2010-04-15 16:10:00 -07002290 verifyNewSpace();
2291
Carl Shapirod28668c2010-04-15 16:10:00 -07002292 //describeBlocks(gcHeap->heapSource);
2293
2294 clearFromSpace(gcHeap->heapSource);
2295
2296 {
2297 size_t alloc, rem, total;
2298
2299 room(&alloc, &rem, &total);
Carl Shapiro8bb533e2010-05-06 15:35:27 -07002300 LOG_SCAV("AFTER GC: %zu alloc, %zu free, %zu total.", alloc, rem, total);
Carl Shapirod28668c2010-04-15 16:10:00 -07002301 }
2302}
2303
2304/*
2305 * Interface compatibility routines.  Most are no-ops under this collector; dvmHeapScanMarkedObjects() does the real work.
2306 */
2307
2308void dvmClearWhiteRefs(Object **list)
2309{
Carl Shapiro952e84a2010-05-06 14:35:29 -07002310 /* do nothing */
Carl Shapirod28668c2010-04-15 16:10:00 -07002311 assert(*list == NULL);
2312}
2313
2314void dvmHandleSoftRefs(Object **list)
2315{
Carl Shapiro952e84a2010-05-06 14:35:29 -07002316 /* do nothing */
Carl Shapirod28668c2010-04-15 16:10:00 -07002317 assert(*list == NULL);
2318}
2319
2320bool dvmHeapBeginMarkStep(GcMode mode)
2321{
2322 /* do nothing */
2323 return true;
2324}
2325
2326void dvmHeapFinishMarkStep(void)
2327{
2328 /* do nothing */
2329}
2330
2331void dvmHeapMarkRootSet(void)
2332{
2333 /* do nothing */
2334}
2335
2336void dvmHeapScanMarkedObjects(void)
2337{
2338 dvmScavengeRoots();
2339}
2340
2341void dvmHeapScheduleFinalizations(void)
2342{
2343 /* do nothing */
2344}
2345
2346void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
2347{
Carl Shapiro703a2f32010-05-12 23:11:37 -07002348 *numFreed = 0;
2349 *sizeFreed = 0;
Carl Shapirod28668c2010-04-15 16:10:00 -07002350 /* do nothing */
2351}
2352
2353void dvmMarkObjectNonNull(const Object *obj)
2354{
2355 assert(!"implemented");
2356}