/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <limits.h>
#include <sys/mman.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/Verify.h"
#include "alloc/clz.h"

/*
 * A "mostly copying", generational, garbage collector.
 *
 * TODO: we allocate our own contiguous tract of page frames to back
 * object allocations.  To cooperate with other heaps active in the
 * virtual machine we need to move the responsibility of allocating
 * pages someplace outside of this code.
 *
 * The other major data structures that maintain the state of the heap
 * are the block space table and the block queue.
 *
 * The block space table records the state of a block.  We must track
 * whether a block is:
 *
 * - Free or allocated in some space.
 *
 * - If the block holds part of a large object allocation, whether the
 *   block is the initial or a continued block of the allocation.
 *
 * - Whether the block is pinned, that is to say whether at least one
 *   object in the block must remain stationary.  Only needed during a
 *   GC.
 *
 * - Which space the object belongs to.  At present this means
 *   from-space or to-space.
 *
 * The block queue is used during garbage collection.  Unlike Cheney's
 * algorithm, from-space and to-space are not contiguous.  Therefore,
 * one cannot maintain the state of the copy with just two pointers.
 * The block queue exists to thread lists of blocks from the various
 * spaces together.
 *
 * Additionally, we record the free space frontier of the heap, as
 * well as the address of the first object within a block, which is
 * required to copy objects following a large object (not currently
 * implemented).  This is stored in the heap source structure.  This
 * should be moved elsewhere to support in-line allocations from Java
 * threads.
 *
 * Allocation requests are satisfied by reserving storage from one or
 * more contiguous blocks.  Objects that are small enough to fit
 * inside a block are packed together within a block.  Objects that
 * are larger than a block are allocated from contiguous sequences of
 * blocks.  When half the available blocks are filled, a garbage
 * collection occurs.  We "flip" spaces (exchange from- and to-space),
 * copy live objects into to-space, and perform pointer adjustment.
 *
 * Copying is made more complicated by the requirement that some
 * objects must not be moved.  This property is known as "pinning".
 * These objects must be dealt with specially.  We use Bartlett's
 * scheme; blocks containing such objects are grayed (promoted) at the
 * start of a garbage collection.  By virtue of this trick, marking
 * from the roots proceeds as usual but all objects on those pages are
 * considered promoted and therefore not moved.
 *
 * TODO: there is sufficient information within the garbage collector
 * to implement Attardi's scheme for evacuating unpinned objects from
 * a page that is otherwise pinned.  This would eliminate false
 * retention caused by the large pinning granularity.
 *
 * We need a scheme for medium and large objects.  Ignore that for
 * now, we can return to this later.
 *
 * Eventually we need to worry about promoting objects out of the
 * copy-collected heap (tenuring) into a less volatile space.  Copying
 * may not always be the best policy for such spaces.  We should
 * consider a variant of mark, sweep, compact.
 *
 * The block scheme allows us to use VM page faults to maintain a
 * write barrier.  Consider having a special leaf state for a page.
 *
 * Bibliography:
 *
 * C. J. Cheney. 1970. A non-recursive list compacting
 * algorithm. CACM 13(11), pp. 677-678.
 *
 * Joel F. Bartlett. 1988. Compacting Garbage Collection with
 * Ambiguous Roots. Digital Equipment Corporation.
 *
 * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up
 * Generations and C++. Digital Equipment Corporation.
 *
 * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage
 * Collection in Uncooperative Environments. Digital Equipment
 * Corporation.
 *
 * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory
 * Management Framework. TR-94-010.
 *
 * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable
 * memory management framework for C++. Software -- Practice and
 * Experience. 28(11), 1143-1183.
 */
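
/*
 * A worked example with hypothetical numbers: with 512-byte blocks, a
 * 1300-byte allocation occupies ceil(1300/512) == 3 contiguous
 * blocks.  The block space table marks the first block with the
 * owning space identifier and the remaining two as BLOCK_CONTINUED.
 */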

#define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0]))

#if 1
#define LOG_ALLOC LOGI
#define LOG_SCAVENGE LOGI
#define LOG_TRANSPORT LOGI
#define LOG_PROMOTE LOGI
#define LOG_VERIFY LOGI
#else
#define LOG_ALLOC(...) ((void *)0)
#define LOG_SCAVENGE(...) ((void *)0)
#define LOG_TRANSPORT(...) ((void *)0)
#define LOG_PROMOTE(...) ((void *)0)
#define LOG_VERIFY(...) ((void *)0)
#endif

static void enqueueBlock(HeapSource *heapSource, size_t block);
static size_t scavengeReference(Object **obj);
static void verifyReference(const void *obj);
static void printHeapBitmap(const HeapBitmap *bitmap);
static void printHeapBitmapSxS(const HeapBitmap *b1, const HeapBitmap *b2);
static bool isToSpace(const void *addr);
static bool isFromSpace(const void *addr);
static size_t sumHeapBitmap(const HeapBitmap *bitmap);
static size_t scavengeDataObject(DataObject *obj);
static DataObject *transportDataObject(const DataObject *fromObj);

/*
 * We use 512-byte blocks.
 */
enum { BLOCK_SHIFT = 9 };
enum { BLOCK_SIZE = 1 << BLOCK_SHIFT };

/*
 * Space identifiers, stored into the blockSpace array.
 */
enum {
    BLOCK_FREE = 0,
    BLOCK_FROM_SPACE = 1,
    BLOCK_TO_SPACE = 2,
    BLOCK_CONTINUED = 7
};

/*
 * Alignment for all allocations, in bytes.
 */
enum { ALLOC_ALIGNMENT = 8 };

/*
 * Sentinel value for the queue end.
 */
#define QUEUE_TAIL (~(size_t)0)

struct HeapSource {

    /* The base address of backing store. */
    u1 *blockBase;

    /* Total number of blocks available for allocation. */
    size_t totalBlocks;
    size_t allocBlocks;

    /*
     * The scavenger work queue.  Implemented as an array of index
     * values into the queue.
     */
    size_t *blockQueue;

    /*
     * Base and limit blocks.  Basically the shifted start address of
     * the block.  We convert blocks to a relative number when
     * indexing in the block queue.  TODO: make the block queue base
     * relative rather than the index into the block queue.
     */
    size_t baseBlock, limitBlock;

    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* Per-block space identifiers (BLOCK_FREE, BLOCK_FROM_SPACE, ...). */
    char *blockSpace;

    /* Start of free space in the current block. */
    u1 *allocPtr;
    /* Exclusive limit of free space in the current block. */
    u1 *allocLimit;

    HeapBitmap allocBits;

    /*
     * Singly-linked lists of live Reference objects.  Built when
     * scavenging, threaded through the Reference.vmData field.
     */
    DataObject *softReferenceList;
    DataObject *weakReferenceList;
    DataObject *phantomReferenceList;

    /*
     * The starting size of the heap.  This value is the same as the
     * value provided to the -Xms flag.
     */
    size_t minimumSize;

    /*
     * The maximum size of the heap.  This value is the same as the
     * -Xmx flag.
     */
    size_t maximumSize;

    /*
     * The current, committed size of the heap.  At present, this is
     * equivalent to the maximumSize.
     */
    size_t currentSize;

    size_t bytesAllocated;
};

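/*
 * Power-of-two alignment helpers.  alignDown clears the low-order
 * bits of x; x & -n is valid only when n is a power of two.  For
 * example, alignUp(300, 8) == 304 and alignDown(300, 8) == 296.
 */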
static unsigned long alignDown(unsigned long x, unsigned long n)
{
    return x & -n;
}

static unsigned long alignUp(unsigned long x, unsigned long n)
{
    return alignDown(x + (n - 1), n);
}

static void describeBlocks(const HeapSource *heapSource)
{
    size_t i;

    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if ((i % 32) == 0) putchar('\n');
        printf("%d ", heapSource->blockSpace[i]);
    }
    putchar('\n');
}

/*
 * Virtual memory interface.
 */

static void *virtualAlloc(size_t length)
{
    void *addr;
    int flags, prot;

    flags = MAP_PRIVATE | MAP_ANONYMOUS;
    prot = PROT_READ | PROT_WRITE;
    addr = mmap(NULL, length, prot, flags, -1, 0);
    if (addr == MAP_FAILED) {
        LOGE_HEAP("mmap: %s", strerror(errno));
        addr = NULL;
    }
    return addr;
}

static void virtualFree(void *addr, size_t length)
{
    int res;

    assert(addr != NULL);
    assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0);
    res = munmap(addr, length);
    if (res == -1) {
        LOGE_HEAP("munmap: %s", strerror(errno));
    }
}

static int isValidAddress(const HeapSource *heapSource, const u1 *addr)
{
    size_t block;

    block = (uintptr_t)addr >> BLOCK_SHIFT;
    return heapSource->baseBlock <= block &&
           heapSource->limitBlock > block;
}

/*
 * Iterate over the block map looking for a contiguous run of free
 * blocks.
 */
static void *allocateBlocks(HeapSource *heapSource, size_t blocks)
{
    void *addr;
    size_t allocBlocks, totalBlocks;
    size_t i, j;

    allocBlocks = heapSource->allocBlocks;
    totalBlocks = heapSource->totalBlocks;
    /* Check underflow. */
    assert(blocks != 0);
    /* Check overflow. */
    if (allocBlocks + blocks > totalBlocks / 2) {
        return NULL;
    }
    /* Scan block map. */
    for (i = 0; i < totalBlocks; ++i) {
        /* Check fit, taking care not to run off the end of the map. */
        for (j = 0; j < blocks && i+j < totalBlocks; ++j) {
            if (heapSource->blockSpace[i+j] != BLOCK_FREE) {
                break;
            }
        }
        /* No fit? */
        if (j != blocks) {
            i += j;
            continue;
        }
        /* Fit, allocate. */
        heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */
        for (j = 1; j < blocks; ++j) {
            heapSource->blockSpace[i+j] = BLOCK_CONTINUED;
        }
        heapSource->allocBlocks += blocks;
        addr = &heapSource->blockBase[i*BLOCK_SIZE];
        memset(addr, 0, blocks*BLOCK_SIZE);
        /* Collecting? */
        if (heapSource->queueHead != QUEUE_TAIL) {
            LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
            /*
             * This allocation was made on behalf of the transporter
             * when it shaded a white object gray.  We enqueue the
             * block so the scavenger can further shade the gray
             * objects black.
             */
            enqueueBlock(heapSource, i);
        }

        return addr;
    }
    /* Insufficient space, fail. */
    LOGE("Insufficient space, %zu blocks, %zu blocks allocated and %zu bytes allocated",
         heapSource->totalBlocks,
         heapSource->allocBlocks,
         heapSource->bytesAllocated);
    return NULL;
}

/* Converts an absolute address to a relative block number. */
static size_t addressToBlock(const HeapSource *heapSource, const void *addr)
{
    assert(heapSource != NULL);
    assert(isValidAddress(heapSource, addr));
    return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock;
}

/* Converts a relative block number to an absolute address. */
static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
{
    u1 *addr;

    addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
    assert(isValidAddress(heapSource, addr));
    return addr;
}

static void clearBlock(HeapSource *heapSource, size_t block)
{
    u1 *addr;
    size_t i;

    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    addr = heapSource->blockBase + block*BLOCK_SIZE;
    memset(addr, 0xCC, BLOCK_SIZE);
    for (i = 0; i < BLOCK_SIZE; i += 8) {
        dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i);
    }
}

static void clearFromSpace(HeapSource *heapSource)
{
    size_t i, count;

    assert(heapSource != NULL);
    i = count = 0;
    while (i < heapSource->totalBlocks) {
        if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) {
            ++i;
            continue;
        }
        heapSource->blockSpace[i] = BLOCK_FREE;
        clearBlock(heapSource, i);
        ++i;
        ++count;
        while (i < heapSource->totalBlocks &&
               heapSource->blockSpace[i] == BLOCK_CONTINUED) {
            heapSource->blockSpace[i] = BLOCK_FREE;
            clearBlock(heapSource, i);
            ++i;
            ++count;
        }
    }
    LOGI("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE);
}

/*
 * Appends the given block to the block queue.  The block queue is
 * processed in-order by the scavenger.
 */
static void enqueueBlock(HeapSource *heapSource, size_t block)
{
    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    if (heapSource->queueHead != QUEUE_TAIL) {
        heapSource->blockQueue[heapSource->queueTail] = block;
    } else {
        heapSource->queueHead = block;
    }
    heapSource->blockQueue[block] = QUEUE_TAIL;
    heapSource->queueTail = block;
    ++heapSource->queueSize;
}

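/*
 * Illustrative sketch (not called anywhere): how a consumer would
 * walk the block queue in FIFO order.  blockQueue[block] holds the
 * number of the next queued block and QUEUE_TAIL terminates the
 * chain.
 */
static void walkBlockQueue(const HeapSource *heapSource,
                           void (*visit)(size_t block))
{
    size_t block;

    for (block = heapSource->queueHead;
         block != QUEUE_TAIL;
         block = heapSource->blockQueue[block]) {
        visit(block);
    }
}
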
/*
 * Grays all objects within the block corresponding to the given
 * address.
 */
static void promoteBlockByAddr(HeapSource *heapSource, const void *addr)
{
    size_t block;

    block = addressToBlock(heapSource, (const u1 *)addr);
    if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
        // LOGI("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
        heapSource->blockSpace[block] = BLOCK_TO_SPACE;
        enqueueBlock(heapSource, block);
        /* TODO(cshapiro): count continued blocks? */
        heapSource->allocBlocks += 1;
    } else {
        // LOGI("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
    }
}

GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
{
    GcHeap* gcHeap;
    HeapSource *heapSource;

    assert(startSize <= absoluteMaxSize);

    heapSource = malloc(sizeof(*heapSource));
    assert(heapSource != NULL);
    memset(heapSource, 0, sizeof(*heapSource));

    heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE);
    heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE);

    heapSource->currentSize = heapSource->maximumSize;

    /* Allocate underlying storage for blocks. */
    heapSource->blockBase = virtualAlloc(heapSource->maximumSize);
    assert(heapSource->blockBase != NULL);
    heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT;
    heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT;

    heapSource->allocBlocks = 0;
    heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock);

    assert(heapSource->totalBlocks == heapSource->maximumSize / BLOCK_SIZE);

    {
        size_t size = sizeof(heapSource->blockQueue[0]);
        heapSource->blockQueue = malloc(heapSource->totalBlocks*size);
        assert(heapSource->blockQueue != NULL);
        memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size);
        heapSource->queueHead = QUEUE_TAIL;
    }

    /* Byte indicating space residence or free status of block. */
    {
        size_t size = sizeof(heapSource->blockSpace[0]);
        heapSource->blockSpace = malloc(heapSource->totalBlocks*size);
        assert(heapSource->blockSpace != NULL);
        memset(heapSource->blockSpace, 0, heapSource->totalBlocks*size);
    }

    dvmHeapBitmapInit(&heapSource->allocBits,
                      heapSource->blockBase,
                      heapSource->maximumSize,
                      "blockBase");

    /* Initialize allocation pointers. */
    heapSource->allocPtr = allocateBlocks(heapSource, 1);
    heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;

    gcHeap = malloc(sizeof(*gcHeap));
    assert(gcHeap != NULL);
    memset(gcHeap, 0, sizeof(*gcHeap));
    gcHeap->heapSource = heapSource;

    return gcHeap;
}

/*
 * Perform any required heap initializations after forking from the
 * zygote process.  This is a no-op for the time being.  Eventually
 * this will demarcate the shared region of the heap.
 */
bool dvmHeapSourceStartupAfterZygote(void)
{
    return true;
}

bool dvmHeapSourceStartupBeforeFork(void)
{
    assert(!"implemented");
    return false;
}

void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL)
        return;
    virtualFree((*gcHeap)->heapSource->blockBase,
                (*gcHeap)->heapSource->maximumSize);
    free((*gcHeap)->heapSource);
    (*gcHeap)->heapSource = NULL;
    free(*gcHeap);
    *gcHeap = NULL;
}

size_t dvmHeapSourceGetValue(enum HeapSourceValueSpec spec,
                             size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *heapSource;
    size_t value;

    heapSource = gDvm.gcHeap->heapSource;
    switch (spec) {
    case HS_EXTERNAL_BYTES_ALLOCATED:
        value = 0;
        break;
    case HS_EXTERNAL_LIMIT:
        value = 0;
        break;
    case HS_FOOTPRINT:
        value = heapSource->maximumSize;
        break;
    case HS_ALLOWED_FOOTPRINT:
        value = heapSource->maximumSize;
        break;
    case HS_BYTES_ALLOCATED:
        value = heapSource->bytesAllocated;
        break;
    case HS_OBJECTS_ALLOCATED:
        value = sumHeapBitmap(&heapSource->allocBits);
        break;
    default:
        assert(!"implemented");
        value = 0;
    }
    if (perHeapStats) {
        *perHeapStats = value;
    }
    return value;
}

/*
 * Performs a shallow copy of the allocation bitmap into the given
 * vector of heap bitmaps.
 */
void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
                                   size_t numHeaps)
{
    assert(!"implemented");
}

HeapBitmap *dvmHeapSourceGetLiveBits(void)
{
    assert(!"implemented");
    return NULL;
}

/*
 * Allocate the specified number of bytes from the heap.  The
 * allocation cursor points into a block of free storage.  If the
 * given allocation fits in the remaining space of the block, we
 * advance the cursor and return a pointer to the free storage.  If
 * the allocation cannot fit in the current block but is smaller than
 * a block we request a new block and allocate from it instead.  If
 * the allocation is larger than a block we must allocate from a span
 * of contiguous blocks.
 */
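/*
 * For example (request sizes are hypothetical): with ALLOC_ALIGNMENT
 * == 8 and BLOCK_SIZE == 512, a 20-byte request rounds up to 24 bytes
 * and is packed into the current block, while a 700-byte request
 * rounds up to 704 bytes and must be carved from a span of two
 * contiguous blocks.
 */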
void *dvmHeapSourceAlloc(size_t length)
{
    HeapSource *heapSource;
    unsigned char *addr;
    size_t aligned, available, blocks;

    heapSource = gDvm.gcHeap->heapSource;
    assert(heapSource != NULL);
    assert(heapSource->allocPtr != NULL);
    assert(heapSource->allocLimit != NULL);

    aligned = alignUp(length, ALLOC_ALIGNMENT);
    available = heapSource->allocLimit - heapSource->allocPtr;

    /* Try allocating inside the current block. */
    if (aligned <= available) {
        addr = heapSource->allocPtr;
        heapSource->allocPtr += aligned;
        heapSource->bytesAllocated += aligned;
        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
        return addr;
    }

    /* Try allocating in a new block. */
    if (aligned <= BLOCK_SIZE) {
        addr = allocateBlocks(heapSource, 1);
        if (addr != NULL) {
            heapSource->allocLimit = addr + BLOCK_SIZE;
            heapSource->allocPtr = addr + aligned;
            heapSource->bytesAllocated += aligned;
            dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
            /* TODO(cshapiro): pad out the current block. */
        }
        return addr;
    }

    /* Try allocating in a span of blocks. */
    blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE;

    addr = allocateBlocks(heapSource, blocks);
    /* Propagate failure upward. */
    if (addr != NULL) {
        heapSource->bytesAllocated += aligned;
        dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
        /* TODO(cshapiro): pad out free space in the last block. */
    }
    return addr;
}

void *dvmHeapSourceAllocAndGrow(size_t size)
{
    return dvmHeapSourceAlloc(size);
}

/* TODO: refactor along with dvmHeapSourceAlloc */
void *allocateGray(size_t size)
{
    assert(gDvm.gcHeap->heapSource->queueHead != QUEUE_TAIL);
    return dvmHeapSourceAlloc(size);
}

/*
 * Returns true if the given address is within the heap and points to
 * the header of a live object.
 */
bool dvmHeapSourceContains(const void *addr)
{
    HeapSource *heapSource;
    HeapBitmap *bitmap;

    heapSource = gDvm.gcHeap->heapSource;
    bitmap = &heapSource->allocBits;
    return dvmHeapBitmapIsObjectBitSet(bitmap, addr);
}

bool dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
{
    assert(!"implemented");
    return false;
}

size_t dvmHeapSourceChunkSize(const void *ptr)
{
    assert(!"implemented");
    return 0;
}

size_t dvmHeapSourceFootprint(void)
{
    assert(!"implemented");
    return 0;
}

/*
 * Returns the "ideal footprint" which appears to be the number of
 * bytes currently committed to the heap.  This starts out at the
 * start size of the heap and grows toward the maximum size.
 */
size_t dvmHeapSourceGetIdealFootprint(void)
{
    return gDvm.gcHeap->heapSource->currentSize;
}

float dvmGetTargetHeapUtilization(void)
{
    assert(!"implemented");
    return 0.0f;
}

void dvmSetTargetHeapUtilization(float newTarget)
{
    assert(!"implemented");
}

size_t dvmMinimumHeapSize(size_t size, bool set)
{
    return gDvm.gcHeap->heapSource->minimumSize;
}

/*
 * Expands the size of the heap after a collection.  At present we
 * commit the pages for the maximum size of the heap so this routine
 * is just a no-op.  Eventually, we will either allocate or commit
 * pages on an as-needed basis.
 */
void dvmHeapSourceGrowForUtilization(void)
{
    /* do nothing */
}

void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    /* do nothing */
}

void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen,
                                        const void *userptr, size_t userlen,
                                        void *arg),
                       void *arg)
{
    assert(!"implemented");
}

size_t dvmHeapSourceGetNumHeaps(void)
{
    return 1;
}

bool dvmTrackExternalAllocation(size_t n)
{
    assert(!"implemented");
    return false;
}

void dvmTrackExternalFree(size_t n)
{
    assert(!"implemented");
}

size_t dvmGetExternalBytesAllocated(void)
{
    assert(!"implemented");
    return 0;
}

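/*
 * Exchanges the roles of the semi-spaces at the start of a
 * collection: the block queue and allocation pointers are reset and
 * every to-space block is relabeled from-space.  Live objects are
 * subsequently copied into freshly allocated to-space blocks.
 */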
void dvmHeapSourceFlip(void)
{
    HeapSource *heapSource;
    size_t i;

    heapSource = gDvm.gcHeap->heapSource;

    /* Reset the block queue. */
    heapSource->allocBlocks = 0;
    heapSource->queueSize = 0;
    heapSource->queueHead = QUEUE_TAIL;

    /* Hack: reset the reference lists. */
    /* TODO(cshapiro): implement reference object processing. */
    heapSource->softReferenceList = NULL;
    heapSource->weakReferenceList = NULL;
    heapSource->phantomReferenceList = NULL;

    /* TODO(cshapiro): pad the current (prev) block. */

    heapSource->allocPtr = NULL;
    heapSource->allocLimit = NULL;

    /* Whiten all allocated blocks. */
    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) {
            heapSource->blockSpace[i] = BLOCK_FROM_SPACE;
        }
    }
}

static void room(size_t *alloc, size_t *avail, size_t *total)
{
    HeapSource *heapSource;

    heapSource = gDvm.gcHeap->heapSource;
    *total = heapSource->totalBlocks*BLOCK_SIZE;
    *alloc = heapSource->allocBlocks*BLOCK_SIZE;
    *avail = *total - *alloc;
}

static bool isSpaceInternal(u1 *addr, int space)
{
    HeapSource *heapSource;
    u1 *base, *limit;
    size_t offset;
    char space2;

    heapSource = gDvm.gcHeap->heapSource;
    base = heapSource->blockBase;
    assert(addr >= base);
    limit = heapSource->blockBase + heapSource->maximumSize;
    assert(addr < limit);
    offset = addr - base;
    space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT];
    return space == space2;
}

static bool isFromSpace(const void *addr)
{
    return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE);
}

static bool isToSpace(const void *addr)
{
    return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE);
}

/*
 * Notifies the collector that the object at the given address must
 * remain stationary during the current collection.
 */
static void pinObject(const Object *obj)
{
    promoteBlockByAddr(gDvm.gcHeap->heapSource, obj);
}

static void printHeapBitmap(const HeapBitmap *bitmap)
{
    size_t i, length;

    length = bitmap->bitsLen >> 2;
    fprintf(stderr, "%p", bitmap->bits);
    for (i = 0; i < length; ++i) {
        fprintf(stderr, " %lx", bitmap->bits[i]);
    }
    fputc('\n', stderr);
}

static void printHeapBitmapSxS(const HeapBitmap *b1, const HeapBitmap *b2)
{
    uintptr_t addr;
    size_t i, length;

    assert(b1->base == b2->base);
    assert(b1->bitsLen == b2->bitsLen);
    addr = b1->base;
    length = b1->bitsLen >> 2;
    for (i = 0; i < length; ++i) {
        int same = b1->bits[i] == b2->bits[i];
        fprintf(stderr, "%08x %08lx %08lx %d\n",
                addr, b1->bits[i], b2->bits[i], same);
        addr += sizeof(*b1->bits)*CHAR_BIT;
    }
}

static size_t sumHeapBitmap(const HeapBitmap *bitmap)
{
    size_t i, sum;

    sum = 0;
    for (i = 0; i < bitmap->bitsLen >> 2; ++i) {
        /* Count set bits; each marks the start of an allocated object. */
        sum += __builtin_popcountl(bitmap->bits[i]);
    }
    return sum;
}

/*
 * Miscellaneous functionality.
 */

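/*
 * A forwarding pointer is stored in the first word of a from-space
 * object, overwriting its class pointer, with the low bit set as a
 * tag.  All allocations are 8-byte aligned, so the low bit of a real
 * pointer is always clear; e.g., forwarding an object to the
 * (hypothetical) to-space address 0x4000 stores 0x4001 in the old
 * header.
 */
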
static int isForward(const void *addr)
{
    return (uintptr_t)addr & 0x1;
}

static void setForward(const void *toObj, void *fromObj)
{
    *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1;
}

static void* getForward(const void *fromObj)
{
    return (void *)((uintptr_t)fromObj & ~0x1);
}

/* Beware, uses the same encoding as forwarding pointers! */
static int isPermanentString(const StringObject *obj) {
    return (uintptr_t)obj & 0x1;
}

static void* getPermanentString(const StringObject *obj)
{
    return (void *)((uintptr_t)obj & ~0x1);
}

/*
 * Scavenging and transporting routines follow.  A transporter grays
 * an object.  A scavenger blackens an object.  We define these
 * routines for each fundamental object type.  Dispatch is performed
 * in scavengeObject.
 */

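/*
 * In tri-color terms: a white object is an unforwarded from-space
 * object; a gray object has been transported to to-space but not yet
 * scanned (its block is still on the block queue); a black object has
 * had all of its references scavenged.
 */
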
/*
 * Class object scavenging and transporting.
 */

static ClassObject *transportClassObject(const ClassObject *fromObj)
{
    ClassObject *toObj;
    size_t length;

    LOG_TRANSPORT("transportClassObject(fromObj=%p)", fromObj);
    length = dvmClassObjectSize(fromObj); /* TODO: hash code */
    assert(length != 0);
    toObj = allocateGray(length);
    assert(toObj != NULL);
    memcpy(toObj, fromObj, length);
    LOG_TRANSPORT("transportClassObject: from %p to %p (%zu)", fromObj, toObj, length);
    return toObj;
}

static size_t scavengeClassObject(ClassObject *obj)
{
    size_t size;
    int i;

    assert(obj != NULL);
    LOG_SCAVENGE("scavengeClassObject(obj=%p)", obj);
    /* Scavenge our class object. */
    assert(obj->obj.clazz != NULL);
    assert(obj->obj.clazz->descriptor != NULL);
    assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;"));
    assert(obj->descriptor != NULL);
    LOG_SCAVENGE("scavengeClassObject: descriptor='%s',vtableCount=%d",
                 obj->descriptor, obj->vtableCount);
    scavengeReference((Object **) obj);
    /* Scavenge the array element class object. */
    if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) {
        scavengeReference((Object **)(void *)&obj->elementClass);
    }
    /* Scavenge the superclass. */
    scavengeReference((Object **)(void *)&obj->super);
    /* Scavenge the class loader. */
    scavengeReference(&obj->classLoader);
    /* Scavenge static fields. */
    for (i = 0; i < obj->sfieldCount; ++i) {
        char ch = obj->sfields[i].field.signature[0];
        if (ch == '[' || ch == 'L') {
            scavengeReference((Object **)(void *)&obj->sfields[i].value.l);
        }
    }
    /* Scavenge interface class objects. */
    for (i = 0; i < obj->interfaceCount; ++i) {
        scavengeReference((Object **) &obj->interfaces[i]);
    }
    size = dvmClassObjectSize(obj);
    return size;
}

/*
 * Array object scavenging.
 */

static ArrayObject *transportArrayObject(const ArrayObject *fromObj)
{
    ArrayObject *toObj;
    size_t length;

    LOG_TRANSPORT("transportArrayObject(fromObj=%p)", fromObj);
    length = dvmArrayObjectSize(fromObj);
    assert(length != 0);
    if (length >= BLOCK_SIZE) {
        LOGI("WARNING: LARGE ARRAY OBJECT %s", fromObj->obj.clazz->descriptor);
    }
    toObj = allocateGray(length);
    LOG_TRANSPORT("transportArrayObject: from %p to %p (%zu)", fromObj, toObj, length);
    assert(toObj != NULL);
    memcpy(toObj, fromObj, length);
    return toObj;
}

static size_t scavengeArrayObject(ArrayObject *array)
{
    size_t i, length;

    LOG_SCAVENGE("scavengeArrayObject(array=%p)", array);
    /* Scavenge the class object. */
    assert(array != NULL);
    assert(isToSpace(array));
    assert(array->obj.clazz != NULL);
    scavengeReference((Object **) array);
    length = dvmArrayObjectSize(array);
    /* Scavenge the array contents. */
    if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) {
        Object **contents = (Object **)array->contents;
        for (i = 0; i < array->length; ++i) {
            scavengeReference(&contents[i]);
        }
    }
    return length;
}

/*
 * Reference object scavenging.
 */

static int getReferenceFlags(const DataObject *obj)
{
    int flags;

    flags = CLASS_ISREFERENCE |
            CLASS_ISWEAKREFERENCE |
            CLASS_ISPHANTOMREFERENCE;
    return GET_CLASS_FLAG_GROUP(obj->obj.clazz, flags);
}

static int isReference(const DataObject *obj)
{
    return getReferenceFlags(obj) != 0;
}

static int isSoftReference(const DataObject *obj)
{
    return getReferenceFlags(obj) == CLASS_ISREFERENCE;
}

static int isWeakReference(const DataObject *obj)
{
    return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE;
}

static bool isPhantomReference(const DataObject *obj)
{
    return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE;
}

static void clearReference(DataObject *reference)
{
    size_t offset;

    assert(isSoftReference(reference) || isWeakReference(reference));
    offset = gDvm.offJavaLangRefReference_referent;
    dvmSetFieldObject((Object *)reference, offset, NULL);
}

static bool isReferentGray(const DataObject *reference)
{
    Object *obj;
    size_t offset;

    assert(reference != NULL);
    assert(isSoftReference(reference) || isWeakReference(reference));
    offset = gDvm.offJavaLangRefReference_referent;
    obj = dvmGetFieldObject((Object *)reference, offset);
    return obj == NULL || isToSpace(obj);
}

static void enqueueReference(HeapSource *heapSource, DataObject *reference)
{
    DataObject **queue;
    size_t offset;

    LOGI("enqueueReference(heapSource=%p,reference=%p)", heapSource, reference);
    assert(heapSource != NULL);
    assert(reference != NULL);
    assert(isToSpace(reference));
    assert(isReference(reference));
    if (isSoftReference(reference)) {
        queue = &heapSource->softReferenceList;
    } else if (isWeakReference(reference)) {
        queue = &heapSource->weakReferenceList;
    } else if (isPhantomReference(reference)) {
        queue = &heapSource->phantomReferenceList;
    } else {
        assert(!"reached");
        queue = NULL;
    }
    offset = gDvm.offJavaLangRefReference_vmData;
    dvmSetFieldObject((Object *)reference, offset, (Object *)*queue);
    *queue = reference;
}

static DataObject *transportReferenceObject(const DataObject *fromObj)
{
    assert(fromObj != NULL);
    LOG_TRANSPORT("transportReferenceObject(fromObj=%p)", fromObj);
    return transportDataObject(fromObj);
}

/*
 * If a reference points to from-space and has been forwarded, we snap
 * the pointer to its new to-space address.  If the reference points
 * to an unforwarded from-space address we must enqueue the reference
 * for later processing.  TODO: implement proper reference processing
 * and move the referent scavenging elsewhere.
 */
static size_t scavengeReferenceObject(DataObject *obj)
{
    size_t length;

    assert(obj != NULL);
    LOG_SCAVENGE("scavengeReferenceObject(obj=%p),'%s'", obj, obj->obj.clazz->descriptor);
    {
        /* Always scavenge the hidden Reference.referent field. */
        size_t offset = gDvm.offJavaLangRefReference_referent;
        void *addr = BYTE_OFFSET((Object *)obj, offset);
        Object **ref = (Object **)(void *)&((JValue *)addr)->l;
        scavengeReference(ref);
    }
    length = scavengeDataObject(obj);
    if (!isReferentGray(obj)) {
        assert(!"reached"); /* TODO(cshapiro): remove this */
        LOG_SCAVENGE("scavengeReferenceObject: enqueueing %p", obj);
        enqueueReference(gDvm.gcHeap->heapSource, obj);
        length = obj->obj.clazz->objectSize;
    }
    return length;
}

/*
 * Data object scavenging.
 */

static DataObject *transportDataObject(const DataObject *fromObj)
{
    DataObject *toObj;
    ClassObject *clazz;
    size_t length;

    assert(fromObj != NULL);
    assert(isFromSpace(fromObj));
    LOG_TRANSPORT("transportDataObject(fromObj=%p) allocBlocks=%zu", fromObj, gDvm.gcHeap->heapSource->allocBlocks);
    clazz = fromObj->obj.clazz;
    assert(clazz != NULL);
    length = clazz->objectSize;
    assert(length != 0);
    /* TODO(cshapiro): don't copy, re-map large data objects. */
    toObj = allocateGray(length);
    assert(toObj != NULL);
    assert(isToSpace(toObj));
    memcpy(toObj, fromObj, length);
    LOG_TRANSPORT("transportDataObject: from %p/%zu to %p/%zu (%zu)", fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj), toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj), length);
    return toObj;
}

static size_t scavengeDataObject(DataObject *obj)
{
    ClassObject *clazz;
    size_t length;
    int i;

    // LOGI("scavengeDataObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->obj.clazz != NULL);
    assert(obj->obj.clazz->objectSize != 0);
    assert(isToSpace(obj));
    /* Scavenge the class object. */
    clazz = obj->obj.clazz;
    scavengeReference((Object **) obj);
    length = obj->obj.clazz->objectSize;
    /* Scavenge instance fields. */
    if (clazz->refOffsets != CLASS_WALK_SUPER) {
        size_t refOffsets = clazz->refOffsets;
        while (refOffsets != 0) {
            size_t rshift = CLZ(refOffsets);
            size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
            Object **ref = (Object **)((u1 *)obj + offset);
            scavengeReference(ref);
            refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
        }
    } else {
        for (; clazz != NULL; clazz = clazz->super) {
            InstField *field = clazz->ifields;
            for (i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
                size_t offset = field->byteOffset;
                Object **ref = (Object **)((u1 *)obj + offset);
                scavengeReference(ref);
            }
        }
    }
    return length;
}

/*
 * Generic reference scavenging.
 */

/*
 * Given a reference to an object, the scavenge routine will gray the
 * reference.  Any objects pointed to by the scavenged object will be
 * transported to to-space and a forwarding pointer will be installed
 * in the header of each moved object.
 */

/*
 * Blacken the given pointer.  If the pointer is in from-space, it is
 * transported to to-space.  If the object has a forwarding pointer
 * installed it has already been transported and the referent is
 * snapped to the new address.
 */
static size_t scavengeReference(Object **obj)
{
    ClassObject *clazz;

    assert(obj);

    if (*obj == NULL) goto exit;

    assert(dvmIsValidObject(*obj));

    /* The entire block is black. */
    if (isToSpace(*obj)) {
        LOG_SCAVENGE("scavengeReference skipping pinned object @ %p", *obj);
        goto exit;
    }
    LOG_SCAVENGE("scavengeReference(*obj=%p)", *obj);

    assert(isFromSpace(*obj));

    clazz = (*obj)->clazz;

    if (isForward(clazz)) {
        // LOGI("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1));
        *obj = (Object *)getForward(clazz);
    } else if (clazz == NULL) {
        // LOGI("scavengeReference %p has a NULL class object", *obj);
        assert(!"implemented");
    } else if (clazz == gDvm.unlinkedJavaLangClass) {
        // LOGI("scavengeReference %p is an unlinked class object", *obj);
        assert(!"implemented");
    } else if (clazz == gDvm.classJavaLangClass) {
        ClassObject *toObj;

        toObj = transportClassObject((ClassObject *)*obj);
        setForward(toObj, *obj);
        *obj = (Object *)toObj;
    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
        ArrayObject *toObj;

        toObj = transportArrayObject((ArrayObject *)*obj);
        setForward(toObj, *obj);
        *obj = (Object *)toObj;
    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
        DataObject *toObj;

        toObj = transportReferenceObject((DataObject *)*obj);
        setForward(toObj, *obj);
        *obj = (Object *)toObj;
    } else {
        DataObject *toObj;

        toObj = transportDataObject((DataObject *)*obj);
        setForward(toObj, *obj);
        *obj = (Object *)toObj;
    }
exit:
    return sizeof(Object *);
}

static void verifyReference(const void *obj)
{
    HeapSource *heapSource;
    size_t block;
    char space;

    if (obj == NULL) {
        LOG_VERIFY("verifyReference(obj=%p)", obj);
        return;
    }
    heapSource = gDvm.gcHeap->heapSource;
    block = addressToBlock(heapSource, obj);
    space = heapSource->blockSpace[block];
    LOG_VERIFY("verifyReference(obj=%p),block=%zu,space=%d", obj, block, space);
    assert(!((uintptr_t)obj & 7));
    assert(isToSpace(obj));
    assert(dvmIsValidObject(obj));
}

/*
 * Generic object scavenging.
 */

static size_t scavengeObject(Object *obj)
{
    ClassObject *clazz;
    size_t length;

    assert(obj != NULL);
    clazz = obj->clazz;
    assert(clazz != NULL);
    assert(!((uintptr_t)clazz & 0x1));
    assert(clazz != gDvm.unlinkedJavaLangClass);
    if (clazz == gDvm.classJavaLangClass) {
        length = scavengeClassObject((ClassObject *)obj);
    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
        length = scavengeArrayObject((ArrayObject *)obj);
    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
        length = scavengeReferenceObject((DataObject *)obj);
    } else {
        length = scavengeDataObject((DataObject *)obj);
    }
    return length;
}

/*
 * External root scavenging routines.
 */

static void scavengeHashTable(HashTable *table)
{
    HashEntry *entry;
    void *obj;
    int i;

    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        }
        scavengeReference((Object **)(void *)&entry->data);
    }
    dvmHashTableUnlock(table);
}

static void pinHashTableEntries(HashTable *table)
{
    HashEntry *entry;
    void *obj;
    int i;

    LOGI(">>> pinHashTableEntries(table=%p)", table);
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        }
        pinObject(entry->data);
    }
    dvmHashTableUnlock(table);
    LOGI("<<< pinHashTableEntries(table=%p)", table);
}

static void pinPrimitiveClasses(void)
{
    size_t length;
    size_t i;

    length = ARRAYSIZE(gDvm.primitiveClass);
    for (i = 0; i < length; i++) {
        if (gDvm.primitiveClass[i] != NULL) {
            pinObject((Object *)gDvm.primitiveClass[i]);
        }
    }
}

/*
 * Scavenge interned strings.  Permanent interned strings will have
 * been pinned and are therefore ignored.  Non-permanent strings that
 * have been forwarded are snapped.  All other entries are removed.
 */
static void scavengeInternedStrings(void)
{
    HashTable *table;
    HashEntry *entry;
    Object *obj;
    int i;

    table = gDvm.internedStrings;
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = (Object *)entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        } else if (!isPermanentString((StringObject *)obj)) {
            // LOGI("entry->data=%p", entry->data);
            LOG_SCAVENGE(">>> string obj=%p", entry->data);
            /* TODO(cshapiro): detach white string objects */
            scavengeReference((Object **)(void *)&entry->data);
            LOG_SCAVENGE("<<< string obj=%p", entry->data);
        }
    }
    dvmHashTableUnlock(table);
}

static void pinInternedStrings(void)
{
    HashTable *table;
    HashEntry *entry;
    Object *obj;
    int i;

    table = gDvm.internedStrings;
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = (Object *)entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        } else if (isPermanentString((StringObject *)obj)) {
            obj = (Object *)getPermanentString((StringObject*)obj);
            LOG_PROMOTE(">>> pin string obj=%p", obj);
            pinObject(obj);
            LOG_PROMOTE("<<< pin string obj=%p", obj);
        }
    }
    dvmHashTableUnlock(table);
}

static void verifyInternedStrings(void)
{
    HashTable *table;
    HashEntry *entry;
    Object *fwd, *obj;
    int i;

    table = gDvm.internedStrings;
    if (table == NULL) {
        return;
    }
    dvmHashTableLock(table);
    for (i = 0; i < table->tableSize; ++i) {
        entry = &table->pEntries[i];
        obj = (Object *)entry->data;
        if (obj == NULL || obj == HASH_TOMBSTONE) {
            continue;
        } else if (isPermanentString((StringObject *)obj)) {
            fwd = (Object *)getForward(obj);
            LOG_VERIFY(">>> verify string fwd=%p obj=%p", fwd, obj);
            verifyReference(fwd);
            LOG_VERIFY("<<< verify string fwd=%p obj=%p", fwd, obj);
        } else {
            LOG_VERIFY(">>> verify string obj=%p %p", obj, entry->data);
            verifyReference(obj);
            LOG_VERIFY("<<< verify string obj=%p %p", obj, entry->data);
        }
    }
    dvmHashTableUnlock(table);
}

/*
 * At present, reference tables contain references that must not be
 * moved by the collector.  Instead of scavenging each reference in
 * the table we pin each referenced object.
 */
static void pinReferenceTable(ReferenceTable *table)
{
    Object **entry;

    assert(table != NULL);
    assert(table->table != NULL);
    assert(table->nextEntry != NULL);
    for (entry = table->table; entry < table->nextEntry; ++entry) {
        assert(entry != NULL);
        assert(!isForward(*entry));
        pinObject(*entry);
    }
}

static void verifyReferenceTable(const ReferenceTable *table)
{
    Object **entry;

    LOGI(">>> verifyReferenceTable(table=%p)", table);
    for (entry = table->table; entry < table->nextEntry; ++entry) {
        assert(entry != NULL);
        assert(!isForward(*entry));
        verifyReference(*entry);
    }
    LOGI("<<< verifyReferenceTable(table=%p)", table);
}

static void scavengeLargeHeapRefTable(LargeHeapRefTable *table)
{
    Object **entry;

    for (; table != NULL; table = table->next) {
        for (entry = table->refs.table; entry < table->refs.nextEntry; ++entry) {
            if ((uintptr_t)*entry & 0x3) {
                /* It's a pending reference operation. */
                assert(!"implemented");
            }
            scavengeReference(entry);
        }
    }
}

/* This code was copied from Thread.c */
static void scavengeThreadStack(Thread *thread)
{
    const u4 *framePtr;
#if WITH_EXTRA_GC_CHECKS > 1
    bool first = true;
#endif

    framePtr = (const u4 *)thread->curFrame;
    while (framePtr != NULL) {
        const StackSaveArea *saveArea;
        const Method *method;

        saveArea = SAVEAREA_FROM_FP(framePtr);
        method = saveArea->method;
        if (method != NULL && !dvmIsNativeMethod(method)) {
#ifdef COUNT_PRECISE_METHODS
            /* the GC is running, so no lock required */
            if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
                LOGI("PGC: added %s.%s %p\n",
                     method->clazz->descriptor, method->name, method);
#endif
#if WITH_EXTRA_GC_CHECKS > 1
            /*
             * May also want to enable the memset() in the "invokeMethod"
             * goto target in the portable interpreter.  That sets the stack
             * to a pattern that makes referring to uninitialized data
             * very obvious.
             */

            if (first) {
                /*
                 * First frame, isn't native, check the "alternate" saved PC
                 * as a sanity check.
                 *
                 * It seems like we could check the second frame if the first
                 * is native, since the PCs should be the same.  It turns out
                 * this doesn't always work.  The problem is that we could
                 * have calls in the sequence:
                 *   interp method #2
                 *   native method
                 *   interp method #1
                 *
                 * and then GC while in the native method after returning
                 * from interp method #2.  The currentPc on the stack is
                 * for interp method #1, but thread->currentPc2 is still
                 * set for the last thing interp method #2 did.
                 *
                 * This can also happen in normal execution:
                 * - sget-object on not-yet-loaded class
                 * - class init updates currentPc2
                 * - static field init is handled by parsing annotations;
                 *   static String init requires creation of a String object,
                 *   which can cause a GC
                 *
                 * Essentially, any pattern that involves executing
                 * interpreted code and then causes an allocation without
                 * executing instructions in the original method will hit
                 * this.  These are rare enough that the test still has
                 * some value.
                 */
                if (saveArea->xtra.currentPc != thread->currentPc2) {
                    LOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p\n",
                         saveArea->xtra.currentPc, thread->currentPc2,
                         method->clazz->descriptor, method->name, method->insns);
                    if (saveArea->xtra.currentPc != NULL)
                        LOGE("  pc inst = 0x%04x\n", *saveArea->xtra.currentPc);
                    if (thread->currentPc2 != NULL)
                        LOGE("  pc2 inst = 0x%04x\n", *thread->currentPc2);
                    dvmDumpThread(thread, false);
                }
            } else {
                /*
                 * It's unusual, but not impossible, for a non-first frame
                 * to be at something other than a method invocation.  For
                 * example, if we do a new-instance on a nonexistent class,
                 * we'll have a lot of class loader activity on the stack
                 * above the frame with the "new" operation.  Could also
                 * happen while we initialize a Throwable when an instruction
                 * fails.
                 *
                 * So there's not much we can do here to verify the PC,
                 * except to verify that it's a GC point.
                 */
            }
            assert(saveArea->xtra.currentPc != NULL);
#endif

            const RegisterMap* pMap;
            const u1* regVector;
            int i;

            Method* nonConstMethod = (Method*) method;  // quiet gcc
            pMap = dvmGetExpandedRegisterMap(nonConstMethod);

            /* assert(pMap != NULL); */

            //LOGI("PGC: %s.%s\n", method->clazz->descriptor, method->name);

            if (pMap != NULL) {
                /* found map, get registers for this address */
                int addr = saveArea->xtra.currentPc - method->insns;
                regVector = dvmRegisterMapGetLine(pMap, addr);
                /*
                if (regVector == NULL) {
                    LOGI("PGC: map but no entry for %s.%s addr=0x%04x\n",
                         method->clazz->descriptor, method->name, addr);
                } else {
                    LOGI("PGC: found map for %s.%s 0x%04x (t=%d)\n",
                         method->clazz->descriptor, method->name, addr,
                         thread->threadId);
                }
                */
            } else {
                /*
                 * No map found.  If precise GC is disabled this is
                 * expected -- we don't create pointers to the map data even
                 * if it's present -- but if it's enabled it means we're
                 * unexpectedly falling back on a conservative scan, so it's
                 * worth yelling a little.
                 */
                if (gDvm.preciseGc) {
                    LOGI("PGC: no map for %s.%s\n", method->clazz->descriptor, method->name);
                }
                regVector = NULL;
            }

            /* assert(regVector != NULL); */

            if (regVector == NULL) {
                /* conservative scan */
                for (i = method->registersSize - 1; i >= 0; i--) {
                    u4 rval = *framePtr++;
                    if (rval != 0 && (rval & 0x3) == 0) {
                        abort();
                        /* dvmMarkIfObject((Object *)rval); */
                    }
                }
            } else {
                /*
                 * Precise scan.  v0 is at the lowest address on the
                 * interpreted stack, and is the first bit in the register
                 * vector, so we can walk through the register map and
                 * memory in the same direction.
                 *
                 * A '1' bit indicates a live reference.
                 */
                u2 bits = 1 << 1;
                for (i = method->registersSize - 1; i >= 0; i--) {
                    /* u4 rval = *framePtr++; */
                    u4 rval = *framePtr;

                    bits >>= 1;
                    if (bits == 1) {
                        /* set bit 9 so we can tell when we're empty */
                        bits = *regVector++ | 0x0100;
                        LOGVV("loaded bits: 0x%02x\n", bits & 0xff);
                    }

                    if (rval != 0 && (bits & 0x01) != 0) {
                        /*
                         * Non-null, register marked as live reference.  This
                         * should always be a valid object.
                         */
#if WITH_EXTRA_GC_CHECKS > 0
                        if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) {
                            /* this is very bad */
                            LOGE("PGC: invalid ref in reg %d: 0x%08x\n",
                                 method->registersSize-1 - i, rval);
                        } else
#endif
                        {
                            // LOGI("stack reference %u@%p", *framePtr, framePtr);
                            /* dvmMarkObjectNonNull((Object *)rval); */
                            scavengeReference((Object **) framePtr);
                        }
                    } else {
                        /*
                         * Null or non-reference, do nothing at all.
                         */
#if WITH_EXTRA_GC_CHECKS > 1
                        if (dvmIsValidObject((Object*) rval)) {
                            /* this is normal, but we feel chatty */
                            LOGD("PGC: ignoring valid ref in reg %d: 0x%08x\n",
                                 method->registersSize-1 - i, rval);
                        }
#endif
                    }
                    ++framePtr;
                }
                dvmReleaseRegisterMapLine(pMap, regVector);
            }
        }
        /* else this is a break frame and there is nothing to mark, or
         * this is a native method and the registers are just the "ins",
         * copied from various registers in the caller's set.
         */

#if WITH_EXTRA_GC_CHECKS > 1
        first = false;
#endif

        /* Don't fall into an infinite loop if things get corrupted.
         */
        assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
               saveArea->prevFrame == NULL);
        framePtr = saveArea->prevFrame;
    }
}

static void scavengeThread(Thread *thread)
{
    assert(thread->status != THREAD_RUNNING ||
           thread->isSuspended ||
           thread == dvmThreadSelf());

    // LOGI("scavengeThread(thread=%p)", thread);

    // LOGI("Scavenging threadObj=%p", thread->threadObj);
    scavengeReference(&thread->threadObj);

    // LOGI("Scavenging exception=%p", thread->exception);
    scavengeReference(&thread->exception);

    scavengeThreadStack(thread);
}

static void scavengeThreadList(void)
{
    Thread *thread;

    dvmLockThreadList(dvmThreadSelf());
    thread = gDvm.threadList;
    while (thread) {
        scavengeThread(thread);
        thread = thread->next;
    }
    dvmUnlockThreadList();
}

static void verifyThreadStack(const Thread *thread)
{
    const u4 *framePtr;

    assert(thread != NULL);
    framePtr = (const u4 *)thread->curFrame;
    while (framePtr != NULL) {
        const StackSaveArea *saveArea;
        const Method *method;

        saveArea = SAVEAREA_FROM_FP(framePtr);
        method = saveArea->method;
        if (method != NULL && !dvmIsNativeMethod(method)) {
            const RegisterMap* pMap;
            const u1* regVector;
            int i;

            Method* nonConstMethod = (Method*) method;  // quiet gcc
            pMap = dvmGetExpandedRegisterMap(nonConstMethod);

            /* assert(pMap != NULL); */

            // LOGI("PGC: %s.%s\n", method->clazz->descriptor, method->name);

            if (pMap != NULL) {
                /* found map, get registers for this address */
                int addr = saveArea->xtra.currentPc - method->insns;
                regVector = dvmRegisterMapGetLine(pMap, addr);
                if (regVector == NULL) {
                    LOGI("PGC: map but no entry for %s.%s addr=0x%04x\n",
                         method->clazz->descriptor, method->name, addr);
                } else {
                    //LOGI("PGC: found map for %s.%s 0x%04x (t=%d)\n", method->clazz->descriptor, method->name, addr, thread->threadId);
                }
            } else {
                /*
                 * No map found.  If precise GC is disabled this is
                 * expected -- we don't create pointers to the map data even
                 * if it's present -- but if it's enabled it means we're
                 * unexpectedly falling back on a conservative scan, so it's
                 * worth yelling a little.
                 */
                if (gDvm.preciseGc) {
                    LOGI("PGC: no map for %s.%s\n",
                         method->clazz->descriptor, method->name);
                }
                regVector = NULL;
            }

            /* assert(regVector != NULL); */

            if (regVector == NULL) {
                /* conservative scan */
                for (i = method->registersSize - 1; i >= 0; i--) {
                    u4 rval = *framePtr++;
                    if (rval != 0 && (rval & 0x3) == 0) {
                        abort();
                        /* dvmMarkIfObject((Object *)rval); */
                    }
                }
            } else {
                /*
                 * Precise scan.  v0 is at the lowest address on the
                 * interpreted stack, and is the first bit in the register
                 * vector, so we can walk through the register map and
                 * memory in the same direction.
                 *
                 * A '1' bit indicates a live reference.
                 */
                u2 bits = 1 << 1;
                for (i = method->registersSize - 1; i >= 0; i--) {
                    u4 rval = *framePtr;

                    bits >>= 1;
                    if (bits == 1) {
                        /* set bit 9 so we can tell when we're empty */
                        bits = *regVector++ | 0x0100;
                        LOGVV("loaded bits: 0x%02x\n", bits & 0xff);
                    }

                    if (rval != 0 && (bits & 0x01) != 0) {
                        /*
                         * Non-null, register marked as live reference.  This
                         * should always be a valid object.
                         */
                        //LOGI("verify stack reference %p", (Object *)*framePtr);
                        verifyReference((Object *)*framePtr);
                    } else {
                        /*
                         * Null or non-reference, do nothing at all.
                         */
                    }
                    ++framePtr;
                }
                dvmReleaseRegisterMapLine(pMap, regVector);
            }
        }
        /* else this is a break frame and there is nothing to mark, or
         * this is a native method and the registers are just the "ins",
         * copied from various registers in the caller's set.
         */

        /* Don't fall into an infinite loop if things get corrupted.
         */
        assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
               saveArea->prevFrame == NULL);
        framePtr = saveArea->prevFrame;
    }
}

static void verifyThread(const Thread *thread)
{
    assert(thread->status != THREAD_RUNNING ||
           thread->isSuspended ||
           thread == dvmThreadSelf());

    LOGI("verifyThread(thread=%p)", thread);

    LOGI("verify threadObj=%p", thread->threadObj);
    verifyReference(thread->threadObj);

    LOGI("verify exception=%p", thread->exception);
    verifyReference(thread->exception);

    LOGI("verify thread->internalLocalRefTable");
    verifyReferenceTable(&thread->internalLocalRefTable);

    LOGI("verify thread->jniLocalRefTable");
    verifyReferenceTable(&thread->jniLocalRefTable);

    /* Can the check be pushed into the promote routine? */
    if (thread->jniMonitorRefTable.table) {
        LOGI("verify thread->jniMonitorRefTable");
        verifyReferenceTable(&thread->jniMonitorRefTable);
    }

    verifyThreadStack(thread);
}

static void verifyThreadList(void)
{
    Thread *thread;

    dvmLockThreadList(dvmThreadSelf());
    thread = gDvm.threadList;
    while (thread) {
        verifyThread(thread);
        thread = thread->next;
    }
    dvmUnlockThreadList();
}

static void pinThread(Thread *thread)
{
    assert(thread != NULL);
    assert(thread->status != THREAD_RUNNING ||
           thread->isSuspended ||
           thread == dvmThreadSelf());
    LOGI("pinThread(thread=%p)", thread);

    LOGI("Pin internalLocalRefTable");
    pinReferenceTable(&thread->internalLocalRefTable);

    LOGI("Pin jniLocalRefTable");
    pinReferenceTable(&thread->jniLocalRefTable);

    /* Can the check be pushed into the promote routine? */
    if (thread->jniMonitorRefTable.table) {
        LOGI("Pin jniMonitorRefTable");
        pinReferenceTable(&thread->jniMonitorRefTable);
    }
}

static void pinThreadList(void)
{
    Thread *thread;

    dvmLockThreadList(dvmThreadSelf());
    thread = gDvm.threadList;
    while (thread) {
        pinThread(thread);
        thread = thread->next;
    }
    dvmUnlockThreadList();
}

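/*
 * verifyThreadList and pinThreadList share the same lock-and-walk
 * pattern.  The helper below is a minimal sketch of factoring that
 * pattern out; the callback type and helper name are illustrative
 * and not part of the existing VM interface.
 */
typedef void ThreadVisitor(Thread *thread);

static void forEachThread(ThreadVisitor *visit)
{
    Thread *thread;

    /* Hold the thread list lock for the duration of the walk. */
    dvmLockThreadList(dvmThreadSelf());
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        visit(thread);
    }
    dvmUnlockThreadList();
}

/* With this helper, pinThreadList() reduces to forEachThread(pinThread). */
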
/*
 * Heap block scavenging.
 */

/*
 * Scavenge objects in the current block.  Scavenging terminates when
 * the cursor reaches the end of the block or when it encounters a run
 * of zero words that extends to the end of the block.  (A worked
 * layout example follows this function.)
 */
static void scavengeBlock(HeapSource *heapSource, size_t block)
{
    u1 *cursor;
    u1 *end;
    size_t size;

    LOG_SCAVENGE("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);

    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);

    cursor = blockToAddress(heapSource, block);
    end = cursor + BLOCK_SIZE;
    LOG_SCAVENGE("scavengeBlock start=%p, end=%p", cursor, end);

    /* Parse and scavenge the current block. */
    size = 0;
    while (cursor < end) {
        u4 word = *(u4 *)cursor;
        if (word != 0) {
            size = scavengeObject((Object *)cursor);
            size = alignUp(size, ALLOC_ALIGNMENT);
            cursor += size;
        } else if (cursor == (u1 *)gDvm.unlinkedJavaLangClass) {
            /* The unlinked java.lang.Class has a NULL class pointer. */
            size = sizeof(ClassObject);
            cursor += size;
        } else {
            /* Check for padding. */
            while (*(u4 *)cursor == 0) {
                cursor += 4;
                if (cursor == end) break;
            }
            /* Punt if something went wrong. */
            assert(cursor == end);
        }
    }
}

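/*
 * For illustration (addresses and sizes here are made up): a
 * 4096-byte block at 0x40000000 might hold a 24-byte object at
 * 0x40000000 followed by a 32-byte object at 0x40000018, with zero
 * words from 0x40000038 to the end of the block.  The loop above
 * scavenges both objects, consumes the trailing zero run, and stops
 * when the cursor reaches 0x40001000.
 */
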
static size_t objectSize(Object *obj)
{
    size_t size;

    if (obj->clazz == gDvm.classJavaLangClass ||
        obj->clazz == gDvm.unlinkedJavaLangClass) {
        size = dvmClassObjectSize((ClassObject *)obj);
    } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
        size = dvmArrayObjectSize((ArrayObject *)obj);
    } else {
        size = obj->clazz->objectSize;
    }
    return size;
}

static void verifyBlock(HeapSource *heapSource, size_t block)
{
    u1 *cursor;
    u1 *end;
    size_t size;

    // LOGI("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);

    assert(heapSource != NULL);
    assert(block < heapSource->totalBlocks);
    assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);

    cursor = blockToAddress(heapSource, block);
    end = cursor + BLOCK_SIZE;
    // LOGI("verifyBlock start=%p, end=%p", cursor, end);

    /* Parse and verify the current block. */
    size = 0;
    while (cursor < end) {
        u4 word = *(u4 *)cursor;
        if (word != 0) {
            dvmVerifyObject((Object *)cursor);
            size = objectSize((Object *)cursor);
            size = alignUp(size, ALLOC_ALIGNMENT);
            cursor += size;
        } else if (cursor == (u1 *)gDvm.unlinkedJavaLangClass) {
            /* The unlinked java.lang.Class has a NULL class pointer. */
            size = sizeof(ClassObject);
            cursor += size;
        } else {
            /* Check for padding.  Use a u4 read to match the 4-byte
             * stride below. */
            while (*(u4 *)cursor == 0) {
                cursor += 4;
                if (cursor == end) break;
            }
            /* Punt if something went wrong. */
            assert(cursor == end);
        }
    }
}

static void describeBlockQueue(const HeapSource *heapSource)
{
    size_t block, count;
    char space;

    block = heapSource->queueHead;
    count = 0;
    LOG_SCAVENGE(">>> describeBlockQueue(heapSource=%p)", heapSource);
    /* Count the number of blocks enqueued. */
    while (block != QUEUE_TAIL) {
        block = heapSource->blockQueue[block];
        ++count;
    }
    LOG_SCAVENGE("blockQueue %zu elements, enqueued %zu",
                 count, heapSource->queueSize);
    block = heapSource->queueHead;
    while (block != QUEUE_TAIL) {
        space = heapSource->blockSpace[block];
        LOG_SCAVENGE("block=%zu@%p,space=%d",
                     block, blockToAddress(heapSource, block), space);
        block = heapSource->blockQueue[block];
    }

    LOG_SCAVENGE("<<< describeBlockQueue(heapSource=%p)", heapSource);
}

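/*
 * Aside on representation: the block queue is a singly-linked list
 * threaded through an array.  blockQueue[i] holds the index of the
 * block that follows block i, and QUEUE_TAIL marks the end.  For
 * example (indices are made up), queueHead == 3, blockQueue[3] == 7,
 * blockQueue[7] == QUEUE_TAIL describes the two-element queue 3 -> 7.
 */
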
/*
 * Blackens promoted objects by scavenging every block on the queue.
 */
static void scavengeBlockQueue(void)
{
    HeapSource *heapSource;
    size_t block;

    LOG_SCAVENGE(">>> scavengeBlockQueue()");
    heapSource = gDvm.gcHeap->heapSource;
    describeBlockQueue(heapSource);
    while (heapSource->queueHead != QUEUE_TAIL) {
        block = heapSource->queueHead;
        LOG_SCAVENGE("Dequeueing block %zu", block);
        scavengeBlock(heapSource, block);
        heapSource->queueHead = heapSource->blockQueue[block];
        LOG_SCAVENGE("New queue head is %zu", heapSource->queueHead);
    }
    LOG_SCAVENGE("<<< scavengeBlockQueue()");
}

/*
 * Scan the block list and verify all blocks that are marked as being
 * in new space.  This should be parameterized so we can invoke this
 * routine outside of the context of a collection; a sketch of such a
 * variant follows the function.
 */
static void verifyNewSpace(void)
{
    HeapSource *heapSource;
    size_t i;
    size_t countFree, countToSpace, countFromSpace, countContinued;

    countFree = countToSpace = countFromSpace = countContinued = 0;
    heapSource = gDvm.gcHeap->heapSource;
    for (i = 0; i < heapSource->totalBlocks; ++i) {
        switch (heapSource->blockSpace[i]) {
        case BLOCK_FREE: ++countFree; break;
        case BLOCK_TO_SPACE: ++countToSpace; break;
        case BLOCK_FROM_SPACE: ++countFromSpace; break;
        case BLOCK_CONTINUED: ++countContinued; break;
        default: assert(!"reached");
        }
    }
    LOG_VERIFY("Block Demographics: "
               "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu",
               countFree, countToSpace, countFromSpace, countContinued);
    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) {
            continue;
        }
        verifyBlock(heapSource, i);
    }
}

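/*
 * A minimal sketch of the parameterized variant suggested above; the
 * helper name is illustrative and not part of the existing interface.
 * Note that verifyBlock asserts its block is in to-space, so that
 * assertion would need the same parameterization before this could
 * verify other spaces.
 */
static void verifyBlocksInSpace(HeapSource *heapSource, char space)
{
    size_t i;

    assert(heapSource != NULL);
    for (i = 0; i < heapSource->totalBlocks; ++i) {
        if (heapSource->blockSpace[i] == space) {
            verifyBlock(heapSource, i);
        }
    }
}
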
static void scavengeGlobals(void)
{
    scavengeReference((Object **)(void *)&gDvm.classJavaLangClass);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangClassArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangError);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangObject);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangObjectArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangRuntimeException);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangString);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangThread);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangVMThread);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangThreadGroup);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangThrowable);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElement);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElementArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArrayArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectAccessibleObject);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructor);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructorArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectField);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectFieldArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethod);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethodArray);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectProxy);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangExceptionInInitializerError);
    scavengeReference((Object **)(void *)&gDvm.classJavaLangRefReference);
    scavengeReference((Object **)(void *)&gDvm.classJavaNioReadWriteDirectByteBuffer);
    scavengeReference((Object **)(void *)&gDvm.classJavaSecurityAccessController);
    scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
    scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
    scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray);
    scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyNioInternalDirectBuffer);
    scavengeReference((Object **)(void *)&gDvm.classArrayBoolean);
    scavengeReference((Object **)(void *)&gDvm.classArrayChar);
    scavengeReference((Object **)(void *)&gDvm.classArrayFloat);
    scavengeReference((Object **)(void *)&gDvm.classArrayDouble);
    scavengeReference((Object **)(void *)&gDvm.classArrayByte);
    scavengeReference((Object **)(void *)&gDvm.classArrayShort);
    scavengeReference((Object **)(void *)&gDvm.classArrayInt);
    scavengeReference((Object **)(void *)&gDvm.classArrayLong);
}

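/*
 * The list above is easy to get out of sync with the set of cached
 * class globals.  A table-driven alternative (a sketch only; the
 * table name is illustrative) would keep the slots in one place:
 *
 *     static Object **const globalRefSlots[] = {
 *         (Object **)(void *)&gDvm.classJavaLangClass,
 *         (Object **)(void *)&gDvm.classJavaLangObject,
 *         // ... one entry per cached class ...
 *     };
 *
 *     static void scavengeGlobals(void)
 *     {
 *         size_t i;
 *         for (i = 0; i < NELEM(globalRefSlots); ++i) {
 *             scavengeReference(globalRefSlots[i]);
 *         }
 *     }
 *
 * NELEM is the usual count-of-array-elements macro.
 */
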
void describeHeap(void)
{
    HeapSource *heapSource;

    heapSource = gDvm.gcHeap->heapSource;
    describeBlocks(heapSource);
}

/*
 * The collection interface.  Collection has a few distinct phases.
 * The first is flipping, AKA condemning, AKA whitening the heap.
 * The second is to promote all objects which are pointed to by
 * pinned or ambiguous references.  The third phase is tracing from
 * the stacks, registers and various globals.  Lastly, the heap is
 * verified; this phase should eventually be made optional.
 */
void dvmScavengeRoots(void)  /* Needs a new name badly */
{
    GcHeap *gcHeap;

    {
        size_t alloc, unused, total;

        room(&alloc, &unused, &total);
        LOGI("BEFORE GC: %zu alloc, %zu free, %zu total.",
             alloc, unused, total);
    }

    gcHeap = gDvm.gcHeap;
    dvmHeapSourceFlip();

    /*
     * Promote blocks with stationary objects.
     */

    // LOGI("Pinning gDvm.threadList");
    pinThreadList();

    // LOGI("Pinning gDvm.jniGlobalRefTable");
    pinReferenceTable(&gDvm.jniGlobalRefTable);

    // LOGI("Pinning gDvm.jniPinRefTable");
    pinReferenceTable(&gDvm.jniPinRefTable);

    // LOGI("Pinning gDvm.gcHeap->nonCollectableRefs");
    pinReferenceTable(&gcHeap->nonCollectableRefs);

    // LOGI("Pinning gDvm.loadedClasses");
    pinHashTableEntries(gDvm.loadedClasses);

    // LOGI("Pinning gDvm.primitiveClass");
    pinPrimitiveClasses();

    // LOGI("Pinning gDvm.internedStrings");
    pinInternedStrings();

    // describeBlocks(gcHeap->heapSource);

    /*
     * Create the first open new-space block right here.
     */

    /* Reset allocation to an unallocated block. */
    gcHeap->heapSource->allocPtr = allocateBlocks(gcHeap->heapSource, 1);
    gcHeap->heapSource->allocLimit =
        gcHeap->heapSource->allocPtr + BLOCK_SIZE;
    /*
     * Hack: promote the empty block allocated above.  If the
     * promotions that occurred above did not actually gray any
     * objects, the block queue may be empty.  We must force a
     * promotion to be safe.
     */
    promoteBlockByAddr(gcHeap->heapSource, gcHeap->heapSource->allocPtr);

    /*
     * Scavenge blocks and relocate movable objects.
     */

    LOGI("Scavenging gDvm.threadList");
    scavengeThreadList();

    LOGI("Scavenging gDvm.gcHeap->referenceOperations");
    scavengeLargeHeapRefTable(gcHeap->referenceOperations);

    LOGI("Scavenging gDvm.gcHeap->pendingFinalizationRefs");
    scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs);

    LOGI("Scavenging miscellaneous global refs");
    scavengeReference(&gDvm.outOfMemoryObj);
    scavengeReference(&gDvm.internalErrorObj);
    scavengeReference(&gDvm.noClassDefFoundErrorObj);

    LOGI("Scavenging gDvm.dbgRegistry");
    scavengeHashTable(gDvm.dbgRegistry);

    // LOGI("Scavenging gDvm.internedStrings");
    scavengeInternedStrings();

    LOGI("Root scavenge has completed.");

    scavengeBlockQueue();

    LOGI("Re-snap global class pointers.");
    scavengeGlobals();

    LOGI("New space scavenge has completed.");

    /*
     * Verify the stack and heap.
     */

    // LOGI("Validating new space.");

    verifyInternedStrings();

    verifyThreadList();

    verifyNewSpace();

    // LOGI("New space verify has completed.");

    //describeBlocks(gcHeap->heapSource);

    clearFromSpace(gcHeap->heapSource);

    {
        size_t alloc, unused, total;

        room(&alloc, &unused, &total);
        LOGI("AFTER GC: %zu alloc, %zu free, %zu total.",
             alloc, unused, total);
    }
}

/*
 * Interface compatibility routines.
 */

void dvmClearWhiteRefs(Object **list)
{
    /* TODO */
    assert(*list == NULL);
}

void dvmHandleSoftRefs(Object **list)
{
    /* TODO */
    assert(*list == NULL);
}

bool dvmHeapBeginMarkStep(GcMode mode)
{
    /* do nothing */
    return true;
}

void dvmHeapFinishMarkStep(void)
{
    /* do nothing */
}

void dvmHeapMarkRootSet(void)
{
    /* do nothing */
}

void dvmHeapScanMarkedObjects(void)
{
    dvmScavengeRoots();
}

void dvmHeapScheduleFinalizations(void)
{
    /* do nothing */
}

void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
{
    /* do nothing */
}

void dvmMarkObjectNonNull(const Object *obj)
{
    assert(!"not implemented");
}