/*
2 * Copyright (C) 2009 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <errno.h>
18#include <limits.h>
19#include <sys/mman.h>
20
21#include "Dalvik.h"
22#include "alloc/Heap.h"
23#include "alloc/HeapBitmap.h"
24#include "alloc/HeapInternal.h"
25#include "alloc/HeapSource.h"
26#include "alloc/Verify.h"
27#include "alloc/clz.h"
28
29/*
30 * A "mostly copying", generational, garbage collector.
31 *
32 * TODO: we allocate our own contiguous tract of page frames to back
33 * object allocations. To cooperate with other heaps active in the
34 * virtual machine we need to move the responsibility of allocating
35 * pages someplace outside of this code.
36 *
37 * The other major data structures that maintain the state of the heap
38 * are the block space table and the block queue.
39 *
40 * The block space table records the state of a block. We must track
41 * whether a block is:
42 *
43 * - Free or allocated in some space.
44 *
45 * - If the block holds part of a large object allocation, whether the
46 * block is the initial or a continued block of the allocation.
47 *
48 * - Whether the block is pinned, that is to say whether at least one
49 * object in the block must remain stationary. Only needed during a
50 * GC.
51 *
52 * - Which space the object belongs to. At present this means
53 * from-space or to-space.
54 *
55 * The block queue is used during garbage collection. Unlike Cheney's
56 * algorithm, from-space and to-space are not contiguous. Therefore,
57 * one cannot maintain the state of the copy with just two pointers.
58 * The block queue exists to thread lists of blocks from the various
59 * spaces together.
60 *
61 * Additionally, we record the free space frontier of the heap, as
62 * well as the address of the first object within a block, which is
63 * required to copy objects following a large object (not currently
64 * implemented). This is stored in the heap source structure. This
65 * should be moved elsewhere to support in-line allocations from Java
66 * threads.
67 *
68 * Allocation requests are satisfied by reserving storage from one or
69 * more contiguous blocks. Objects that are small enough to fit
70 * inside a block are packed together within a block. Objects that
71 * are larger than a block are allocated from contiguous sequences of
72 * blocks. When half the available blocks are filled, a garbage
73 * collection occurs. We "flip" spaces (exchange from- and to-space),
74 * copy live objects into to space, and perform pointer adjustment.
75 *
76 * Copying is made more complicated by the requirement that some
77 * objects must not be moved. This property is known as "pinning".
78 * These objects must be dealt with specially. We use Bartlett's
79 * scheme; blocks containing such objects are grayed (promoted) at the
80 * start of a garbage collection. By virtue of this trick, marking
81 * from the roots proceeds as usual but all objects on those pages are
82 * considered promoted and therefore not moved.
83 *
84 * TODO: there is sufficient information within the garbage collector
85 * to implement Attardi's scheme for evacuating unpinned objects from
86 * a page that is otherwise pinned. This would eliminate false
87 * retention caused by the large pinning granularity.
88 *
89 * We need a scheme for medium and large objects. Ignore that for
90 * now, we can return to this later.
91 *
92 * Eventually we need to worry about promoting objects out of the
93 * copy-collected heap (tenuring) into a less volatile space. Copying
94 * may not always be the best policy for such spaces. We should
95 * consider a variant of mark, sweep, compact.
96 *
97 * The block scheme allows us to use VM page faults to maintain a
98 * write barrier. Consider having a special leaf state for a page.
99 *
100 * Bibliography:
101 *
102 * C. J. Cheney. 1970. A non-recursive list compacting
103 * algorithm. CACM. 13-11 pp677--678.
104 *
105 * Joel F. Bartlett. 1988. Compacting Garbage Collection with
106 * Ambiguous Roots. Digital Equipment Corporation.
107 *
108 * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up
109 * Generations and C++. Digital Equipment Corporation.
110 *
111 * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage
112 * Collection in Uncooperative Environments. Digital Equipment
113 * Corporation.
114 *
115 * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory
116 * Management Framework. TR-94-010
117 *
118 * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable
119 * memory management framework for C++. Software -- Practice and
120 * Experience. 28(11), 1143-1183.
121 *
122 */
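/*
 * A worked example of the block arithmetic described above (purely
 * illustrative; the constants come from the BLOCK_SHIFT/BLOCK_SIZE
 * definitions below): with 512-byte blocks, an object at address
 * 0x40001300 in a heap whose blockBase is 0x40000000 lives in
 * relative block (0x40001300 >> 9) - (0x40000000 >> 9) == 9, and
 * blockSpace[9] records whether that block is free, from-space,
 * to-space, or the continuation of a large allocation.
 */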
123
124#define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0]))
125
126#if 1
127#define LOG_ALLOC LOGI
128#define LOG_SCAVENGE LOGI
129#define LOG_TRANSPORT LOGI
130#define LOG_PROMOTE LOGI
131#define LOG_VERIFY LOGI
132#else
133#define LOG_ALLOC(...) ((void *)0)
134#define LOG_SCAVENGE(...) ((void *)0)
135#define LOG_TRANSPORT(...) ((void *)0)
136#define LOG_PROMOTE(...) ((void *)0)
137#define LOG_VERIFY(...) ((void *)0)
138#endif
139
static void enqueueBlock(HeapSource *heapSource, size_t block);
static void scavengeReference(Object **obj);
static void verifyReference(const void *obj);
static void printHeapBitmap(const HeapBitmap *bitmap);
static void printHeapBitmapSxS(const HeapBitmap *b1, const HeapBitmap *b2);
static bool isToSpace(const void *addr);
static bool isFromSpace(const void *addr);
static size_t sumHeapBitmap(const HeapBitmap *bitmap);
static size_t objectSize(const Object *obj);
static void scavengeDataObject(DataObject *obj);

151/*
152 * We use 512-byte blocks.
153 */
154enum { BLOCK_SHIFT = 9 };
155enum { BLOCK_SIZE = 1 << BLOCK_SHIFT };
156
157/*
158 * Space identifiers, stored into the blockSpace array.
159 */
160enum {
161 BLOCK_FREE = 0,
162 BLOCK_FROM_SPACE = 1,
163 BLOCK_TO_SPACE = 2,
164 BLOCK_CONTINUED = 7
165};
166
167/*
168 * Alignment for all allocations, in bytes.
169 */
170enum { ALLOC_ALIGNMENT = 8 };
171
172/*
173 * Sentinel value for the queue end.
174 */
175#define QUEUE_TAIL (~(size_t)0)
176
177struct HeapSource {
178
179 /* The base address of backing store. */
180 u1 *blockBase;
181
182 /* Total number of blocks available for allocation. */
183 size_t totalBlocks;
184 size_t allocBlocks;
185
186 /*
187 * The scavenger work queue. Implemented as an array of index
188 * values into the queue.
189 */
190 size_t *blockQueue;
191
192 /*
     * Base and limit block numbers.  These are the start and end
     * addresses of the backing store, shifted right by BLOCK_SHIFT.
     * Blocks are converted to base-relative numbers when indexing
     * into the block queue.  TODO: make the block queue base
     * relative rather than the index into the block queue.
197 */
198 size_t baseBlock, limitBlock;
199
200 size_t queueHead;
201 size_t queueTail;
202 size_t queueSize;
203
    /* Per-block space table; holds one of the BLOCK_* space
     * identifiers (free, from-space, to-space, continued) for each block. */
205 char *blockSpace;
206
207 /* Start of free space in the current block. */
208 u1 *allocPtr;
209 /* Exclusive limit of free space in the current block. */
210 u1 *allocLimit;
211
212 HeapBitmap allocBits;
213
214 /*
215 * Singly-linked lists of live Reference objects. Built when
216 * scavenging, threaded through the Reference.vmData field.
217 */
218 DataObject *softReferenceList;
219 DataObject *weakReferenceList;
220 DataObject *phantomReferenceList;
221
222 /*
223 * The starting size of the heap. This value is the same as the
224 * value provided to the -Xms flag.
225 */
226 size_t minimumSize;
227
228 /*
229 * The maximum size of the heap. This value is the same as the
230 * -Xmx flag.
231 */
232 size_t maximumSize;
233
234 /*
235 * The current, committed size of the heap. At present, this is
236 * equivalent to the maximumSize.
237 */
238 size_t currentSize;
239
240 size_t bytesAllocated;
241};
242
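/*
 * Rounding helpers.  Both assume n is a power of two: x & -n clears
 * the low-order bits, so for example alignDown(13, 8) == 8 and
 * alignUp(13, 8) == alignDown(20, 8) == 16.
 */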
243static unsigned long alignDown(unsigned long x, unsigned long n)
244{
245 return x & -n;
246}
247
248static unsigned long alignUp(unsigned long x, unsigned long n)
249{
250 return alignDown(x + (n - 1), n);
251}
252
253static void describeBlocks(const HeapSource *heapSource)
254{
255 size_t i;
256
257 for (i = 0; i < heapSource->totalBlocks; ++i) {
258 if ((i % 32) == 0) putchar('\n');
259 printf("%d ", heapSource->blockSpace[i]);
260 }
261 putchar('\n');
262}
263
264/*
265 * Virtual memory interface.
266 */
267
268static void *virtualAlloc(size_t length)
269{
270 void *addr;
271 int flags, prot;
272
273 flags = MAP_PRIVATE | MAP_ANONYMOUS;
274 prot = PROT_READ | PROT_WRITE;
275 addr = mmap(NULL, length, prot, flags, -1, 0);
276 if (addr == MAP_FAILED) {
277 LOGE_HEAP("mmap: %s", strerror(errno));
278 addr = NULL;
279 }
280 return addr;
281}
282
283static void virtualFree(void *addr, size_t length)
284{
285 int res;
286
287 assert(addr != NULL);
288 assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0);
289 res = munmap(addr, length);
290 if (res == -1) {
291 LOGE_HEAP("munmap: %s", strerror(errno));
292 }
293}
294
295static int isValidAddress(const HeapSource *heapSource, const u1 *addr)
296{
297 size_t block;
298
299 block = (uintptr_t)addr >> BLOCK_SHIFT;
300 return heapSource->baseBlock <= block &&
301 heapSource->limitBlock > block;
302}
303
304/*
305 * Iterate over the block map looking for a contiguous run of free
306 * blocks.
307 */
308static void *allocateBlocks(HeapSource *heapSource, size_t blocks)
309{
310 void *addr;
311 size_t allocBlocks, totalBlocks;
312 size_t i, j;
313
314 allocBlocks = heapSource->allocBlocks;
315 totalBlocks = heapSource->totalBlocks;
316 /* Check underflow. */
317 assert(blocks != 0);
318 /* Check overflow. */
319 if (allocBlocks + blocks > totalBlocks / 2) {
320 return NULL;
321 }
322 /* Scan block map. */
    for (i = 0; i + blocks <= totalBlocks; ++i) {
        /* Check fit; the loop bound above keeps i+j inside the block map. */
        for (j = 0; j < blocks; ++j) {
            if (heapSource->blockSpace[i+j] != BLOCK_FREE) {
                break;
            }
        }
330 /* No fit? */
331 if (j != blocks) {
332 i += j;
333 continue;
334 }
335 /* Fit, allocate. */
336 heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */
337 for (j = 1; j < blocks; ++j) {
338 heapSource->blockSpace[i+j] = BLOCK_CONTINUED;
339 }
340 heapSource->allocBlocks += blocks;
341 addr = &heapSource->blockBase[i*BLOCK_SIZE];
342 memset(addr, 0, blocks*BLOCK_SIZE);
343 /* Collecting? */
344 if (heapSource->queueHead != QUEUE_TAIL) {
345 LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
346 /*
             * This allocation was made on behalf of the transporter when it
348 * shaded a white object gray. We enqueue the block so
349 * the scavenger can further shade the gray objects black.
350 */
351 enqueueBlock(heapSource, i);
352 }
353
354 return addr;
355 }
356 /* Insufficient space, fail. */
357 LOGE("Insufficient space, %zu blocks, %zu blocks allocated and %zu bytes allocated",
358 heapSource->totalBlocks,
359 heapSource->allocBlocks,
360 heapSource->bytesAllocated);
361 return NULL;
362}
363
364/* Converts an absolute address to a relative block number. */
365static size_t addressToBlock(const HeapSource *heapSource, const void *addr)
366{
367 assert(heapSource != NULL);
368 assert(isValidAddress(heapSource, addr));
369 return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock;
370}
371
372/* Converts a relative block number to an absolute address. */
373static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
374{
375 u1 *addr;
376
377 addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
378 assert(isValidAddress(heapSource, addr));
379 return addr;
380}
381
382static void clearBlock(HeapSource *heapSource, size_t block)
383{
384 u1 *addr;
385 size_t i;
386
387 assert(heapSource != NULL);
388 assert(block < heapSource->totalBlocks);
389 addr = heapSource->blockBase + block*BLOCK_SIZE;
390 memset(addr, 0xCC, BLOCK_SIZE);
391 for (i = 0; i < BLOCK_SIZE; i += 8) {
392 dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i);
393 }
394}
395
396static void clearFromSpace(HeapSource *heapSource)
397{
398 size_t i, count;
399
400 assert(heapSource != NULL);
401 i = count = 0;
402 while (i < heapSource->totalBlocks) {
403 if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) {
404 ++i;
405 continue;
406 }
407 heapSource->blockSpace[i] = BLOCK_FREE;
408 clearBlock(heapSource, i);
409 ++i;
410 ++count;
411 while (i < heapSource->totalBlocks &&
412 heapSource->blockSpace[i] == BLOCK_CONTINUED) {
413 heapSource->blockSpace[i] = BLOCK_FREE;
414 clearBlock(heapSource, i);
415 ++i;
416 ++count;
417 }
418 }
419 LOGI("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE);
420}
421
422/*
423 * Appends the given block to the block queue. The block queue is
424 * processed in-order by the scavenger.
425 */
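/*
 * For example (illustrative only): enqueueing block 3 into an empty
 * queue sets queueHead = 3 and blockQueue[3] = QUEUE_TAIL; enqueueing
 * block 7 next sets blockQueue[3] = 7, blockQueue[7] = QUEUE_TAIL,
 * and queueTail = 7, threading the blocks into a singly-linked list
 * indexed by block number.
 */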
426static void enqueueBlock(HeapSource *heapSource, size_t block)
427{
428 assert(heapSource != NULL);
429 assert(block < heapSource->totalBlocks);
430 if (heapSource->queueHead != QUEUE_TAIL) {
431 heapSource->blockQueue[heapSource->queueTail] = block;
432 } else {
433 heapSource->queueHead = block;
434 }
435 heapSource->blockQueue[block] = QUEUE_TAIL;
436 heapSource->queueTail = block;
437 ++heapSource->queueSize;
438}
439
440/*
441 * Grays all objects within the block corresponding to the given
442 * address.
443 */
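/*
 * This implements the block-granularity pinning (Bartlett-style
 * promotion) described in the file header: the whole block is shifted
 * to to-space and enqueued, so the objects it contains are scanned in
 * place rather than copied.
 */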
444static void promoteBlockByAddr(HeapSource *heapSource, const void *addr)
445{
446 size_t block;
447
448 block = addressToBlock(heapSource, (const u1 *)addr);
449 if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
450 // LOGI("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
451 heapSource->blockSpace[block] = BLOCK_TO_SPACE;
452 enqueueBlock(heapSource, block);
453 /* TODO(cshapiro): count continued blocks?*/
454 heapSource->allocBlocks += 1;
455 } else {
456 // LOGI("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
457 }
458}
459
460GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
461{
462 GcHeap* gcHeap;
463 HeapSource *heapSource;
464
465 assert(startSize <= absoluteMaxSize);
466
467 heapSource = malloc(sizeof(*heapSource));
468 assert(heapSource != NULL);
469 memset(heapSource, 0, sizeof(*heapSource));
470
471 heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE);
472 heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE);
473
474 heapSource->currentSize = heapSource->maximumSize;
475
476 /* Allocate underlying storage for blocks. */
477 heapSource->blockBase = virtualAlloc(heapSource->maximumSize);
478 assert(heapSource->blockBase != NULL);
479 heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT;
480 heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT;
481
482 heapSource->allocBlocks = 0;
483 heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock);
484
    assert(heapSource->totalBlocks == heapSource->maximumSize / BLOCK_SIZE);
486
487 {
488 size_t size = sizeof(heapSource->blockQueue[0]);
489 heapSource->blockQueue = malloc(heapSource->totalBlocks*size);
490 assert(heapSource->blockQueue != NULL);
491 memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size);
492 heapSource->queueHead = QUEUE_TAIL;
493 }
494
495 /* Byte indicating space residence or free status of block. */
496 {
497 size_t size = sizeof(heapSource->blockSpace[0]);
498 heapSource->blockSpace = malloc(heapSource->totalBlocks*size);
499 assert(heapSource->blockSpace != NULL);
500 memset(heapSource->blockSpace, 0, heapSource->totalBlocks*size);
501 }
502
503 dvmHeapBitmapInit(&heapSource->allocBits,
504 heapSource->blockBase,
505 heapSource->maximumSize,
506 "blockBase");
507
508 /* Initialize allocation pointers. */
509 heapSource->allocPtr = allocateBlocks(heapSource, 1);
510 heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;
511
512 gcHeap = malloc(sizeof(*gcHeap));
513 assert(gcHeap != NULL);
514 memset(gcHeap, 0, sizeof(*gcHeap));
515 gcHeap->heapSource = heapSource;
516
517 return gcHeap;
518}
519
520/*
521 * Perform any required heap initializations after forking from the
522 * zygote process. This is a no-op for the time being. Eventually
523 * this will demarcate the shared region of the heap.
524 */
525bool dvmHeapSourceStartupAfterZygote(void)
526{
527 return true;
528}
529
530bool dvmHeapSourceStartupBeforeFork(void)
531{
532 assert(!"implemented");
533 return false;
534}
535
536void dvmHeapSourceShutdown(GcHeap **gcHeap)
537{
538 if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL)
539 return;
540 virtualFree((*gcHeap)->heapSource->blockBase,
541 (*gcHeap)->heapSource->maximumSize);
542 free((*gcHeap)->heapSource);
543 (*gcHeap)->heapSource = NULL;
544 free(*gcHeap);
545 *gcHeap = NULL;
546}
547
548size_t dvmHeapSourceGetValue(enum HeapSourceValueSpec spec,
549 size_t perHeapStats[],
550 size_t arrayLen)
551{
552 HeapSource *heapSource;
553 size_t value;
554
555 heapSource = gDvm.gcHeap->heapSource;
556 switch (spec) {
557 case HS_EXTERNAL_BYTES_ALLOCATED:
558 value = 0;
559 break;
560 case HS_EXTERNAL_LIMIT:
561 value = 0;
562 break;
563 case HS_FOOTPRINT:
564 value = heapSource->maximumSize;
565 break;
566 case HS_ALLOWED_FOOTPRINT:
567 value = heapSource->maximumSize;
568 break;
569 case HS_BYTES_ALLOCATED:
570 value = heapSource->bytesAllocated;
571 break;
572 case HS_OBJECTS_ALLOCATED:
573 value = sumHeapBitmap(&heapSource->allocBits);
574 break;
575 default:
576 assert(!"implemented");
577 value = 0;
578 }
579 if (perHeapStats) {
580 *perHeapStats = value;
581 }
582 return value;
583}
584
585/*
586 * Performs a shallow copy of the allocation bitmap into the given
587 * vector of heap bitmaps.
588 */
589void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
590 size_t numHeaps)
591{
592 assert(!"implemented");
593}
594
595HeapBitmap *dvmHeapSourceGetLiveBits(void)
596{
597 assert(!"implemented");
598 return NULL;
599}
600
601/*
602 * Allocate the specified number of bytes from the heap. The
603 * allocation cursor points into a block of free storage. If the
604 * given allocation fits in the remaining space of the block, we
605 * advance the cursor and return a pointer to the free storage. If
606 * the allocation cannot fit in the current block but is smaller than
607 * a block we request a new block and allocate from it instead. If
608 * the allocation is larger than a block we must allocate from a span
609 * of contiguous blocks.
610 */
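/*
 * Illustrative sizing (assuming the 512-byte blocks and 8-byte
 * alignment defined above): a 40-byte request is already 8-byte
 * aligned and is carved from the current block if 40 bytes remain; a
 * 500-byte request that does not fit in the current block is served
 * from a fresh block; a 2000-byte request needs
 * alignUp(2000, 512) / 512 == 4 contiguous blocks.
 */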
611void *dvmHeapSourceAlloc(size_t length)
612{
613 HeapSource *heapSource;
614 unsigned char *addr;
615 size_t aligned, available, blocks;
616
617 heapSource = gDvm.gcHeap->heapSource;
618 assert(heapSource != NULL);
619 assert(heapSource->allocPtr != NULL);
620 assert(heapSource->allocLimit != NULL);
621
622 aligned = alignUp(length, ALLOC_ALIGNMENT);
623 available = heapSource->allocLimit - heapSource->allocPtr;
624
625 /* Try allocating inside the current block. */
626 if (aligned <= available) {
627 addr = heapSource->allocPtr;
628 heapSource->allocPtr += aligned;
629 heapSource->bytesAllocated += aligned;
630 dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
631 return addr;
632 }
633
634 /* Try allocating in a new block. */
635 if (aligned <= BLOCK_SIZE) {
636 addr = allocateBlocks(heapSource, 1);
637 if (addr != NULL) {
638 heapSource->allocLimit = addr + BLOCK_SIZE;
639 heapSource->allocPtr = addr + aligned;
640 heapSource->bytesAllocated += aligned;
641 dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
642 /* TODO(cshapiro): pad out the current block. */
643 }
644 return addr;
645 }
646
647 /* Try allocating in a span of blocks. */
648 blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE;
649
650 addr = allocateBlocks(heapSource, blocks);
651 /* Propagate failure upward. */
652 if (addr != NULL) {
653 heapSource->bytesAllocated += aligned;
654 dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
655 /* TODO(cshapiro): pad out free space in the last block. */
656 }
657 return addr;
658}
659
660void *dvmHeapSourceAllocAndGrow(size_t size)
661{
662 return dvmHeapSourceAlloc(size);
663}
664
665/* TODO: refactor along with dvmHeapSourceAlloc */
666void *allocateGray(size_t size)
667{
668 assert(gDvm.gcHeap->heapSource->queueHead != QUEUE_TAIL);
669 return dvmHeapSourceAlloc(size);
670}
671
672/*
673 * Returns true if the given address is within the heap and points to
674 * the header of a live object.
675 */
676bool dvmHeapSourceContains(const void *addr)
677{
678 HeapSource *heapSource;
679 HeapBitmap *bitmap;
680
681 heapSource = gDvm.gcHeap->heapSource;
682 bitmap = &heapSource->allocBits;
    if (!dvmHeapBitmapCoversAddress(bitmap, addr)) {
684 return false;
685 } else {
686 return dvmHeapBitmapIsObjectBitSet(bitmap, addr);
687 }
}
689
690bool dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
691{
692 assert(!"implemented");
693 return false;
694}
695
696size_t dvmHeapSourceChunkSize(const void *ptr)
697{
698 assert(!"implemented");
699 return 0;
700}
701
702size_t dvmHeapSourceFootprint(void)
703{
704 assert(!"implemented");
705 return 0;
706}
707
708/*
709 * Returns the "ideal footprint" which appears to be the number of
710 * bytes currently committed to the heap. This starts out at the
711 * start size of the heap and grows toward the maximum size.
712 */
713size_t dvmHeapSourceGetIdealFootprint(void)
714{
715 return gDvm.gcHeap->heapSource->currentSize;
716}
717
718float dvmGetTargetHeapUtilization(void)
719{
720 assert(!"implemented");
721 return 0.0f;
722}
723
724void dvmSetTargetHeapUtilization(float newTarget)
725{
726 assert(!"implemented");
727}
728
729size_t dvmMinimumHeapSize(size_t size, bool set)
730{
731 return gDvm.gcHeap->heapSource->minimumSize;
732}
733
734/*
735 * Expands the size of the heap after a collection. At present we
736 * commit the pages for maximum size of the heap so this routine is
737 * just a no-op. Eventually, we will either allocate or commit pages
738 * on an as-need basis.
739 */
740void dvmHeapSourceGrowForUtilization(void)
741{
742 /* do nothing */
743}
744
745void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
746{
747 /* do nothing */
748}
749
750void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen,
751 const void *userptr, size_t userlen,
752 void *arg),
753 void *arg)
754{
755 assert(!"implemented");
756}
757
758size_t dvmHeapSourceGetNumHeaps(void)
759{
760 return 1;
761}
762
763bool dvmTrackExternalAllocation(size_t n)
764{
765 assert(!"implemented");
766 return false;
767}
768
769void dvmTrackExternalFree(size_t n)
770{
771 assert(!"implemented");
772}
773
774size_t dvmGetExternalBytesAllocated(void)
775{
776 assert(!"implemented");
777 return 0;
778}
779
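/*
 * Flips the semispaces at the start of a collection: every block that
 * was to-space becomes from-space, the block queue and allocation
 * pointers are reset, and the reference lists are cleared, so copies
 * made by the transporter land in fresh to-space blocks.
 */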
780void dvmHeapSourceFlip(void)
781{
782 HeapSource *heapSource;
783 size_t i;
784
785 heapSource = gDvm.gcHeap->heapSource;
786
787 /* Reset the block queue. */
788 heapSource->allocBlocks = 0;
789 heapSource->queueSize = 0;
790 heapSource->queueHead = QUEUE_TAIL;
791
792 /* Hack: reset the reference lists. */
793 /* TODO(cshapiro): implement reference object processing. */
794 heapSource->softReferenceList = NULL;
795 heapSource->weakReferenceList = NULL;
796 heapSource->phantomReferenceList = NULL;
797
798 /* TODO(cshapiro): pad the current (prev) block. */
799
800 heapSource->allocPtr = NULL;
801 heapSource->allocLimit = NULL;
802
803 /* Whiten all allocated blocks. */
804 for (i = 0; i < heapSource->totalBlocks; ++i) {
805 if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) {
806 heapSource->blockSpace[i] = BLOCK_FROM_SPACE;
807 }
808 }
809}
810
811static void room(size_t *alloc, size_t *avail, size_t *total)
812{
813 HeapSource *heapSource;
814 size_t i;
815
816 heapSource = gDvm.gcHeap->heapSource;
817 *total = heapSource->totalBlocks*BLOCK_SIZE;
818 *alloc = heapSource->allocBlocks*BLOCK_SIZE;
819 *avail = *total - *alloc;
820}
821
822static bool isSpaceInternal(u1 *addr, int space)
823{
824 HeapSource *heapSource;
825 u1 *base, *limit;
826 size_t offset;
827 char space2;
828
829 heapSource = gDvm.gcHeap->heapSource;
830 base = heapSource->blockBase;
831 assert(addr >= base);
832 limit = heapSource->blockBase + heapSource->maximumSize;
833 assert(addr < limit);
834 offset = addr - base;
835 space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT];
836 return space == space2;
837}
838
839static bool isFromSpace(const void *addr)
840{
841 return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE);
842}
843
844static bool isToSpace(const void *addr)
845{
846 return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE);
847}
848
849/*
850 * Notifies the collector that the object at the given address must
851 * remain stationary during the current collection.
852 */
853static void pinObject(const Object *obj)
854{
855 promoteBlockByAddr(gDvm.gcHeap->heapSource, obj);
856}
857
858static void printHeapBitmap(const HeapBitmap *bitmap)
859{
860 const char *cp;
861 size_t i, length;
862
863 length = bitmap->bitsLen >> 2;
864 fprintf(stderr, "%p", bitmap->bits);
865 for (i = 0; i < length; ++i) {
866 fprintf(stderr, " %lx", bitmap->bits[i]);
867 fputc('\n', stderr);
868 }
869}
870
871static void printHeapBitmapSxS(const HeapBitmap *b1, const HeapBitmap *b2)
872{
873 uintptr_t addr;
874 size_t i, length;
875
876 assert(b1->base == b2->base);
877 assert(b1->bitsLen == b2->bitsLen);
878 addr = b1->base;
879 length = b1->bitsLen >> 2;
880 for (i = 0; i < length; ++i) {
881 int diff = b1->bits[i] == b2->bits[i];
882 fprintf(stderr, "%08x %08lx %08lx %d\n",
883 addr, b1->bits[i], b2->bits[i], diff);
884 addr += sizeof(*b1->bits)*CHAR_BIT;
885 }
886}
887
888static size_t sumHeapBitmap(const HeapBitmap *bitmap)
889{
890 const char *cp;
891 size_t i, sum;
892
893 sum = 0;
894 for (i = 0; i < bitmap->bitsLen >> 2; ++i) {
895 sum += dvmClzImpl(bitmap->bits[i]);
896 }
897 return sum;
898}
899
900/*
901 * Miscellaneous functionality.
902 */
903
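/*
 * Forwarding pointers are written over the from-space object's class
 * pointer word with the low bit set; class pointers are always at
 * least 4-byte aligned, so bit zero cleanly distinguishes a forwarded
 * header from an ordinary ClassObject reference.
 */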
904static int isForward(const void *addr)
905{
906 return (uintptr_t)addr & 0x1;
907}
908
909static void setForward(const void *toObj, void *fromObj)
910{
911 *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1;
912}
913
914static void* getForward(const void *fromObj)
915{
916 return (void *)((uintptr_t)fromObj & ~0x1);
917}
918
/* Beware: uses the same encoding as forwarding pointers! */
920static int isPermanentString(const StringObject *obj) {
921 return (uintptr_t)obj & 0x1;
922}
923
924static void* getPermanentString(const StringObject *obj)
925{
926 return (void *)((uintptr_t)obj & ~0x1);
927}
928
929
930/*
931 * Scavenging and transporting routines follow. A transporter grays
932 * an object. A scavenger blackens an object. We define these
933 * routines for each fundamental object type. Dispatch is performed
934 * in scavengeObject.
935 */
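/*
 * In tri-color terms: an unvisited from-space object is white;
 * transporting it (copying it to to-space and installing a forwarding
 * pointer) shades it gray; scavenging then blackens it by transporting
 * everything it references.
 */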
936
/*
 * Class object scavenging.
 */
static void scavengeClassObject(ClassObject *obj)
{
    int i;

    LOG_SCAVENGE("scavengeClassObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->obj.clazz != NULL);
    assert(obj->obj.clazz->descriptor != NULL);
    assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;"));
    assert(obj->descriptor != NULL);
    LOG_SCAVENGE("scavengeClassObject: descriptor='%s',vtableCount=%zu",
                 obj->descriptor, obj->vtableCount);
    /* Scavenge our class object. */
    scavengeReference((Object **) obj);
    /* Scavenge the array element class object. */
    if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) {
        scavengeReference((Object **)(void *)&obj->elementClass);
    }
    /* Scavenge the superclass. */
    scavengeReference((Object **)(void *)&obj->super);
    /* Scavenge the class loader. */
    scavengeReference(&obj->classLoader);
    /* Scavenge static fields. */
    for (i = 0; i < obj->sfieldCount; ++i) {
        char ch = obj->sfields[i].field.signature[0];
        if (ch == '[' || ch == 'L') {
            scavengeReference((Object **)(void *)&obj->sfields[i].value.l);
        }
    }
    /* Scavenge interface class objects. */
    for (i = 0; i < obj->interfaceCount; ++i) {
        scavengeReference((Object **) &obj->interfaces[i]);
    }
}
974
975/*
976 * Array object scavenging.
977 */
static size_t scavengeArrayObject(ArrayObject *array)
979{
980 size_t i, length;
981
982 LOG_SCAVENGE("scavengeArrayObject(array=%p)", array);
983 /* Scavenge the class object. */
984 assert(isToSpace(array));
985 assert(array != NULL);
986 assert(array->obj.clazz != NULL);
987 scavengeReference((Object **) array);
988 length = dvmArrayObjectSize(array);
989 /* Scavenge the array contents. */
990 if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) {
991 Object **contents = (Object **)array->contents;
992 for (i = 0; i < array->length; ++i) {
993 scavengeReference(&contents[i]);
994 }
995 }
996 return length;
997}
998
999/*
1000 * Reference object scavenging.
1001 */
1002
1003static int getReferenceFlags(const DataObject *obj)
1004{
1005 int flags;
1006
1007 flags = CLASS_ISREFERENCE |
1008 CLASS_ISWEAKREFERENCE |
1009 CLASS_ISPHANTOMREFERENCE;
1010 return GET_CLASS_FLAG_GROUP(obj->obj.clazz, flags);
1011}
1012
1013static int isReference(const DataObject *obj)
1014{
1015 return getReferenceFlags(obj) != 0;
1016}
1017
1018static int isSoftReference(const DataObject *obj)
1019{
1020 return getReferenceFlags(obj) == CLASS_ISREFERENCE;
1021}
1022
1023static int isWeakReference(const DataObject *obj)
1024{
1025 return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE;
1026}
1027
1028static bool isPhantomReference(const DataObject *obj)
1029{
1030 return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE;
1031}
1032
1033static void clearReference(DataObject *reference)
1034{
1035 size_t offset;
1036
1037 assert(isSoftReference(reference) || isWeakReference(reference));
1038 offset = gDvm.offJavaLangRefReference_referent;
1039 dvmSetFieldObject((Object *)reference, offset, NULL);
1040}
1041
1042static bool isReferentGray(const DataObject *reference)
1043{
1044 Object *obj;
1045 size_t offset;
1046
1047 assert(reference != NULL);
1048 assert(isSoftReference(reference) || isWeakReference(reference));
1049 offset = gDvm.offJavaLangRefReference_referent;
1050 obj = dvmGetFieldObject((Object *)reference, offset);
1051 return obj == NULL || isToSpace(obj);
1052}
1053
1054static void enqueueReference(HeapSource *heapSource, DataObject *reference)
1055{
1056 DataObject **queue;
1057 size_t offset;
1058
1059 LOGI("enqueueReference(heapSource=%p,reference=%p)", heapSource, reference);
1060 assert(heapSource != NULL);
1061 assert(reference != NULL);
1062 assert(isToSpace(reference));
1063 assert(isReference(reference));
1064 if (isSoftReference(reference)) {
1065 queue = &heapSource->softReferenceList;
1066 } else if (isWeakReference(reference)) {
1067 queue = &heapSource->weakReferenceList;
1068 } else if (isPhantomReference(reference)) {
1069 queue = &heapSource->phantomReferenceList;
1070 } else {
1071 assert(!"reached");
1072 queue = NULL;
1073 }
1074 offset = gDvm.offJavaLangRefReference_vmData;
1075 dvmSetFieldObject((Object *)reference, offset, (Object *)*queue);
1076 *queue = reference;
1077}
1078
/*
1080 * If a reference points to from-space and has been forwarded, we snap
1081 * the pointer to its new to-space address. If the reference points
1082 * to an unforwarded from-space address we must enqueue the reference
1083 * for later processing. TODO: implement proper reference processing
1084 * and move the referent scavenging elsewhere.
1085 */
static void scavengeReferenceObject(DataObject *obj)
{
    assert(obj != NULL);
    LOG_SCAVENGE("scavengeReferenceObject(obj=%p),'%s'", obj, obj->obj.clazz->descriptor);
    {
        /* Always scavenge the hidden Reference.referent field. */
        size_t offset = gDvm.offJavaLangRefReference_referent;
        void *addr = BYTE_OFFSET((Object *)obj, offset);
        Object **ref = (Object **)(void *)&((JValue *)addr)->l;
        scavengeReference(ref);
    }
    scavengeDataObject(obj);
    if (!isReferentGray(obj)) {
        assert(!"reached"); /* TODO(cshapiro): remove this */
        LOG_SCAVENGE("scavengeReferenceObject: enqueueing %p", obj);
        enqueueReference(gDvm.gcHeap->heapSource, obj);
    }
}
1104
1105/*
1106 * Data object scavenging.
1107 */
static void scavengeDataObject(DataObject *obj)
{
    ClassObject *clazz;
    int i;

    // LOGI("scavengeDataObject(obj=%p)", obj);
    assert(obj != NULL);
    assert(obj->obj.clazz != NULL);
    assert(obj->obj.clazz->objectSize != 0);
    assert(isToSpace(obj));
    /* Scavenge the class object. */
    clazz = obj->obj.clazz;
    scavengeReference((Object **) obj);
    /* Scavenge instance fields. */
    if (clazz->refOffsets != CLASS_WALK_SUPER) {
        size_t refOffsets = clazz->refOffsets;
        while (refOffsets != 0) {
            size_t rshift = CLZ(refOffsets);
            size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
            Object **ref = (Object **)((u1 *)obj + offset);
            scavengeReference(ref);
            refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
        }
    } else {
        for (; clazz != NULL; clazz = clazz->super) {
            InstField *field = clazz->ifields;
            for (i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
                size_t offset = field->byteOffset;
                Object **ref = (Object **)((u1 *)obj + offset);
                scavengeReference(ref);
            }
        }
    }
}
1142
1143static Object *transportObject(const Object *fromObj)
1144{
1145 Object *toObj;
1146 size_t allocSize, copySize;
1147
1148 LOG_TRANSPORT("transportObject(fromObj=%p) allocBlocks=%zu",
1149 fromObj,
1150 gDvm.gcHeap->heapSource->allocBlocks);
1151 assert(fromObj != NULL);
1152 assert(isFromSpace(fromObj));
1153 allocSize = copySize = objectSize(fromObj);
1154 if (LW_HASH_STATE(fromObj->lock) != LW_HASH_STATE_UNHASHED) {
1155 /*
1156 * The object has been hashed or hashed and moved. We must
1157 * reserve an additional word for a hash code.
1158 */
1159 allocSize += sizeof(u4);
1160 }
1161 if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
1162 /*
1163 * The object has its hash code allocated. Ensure the hash
1164 * code is copied along with the instance data.
1165 */
1166 copySize += sizeof(u4);
1167 }
1168 /* TODO(cshapiro): don't copy, re-map large data objects. */
1169 assert(copySize <= allocSize);
1170 toObj = allocateGray(allocSize);
1171 assert(toObj != NULL);
1172 assert(isToSpace(toObj));
1173 memcpy(toObj, fromObj, copySize);
1174 if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED) {
1175 /*
1176 * The object has had its hash code exposed. Append it to the
1177 * instance and set a bit so we know to look for it there.
1178 */
1179 *(u4 *)(((char *)toObj) + copySize) = (u4)fromObj >> 3;
1180 toObj->lock |= LW_HASH_STATE_HASHED_AND_MOVED << LW_HASH_STATE_SHIFT;
1181 }
1182 LOG_TRANSPORT("transportObject: from %p/%zu to %p/%zu (%zu,%zu) %s",
1183 fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj),
1184 toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj),
1185 copySize, allocSize, copySize < allocSize ? "DIFFERENT" : "");
1186 return toObj;
}
1188
1189/*
1190 * Generic reference scavenging.
1191 */
1192
1193/*
1194 * Given a reference to an object, the scavenge routine will gray the
1195 * reference. Any objects pointed to by the scavenger object will be
1196 * transported to new space and a forwarding pointer will be installed
1197 * in the header of the object.
1198 */
1199
1200/*
1201 * Blacken the given pointer. If the pointer is in from space, it is
1202 * transported to new space. If the object has a forwarding pointer
1203 * installed it has already been transported and the referent is
1204 * snapped to the new address.
1205 */
static void scavengeReference(Object **obj)
{
    ClassObject *clazz;
    Object *fromObj, *toObj;

    assert(obj);

    if (*obj == NULL) return;

    assert(dvmIsValidObject(*obj));

    /* The entire block is black. */
    if (isToSpace(*obj)) {
        LOG_SCAVENGE("scavengeReference skipping pinned object @ %p", *obj);
        return;
    }
    LOG_SCAVENGE("scavengeReference(*obj=%p)", *obj);

    assert(isFromSpace(*obj));

    clazz = (*obj)->clazz;

    if (isForward(clazz)) {
        // LOGI("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1));
        *obj = (Object *)getForward(clazz);
        return;
    }
    fromObj = *obj;
    if (clazz == NULL) {
        // LOGI("scavengeReference %p has a NULL class object", fromObj);
        assert(!"implemented");
        toObj = NULL;
    } else if (clazz == gDvm.unlinkedJavaLangClass) {
        // LOGI("scavengeReference %p is an unlinked class object", fromObj);
        assert(!"implemented");
        toObj = NULL;
    } else {
        toObj = transportObject(fromObj);
    }
    setForward(toObj, fromObj);
    *obj = (Object *)toObj;
}
1249
1250static void verifyReference(const void *obj)
1251{
1252 HeapSource *heapSource;
1253 size_t block;
1254 char space;
1255
1256 if (obj == NULL) {
1257 LOG_VERIFY("verifyReference(obj=%p)", obj);
1258 return;
1259 }
1260 heapSource = gDvm.gcHeap->heapSource;
1261 block = addressToBlock(heapSource, obj);
1262 space = heapSource->blockSpace[block];
1263 LOG_VERIFY("verifyReference(obj=%p),block=%zu,space=%d", obj, block, space);
1264 assert(!((uintptr_t)obj & 7));
1265 assert(isToSpace(obj));
1266 assert(dvmIsValidObject(obj));
1267}
1268
1269/*
1270 * Generic object scavenging.
1271 */
1272
static void scavengeObject(Object *obj)
{
    ClassObject *clazz;

    assert(obj != NULL);
    clazz = obj->clazz;
    assert(clazz != NULL);
    assert(!((uintptr_t)clazz & 0x1));
    assert(clazz != gDvm.unlinkedJavaLangClass);
    if (clazz == gDvm.classJavaLangClass) {
        scavengeClassObject((ClassObject *)obj);
    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
        scavengeArrayObject((ArrayObject *)obj);
    } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
        scavengeReferenceObject((DataObject *)obj);
    } else {
        scavengeDataObject((DataObject *)obj);
    }
}
1292
1293/*
1294 * External root scavenging routines.
1295 */
1296
1297static void scavengeHashTable(HashTable *table)
1298{
1299 HashEntry *entry;
1300 void *obj;
1301 int i;
1302
1303 if (table == NULL) {
1304 return;
1305 }
1306 dvmHashTableLock(table);
1307 for (i = 0; i < table->tableSize; ++i) {
1308 entry = &table->pEntries[i];
1309 obj = entry->data;
1310 if (obj == NULL || obj == HASH_TOMBSTONE) {
1311 continue;
1312 }
1313 scavengeReference((Object **)(void *)&entry->data);
1314 }
1315 dvmHashTableUnlock(table);
1316}
1317
1318static void pinHashTableEntries(HashTable *table)
1319{
1320 HashEntry *entry;
1321 void *obj;
1322 int i;
1323
1324 LOGI(">>> pinHashTableEntries(table=%p)", table);
1325 if (table == NULL) {
1326 return;
1327 }
1328 dvmHashTableLock(table);
1329 for (i = 0; i < table->tableSize; ++i) {
1330 entry = &table->pEntries[i];
1331 obj = entry->data;
1332 if (obj == NULL || obj == HASH_TOMBSTONE) {
1333 continue;
1334 }
1335 pinObject(entry->data);
1336 }
1337 dvmHashTableUnlock(table);
1338 LOGI("<<< pinHashTableEntries(table=%p)", table);
1339}
1340
1341static void pinPrimitiveClasses(void)
1342{
1343 size_t length;
1344 size_t i;
1345
1346 length = ARRAYSIZE(gDvm.primitiveClass);
1347 for (i = 0; i < length; i++) {
1348 if (gDvm.primitiveClass[i] != NULL) {
1349 pinObject((Object *)gDvm.primitiveClass[i]);
1350 }
1351 }
1352}
1353
1354/*
1355 * Scavenge interned strings. Permanent interned strings will have
1356 * been pinned and are therefore ignored. Non-permanent strings that
1357 * have been forwarded are snapped. All other entries are removed.
1358 */
1359static void scavengeInternedStrings(void)
1360{
1361 HashTable *table;
1362 HashEntry *entry;
1363 Object *obj;
1364 int i;
1365
1366 table = gDvm.internedStrings;
1367 if (table == NULL) {
1368 return;
1369 }
1370 dvmHashTableLock(table);
1371 for (i = 0; i < table->tableSize; ++i) {
1372 entry = &table->pEntries[i];
1373 obj = (Object *)entry->data;
1374 if (obj == NULL || obj == HASH_TOMBSTONE) {
1375 continue;
1376 } else if (!isPermanentString((StringObject *)obj)) {
1377 // LOGI("entry->data=%p", entry->data);
1378 LOG_SCAVENGE(">>> string obj=%p", entry->data);
1379 /* TODO(cshapiro): detach white string objects */
1380 scavengeReference((Object **)(void *)&entry->data);
1381 LOG_SCAVENGE("<<< string obj=%p", entry->data);
1382 }
1383 }
1384 dvmHashTableUnlock(table);
1385}
1386
1387static void pinInternedStrings(void)
1388{
1389 HashTable *table;
1390 HashEntry *entry;
1391 Object *obj;
1392 int i;
1393
1394 table = gDvm.internedStrings;
1395 if (table == NULL) {
1396 return;
1397 }
1398 dvmHashTableLock(table);
1399 for (i = 0; i < table->tableSize; ++i) {
1400 entry = &table->pEntries[i];
1401 obj = (Object *)entry->data;
1402 if (obj == NULL || obj == HASH_TOMBSTONE) {
1403 continue;
1404 } else if (isPermanentString((StringObject *)obj)) {
1405 obj = (Object *)getPermanentString((StringObject*)obj);
1406 LOG_PROMOTE(">>> pin string obj=%p", obj);
1407 pinObject(obj);
1408 LOG_PROMOTE("<<< pin string obj=%p", obj);
1409 }
1410 }
1411 dvmHashTableUnlock(table);
1412}
1413
1414static void verifyInternedStrings(void)
1415{
1416 HashTable *table;
1417 HashEntry *entry;
1418 Object *fwd, *obj;
1419 int i;
1420
1421 table = gDvm.internedStrings;
1422 if (table == NULL) {
1423 return;
1424 }
1425 dvmHashTableLock(table);
1426 for (i = 0; i < table->tableSize; ++i) {
1427 entry = &table->pEntries[i];
1428 obj = (Object *)entry->data;
1429 if (obj == NULL || obj == HASH_TOMBSTONE) {
1430 continue;
1431 } else if (isPermanentString((StringObject *)obj)) {
1432 fwd = (Object *)getForward(obj);
1433 LOG_VERIFY(">>> verify string fwd=%p obj=%p", fwd, obj);
1434 verifyReference(fwd);
1435 LOG_VERIFY(">>> verify string fwd=%p obj=%p", fwd, obj);
1436 } else {
1437 LOG_SCAVENGE(">>> verify string obj=%p %p", obj, entry->data);
1438 verifyReference(obj);
1439 LOG_SCAVENGE("<<< verify string obj=%p %p", obj, entry->data);
1440 }
1441 }
1442 dvmHashTableUnlock(table);
1443}
1444
1445/*
1446 * At present, reference tables contain references that must not be
1447 * moved by the collector. Instead of scavenging each reference in
1448 * the table we pin each referenced object.
1449 */
static void pinReferenceTable(const ReferenceTable *table)
{
1452 Object **entry;
1453 int i;
1454
1455 assert(table != NULL);
1456 assert(table->table != NULL);
1457 assert(table->nextEntry != NULL);
1458 for (entry = table->table; entry < table->nextEntry; ++entry) {
1459 assert(entry != NULL);
1460 assert(!isForward(*entry));
1461 pinObject(*entry);
1462 }
1463}
1464
1465static void verifyReferenceTable(const ReferenceTable *table)
1466{
1467 Object **entry;
1468 int i;
1469
1470 LOGI(">>> verifyReferenceTable(table=%p)", table);
1471 for (entry = table->table; entry < table->nextEntry; ++entry) {
1472 assert(entry != NULL);
1473 assert(!isForward(*entry));
1474 verifyReference(*entry);
1475 }
1476 LOGI("<<< verifyReferenceTable(table=%p)", table);
1477}
1478
1479static void scavengeLargeHeapRefTable(LargeHeapRefTable *table)
1480{
1481 Object **entry;
1482
1483 for (; table != NULL; table = table->next) {
1484 for (entry = table->refs.table; entry < table->refs.nextEntry; ++entry) {
1485 if ((uintptr_t)*entry & ~0x3) {
1486 /* It's a pending reference operation. */
1487 assert(!"implemented");
1488 }
1489 scavengeReference(entry);
1490 }
1491 }
1492}
1493
1494/* This code was copied from Thread.c */
1495static void scavengeThreadStack(Thread *thread)
1496{
1497 const u4 *framePtr;
1498#if WITH_EXTRA_GC_CHECKS > 1
1499 bool first = true;
1500#endif
1501
1502 framePtr = (const u4 *)thread->curFrame;
1503 while (framePtr != NULL) {
1504 const StackSaveArea *saveArea;
1505 const Method *method;
1506
1507 saveArea = SAVEAREA_FROM_FP(framePtr);
1508 method = saveArea->method;
        if (method != NULL && !dvmIsNativeMethod(method)) {
#ifdef COUNT_PRECISE_METHODS
1511 /* the GC is running, so no lock required */
1512 if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
1513 LOGI("PGC: added %s.%s %p\n",
1514 method->clazz->descriptor, method->name, method);
1515#endif
1516#if WITH_EXTRA_GC_CHECKS > 1
1517 /*
1518 * May also want to enable the memset() in the "invokeMethod"
1519 * goto target in the portable interpreter. That sets the stack
1520 * to a pattern that makes referring to uninitialized data
1521 * very obvious.
1522 */
1523
1524 if (first) {
1525 /*
1526 * First frame, isn't native, check the "alternate" saved PC
1527 * as a sanity check.
1528 *
1529 * It seems like we could check the second frame if the first
1530 * is native, since the PCs should be the same. It turns out
1531 * this doesn't always work. The problem is that we could
1532 * have calls in the sequence:
1533 * interp method #2
1534 * native method
1535 * interp method #1
1536 *
1537 * and then GC while in the native method after returning
1538 * from interp method #2. The currentPc on the stack is
1539 * for interp method #1, but thread->currentPc2 is still
1540 * set for the last thing interp method #2 did.
1541 *
1542 * This can also happen in normal execution:
1543 * - sget-object on not-yet-loaded class
1544 * - class init updates currentPc2
1545 * - static field init is handled by parsing annotations;
1546 * static String init requires creation of a String object,
1547 * which can cause a GC
1548 *
1549 * Essentially, any pattern that involves executing
1550 * interpreted code and then causes an allocation without
1551 * executing instructions in the original method will hit
1552 * this. These are rare enough that the test still has
1553 * some value.
1554 */
1555 if (saveArea->xtra.currentPc != thread->currentPc2) {
1556 LOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p\n",
1557 saveArea->xtra.currentPc, thread->currentPc2,
1558 method->clazz->descriptor, method->name, method->insns);
1559 if (saveArea->xtra.currentPc != NULL)
1560 LOGE(" pc inst = 0x%04x\n", *saveArea->xtra.currentPc);
1561 if (thread->currentPc2 != NULL)
1562 LOGE(" pc2 inst = 0x%04x\n", *thread->currentPc2);
1563 dvmDumpThread(thread, false);
1564 }
1565 } else {
1566 /*
1567 * It's unusual, but not impossible, for a non-first frame
1568 * to be at something other than a method invocation. For
1569 * example, if we do a new-instance on a nonexistent class,
1570 * we'll have a lot of class loader activity on the stack
1571 * above the frame with the "new" operation. Could also
1572 * happen while we initialize a Throwable when an instruction
1573 * fails.
1574 *
1575 * So there's not much we can do here to verify the PC,
1576 * except to verify that it's a GC point.
1577 */
1578 }
1579 assert(saveArea->xtra.currentPc != NULL);
1580#endif
1581
1582 const RegisterMap* pMap;
1583 const u1* regVector;
1584 int i;
1585
1586 Method* nonConstMethod = (Method*) method; // quiet gcc
1587 pMap = dvmGetExpandedRegisterMap(nonConstMethod);
1588
1589 /* assert(pMap != NULL); */
1590
1591 //LOGI("PGC: %s.%s\n", method->clazz->descriptor, method->name);
1592
1593 if (pMap != NULL) {
1594 /* found map, get registers for this address */
1595 int addr = saveArea->xtra.currentPc - method->insns;
1596 regVector = dvmRegisterMapGetLine(pMap, addr);
1597 /*
1598 if (regVector == NULL) {
1599 LOGI("PGC: map but no entry for %s.%s addr=0x%04x\n",
1600 method->clazz->descriptor, method->name, addr);
1601 } else {
1602 LOGI("PGC: found map for %s.%s 0x%04x (t=%d)\n",
1603 method->clazz->descriptor, method->name, addr,
1604 thread->threadId);
1605 }
1606 */
1607 } else {
1608 /*
1609 * No map found. If precise GC is disabled this is
1610 * expected -- we don't create pointers to the map data even
1611 * if it's present -- but if it's enabled it means we're
1612 * unexpectedly falling back on a conservative scan, so it's
1613 * worth yelling a little.
1614 */
1615 if (gDvm.preciseGc) {
1616 LOGI("PGC: no map for %s.%s\n", method->clazz->descriptor, method->name);
1617 }
1618 regVector = NULL;
1619 }
1620
1621 /* assert(regVector != NULL); */
1622
1623 if (regVector == NULL) {
1624 /* conservative scan */
1625 for (i = method->registersSize - 1; i >= 0; i--) {
1626 u4 rval = *framePtr++;
1627 if (rval != 0 && (rval & 0x3) == 0) {
1628 abort();
1629 /* dvmMarkIfObject((Object *)rval); */
1630 }
1631 }
1632 } else {
1633 /*
1634 * Precise scan. v0 is at the lowest address on the
1635 * interpreted stack, and is the first bit in the register
1636 * vector, so we can walk through the register map and
1637 * memory in the same direction.
1638 *
1639 * A '1' bit indicates a live reference.
1640 */
1641 u2 bits = 1 << 1;
1642 for (i = method->registersSize - 1; i >= 0; i--) {
1643 /* u4 rval = *framePtr++; */
1644 u4 rval = *framePtr;
1645
1646 bits >>= 1;
1647 if (bits == 1) {
1648 /* set bit 9 so we can tell when we're empty */
1649 bits = *regVector++ | 0x0100;
1650 LOGVV("loaded bits: 0x%02x\n", bits & 0xff);
1651 }
1652
1653 if (rval != 0 && (bits & 0x01) != 0) {
1654 /*
1655 * Non-null, register marked as live reference. This
1656 * should always be a valid object.
1657 */
1658#if WITH_EXTRA_GC_CHECKS > 0
1659 if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) {
1660 /* this is very bad */
1661 LOGE("PGC: invalid ref in reg %d: 0x%08x\n",
1662 method->registersSize-1 - i, rval);
1663 } else
1664#endif
1665 {
1666
1667 // LOGI("stack reference %u@%p", *framePtr, framePtr);
1668 /* dvmMarkObjectNonNull((Object *)rval); */
1669 scavengeReference((Object **) framePtr);
1670 }
1671 } else {
1672 /*
1673 * Null or non-reference, do nothing at all.
1674 */
1675#if WITH_EXTRA_GC_CHECKS > 1
1676 if (dvmIsValidObject((Object*) rval)) {
1677 /* this is normal, but we feel chatty */
1678 LOGD("PGC: ignoring valid ref in reg %d: 0x%08x\n",
1679 method->registersSize-1 - i, rval);
1680 }
1681#endif
1682 }
1683 ++framePtr;
1684 }
1685 dvmReleaseRegisterMapLine(pMap, regVector);
1686 }
1687 }
1688 /* else this is a break frame and there is nothing to mark, or
1689 * this is a native method and the registers are just the "ins",
1690 * copied from various registers in the caller's set.
1691 */
1692
1693#if WITH_EXTRA_GC_CHECKS > 1
1694 first = false;
1695#endif
1696
1697 /* Don't fall into an infinite loop if things get corrupted.
1698 */
1699 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1700 saveArea->prevFrame == NULL);
1701 framePtr = saveArea->prevFrame;
1702 }
1703}
1704
1705static void scavengeThread(Thread *thread)
1706{
1707 assert(thread->status != THREAD_RUNNING ||
1708 thread->isSuspended ||
1709 thread == dvmThreadSelf());
1710
1711 // LOGI("scavengeThread(thread=%p)", thread);
1712
1713 // LOGI("Scavenging threadObj=%p", thread->threadObj);
1714 scavengeReference(&thread->threadObj);
1715
1716 // LOGI("Scavenging exception=%p", thread->exception);
1717 scavengeReference(&thread->exception);
1718
1719 scavengeThreadStack(thread);
1720}
1721
1722static void scavengeThreadList(void)
1723{
1724 Thread *thread;
1725
1726 dvmLockThreadList(dvmThreadSelf());
1727 thread = gDvm.threadList;
1728 while (thread) {
1729 scavengeThread(thread);
1730 thread = thread->next;
1731 }
1732 dvmUnlockThreadList();
1733}
1734
1735static void verifyThreadStack(const Thread *thread)
1736{
1737 const u4 *framePtr;
1738
1739 assert(thread != NULL);
1740 framePtr = (const u4 *)thread->curFrame;
1741 while (framePtr != NULL) {
1742 const StackSaveArea *saveArea;
1743 const Method *method;
1744
1745 saveArea = SAVEAREA_FROM_FP(framePtr);
1746 method = saveArea->method;
1747 if (method != NULL && !dvmIsNativeMethod(method)) {
1748 const RegisterMap* pMap;
1749 const u1* regVector;
1750 int i;
1751
1752 Method* nonConstMethod = (Method*) method; // quiet gcc
1753 pMap = dvmGetExpandedRegisterMap(nonConstMethod);
1754
1755 /* assert(pMap != NULL); */
1756
1757 // LOGI("PGC: %s.%s\n", method->clazz->descriptor, method->name);
1758
1759 if (pMap != NULL) {
1760 /* found map, get registers for this address */
1761 int addr = saveArea->xtra.currentPc - method->insns;
1762 regVector = dvmRegisterMapGetLine(pMap, addr);
1763 if (regVector == NULL) {
1764 LOGI("PGC: map but no entry for %s.%s addr=0x%04x\n",
1765 method->clazz->descriptor, method->name, addr);
1766 } else {
1767 //LOGI("PGC: found map for %s.%s 0x%04x (t=%d)\n", method->clazz->descriptor, method->name, addr, thread->threadId);
1768 }
1769 } else {
1770 /*
1771 * No map found. If precise GC is disabled this is
1772 * expected -- we don't create pointers to the map data even
1773 * if it's present -- but if it's enabled it means we're
1774 * unexpectedly falling back on a conservative scan, so it's
1775 * worth yelling a little.
1776 */
1777 if (gDvm.preciseGc) {
1778 LOGI("PGC: no map for %s.%s\n",
1779 method->clazz->descriptor, method->name);
1780 }
1781 regVector = NULL;
1782 }
1783
1784 /* assert(regVector != NULL); */
1785
1786 if (regVector == NULL) {
1787 /* conservative scan */
1788 for (i = method->registersSize - 1; i >= 0; i--) {
1789 u4 rval = *framePtr++;
1790 if (rval != 0 && (rval & 0x3) == 0) {
1791 abort();
1792 /* dvmMarkIfObject((Object *)rval); */
1793 }
1794 }
1795 } else {
1796 /*
1797 * Precise scan. v0 is at the lowest address on the
1798 * interpreted stack, and is the first bit in the register
1799 * vector, so we can walk through the register map and
1800 * memory in the same direction.
1801 *
1802 * A '1' bit indicates a live reference.
1803 */
1804 u2 bits = 1 << 1;
1805 for (i = method->registersSize - 1; i >= 0; i--) {
1806 u4 rval = *framePtr;
1807
1808 bits >>= 1;
1809 if (bits == 1) {
1810 /* set bit 9 so we can tell when we're empty */
1811 bits = *regVector++ | 0x0100;
1812 LOGVV("loaded bits: 0x%02x\n", bits & 0xff);
1813 }
1814
1815 if (rval != 0 && (bits & 0x01) != 0) {
1816 /*
1817 * Non-null, register marked as live reference. This
1818 * should always be a valid object.
1819 */
1820 //LOGI("verify stack reference %p", (Object *)*framePtr);
1821 verifyReference((Object *)*framePtr);
1822 } else {
1823 /*
1824 * Null or non-reference, do nothing at all.
1825 */
1826 }
1827 ++framePtr;
1828 }
1829 dvmReleaseRegisterMapLine(pMap, regVector);
1830 }
1831 }
1832 /* else this is a break frame and there is nothing to mark, or
1833 * this is a native method and the registers are just the "ins",
1834 * copied from various registers in the caller's set.
1835 */
1836
1837 /* Don't fall into an infinite loop if things get corrupted.
1838 */
1839 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1840 saveArea->prevFrame == NULL);
1841 framePtr = saveArea->prevFrame;
1842 }
1843}
1844
1845static void verifyThread(const Thread *thread)
1846{
1847 assert(thread->status != THREAD_RUNNING ||
1848 thread->isSuspended ||
1849 thread == dvmThreadSelf());
1850
1851 LOGI("verifyThread(thread=%p)", thread);
1852
1853 LOGI("verify threadObj=%p", thread->threadObj);
1854 verifyReference(thread->threadObj);
1855
1856 LOGI("verify exception=%p", thread->exception);
1857 verifyReference(thread->exception);
1858
1859 LOGI("verify thread->internalLocalRefTable");
1860 verifyReferenceTable(&thread->internalLocalRefTable);
1861
1862 LOGI("verify thread->jniLocalRefTable");
1863 verifyReferenceTable(&thread->jniLocalRefTable);
1864
1865 /* Can the check be pushed into the promote routine? */
1866 if (thread->jniMonitorRefTable.table) {
1867 LOGI("verify thread->jniMonitorRefTable");
1868 verifyReferenceTable(&thread->jniMonitorRefTable);
1869 }
1870
1871 verifyThreadStack(thread);
1872}
1873
1874static void verifyThreadList(void)
1875{
1876 Thread *thread;
1877
1878 dvmLockThreadList(dvmThreadSelf());
1879 thread = gDvm.threadList;
1880 while (thread) {
1881 verifyThread(thread);
1882 thread = thread->next;
1883 }
1884 dvmUnlockThreadList();
1885}
1886
1887static void pinNativeMethodArguments(const Thread *thread)
1888{
1889 const u4 *framePtr;
1890 const StackSaveArea *saveArea;
1891 const Method *method;
1892 const char *shorty;
1893 Object *obj;
1894 int i;
1895
1896 saveArea = NULL;
1897 framePtr = (const u4 *)thread->curFrame;
1898 for (; framePtr != NULL; framePtr = saveArea->prevFrame) {
1899 saveArea = SAVEAREA_FROM_FP(framePtr);
1900 method = saveArea->method;
1901 if (method != NULL && dvmIsNativeMethod(method)) {
1902 /*
1903 * For purposes of marking references, we don't need to do
1904 * anything here, because all of the native "ins" were copied
1905 * from registers in the caller's stack frame and won't be
1906 * changed (an interpreted method can freely use registers
1907 * with parameters like any other register, but natives don't
1908 * work that way).
1909 *
1910 * However, we need to ensure that references visible to
1911 * native methods don't move around. We can do a precise scan
1912 * of the arguments by examining the method signature.
1913 */
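            /*
             * Illustrative example: a non-static native method declared
             * void foo(String s, long j, int i) has the shorty "VLJI".
             * After the return type 'V' is skipped and "this" is pinned,
             * the scan pins the String argument ('L'), steps over the two
             * register words of the long ('J'), and ignores the int ('I').
             */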
1914 LOGI("+++ native scan %s.%s\n",
1915 method->clazz->descriptor, method->name);
1916 assert(method->registersSize == method->insSize);
1917 if (!dvmIsStaticMethod(method)) {
1918 /* grab the "this" pointer */
1919 obj = (Object *)*framePtr++;
1920 if (obj == NULL) {
1921 /*
1922 * This can happen for the "fake" entry frame inserted
1923 * for threads created outside the VM. There's no actual
1924 * call so there's no object. If we changed the fake
1925 * entry method to be declared "static" then this
1926 * situation should never occur.
1927 */
1928 } else {
1929 assert(dvmIsValidObject(obj));
1930 pinObject(obj);
1931 }
1932 }
1933 shorty = method->shorty+1; // skip return value
1934 for (i = method->registersSize - 1; i >= 0; i--, framePtr++) {
1935 switch (*shorty++) {
1936 case 'L':
1937 obj = (Object *)*framePtr;
1938 if (obj != NULL) {
1939 assert(dvmIsValidObject(obj));
1940 pinObject(obj);
1941 }
1942 break;
1943 case 'D':
1944 case 'J':
1945 framePtr++;
1946 break;
1947 default:
1948 /* 32-bit non-reference value */
1949 obj = (Object *)*framePtr; // debug, remove
1950 if (dvmIsValidObject(obj)) { // debug, remove
1951 /* if we see a lot of these, our scan might be off */
1952 LOGI("+++ did NOT pin obj %p\n", obj);
1953 }
1954 break;
1955 }
1956 }
1957 }
1958 /*
1959 * Don't fall into an infinite loop if things get corrupted.
1960 */
1961 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1962 saveArea->prevFrame == NULL);
1963 }
1964}
1965
1966static void pinThread(const Thread *thread)
1967{
1968 assert(thread != NULL);
1969 assert(thread->status != THREAD_RUNNING ||
1970 thread->isSuspended ||
1971 thread == dvmThreadSelf());
1972 LOGI("pinThread(thread=%p)", thread);
1973
1974    LOGI("Pin native method arguments");
1975 pinNativeMethodArguments(thread);
1976
1977    LOGI("Pin internalLocalRefTable");
1978 pinReferenceTable(&thread->internalLocalRefTable);
1979
1980 LOGI("Pin jniLocalRefTable");
1981 pinReferenceTable(&thread->jniLocalRefTable);
1982
1983 /* Can the check be pushed into the promote routine? */
1984 if (thread->jniMonitorRefTable.table) {
1985 LOGI("Pin jniMonitorRefTable");
1986 pinReferenceTable(&thread->jniMonitorRefTable);
1987 }
1988}
1989
1990static void pinThreadList(void)
1991{
1992 Thread *thread;
1993
1994 dvmLockThreadList(dvmThreadSelf());
1995 thread = gDvm.threadList;
1996 while (thread) {
1997 pinThread(thread);
1998 thread = thread->next;
1999 }
2000 dvmUnlockThreadList();
2001}
2002
2003/*
2004 * Heap block scavenging.
2005 */
2006
2007/*
2008 * Scavenge objects in the current block. Scavenging terminates when
2009 * the pointer reaches the highest address in the block or when a run
2010 * of zero words that continues to the highest address is reached.
2011 */
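/*
 * Illustrative layout of a packed block (object sizes are arbitrary):
 *
 *   +--------+-------------+--------+--------------------------+
 *   | obj A  |    obj B    | obj C  | 0 0 0 ... 0 (zero words) |
 *   +--------+-------------+--------+--------------------------+
 *   ^ block start                                   block end ^
 *
 * The parser below starts at the block address, advances by each
 * object's aligned size, and stops at a run of zero words that
 * extends to the end of the block.  The unlinked java.lang.Class
 * object, whose leading word is zero, is special-cased.
 */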
2012static void scavengeBlock(HeapSource *heapSource, size_t block)
2013{
2014 u1 *cursor;
2015 u1 *end;
2016 size_t size;
2017
2018 LOG_SCAVENGE("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);
2019
2020 assert(heapSource != NULL);
2021 assert(block < heapSource->totalBlocks);
2022 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
2023
2024 cursor = blockToAddress(heapSource, block);
2025 end = cursor + BLOCK_SIZE;
2026 LOG_SCAVENGE("scavengeBlock start=%p, end=%p", cursor, end);
2027
2028 /* Parse and scavenge the current block. */
2029 size = 0;
2030 while (cursor < end) {
2031 u4 word = *(u4 *)cursor;
2032 if (word != 0) {
2033            scavengeObject((Object *)cursor);
2034 size = objectSize((Object *)cursor);
2035            size = alignUp(size, ALLOC_ALIGNMENT);
2036 cursor += size;
2037 } else if (word == 0 && cursor == (u1 *)gDvm.unlinkedJavaLangClass) {
2038 size = sizeof(ClassObject);
2039 cursor += size;
2040 } else {
2041 /* Check for padding. */
2042 while (*(u4 *)cursor == 0) {
2043 cursor += 4;
2044 if (cursor == end) break;
2045 }
2046 /* Punt if something went wrong. */
2047 assert(cursor == end);
2048 }
2049 }
2050}
2051
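/*
 * Returns the size, in bytes, of the storage occupied by an object,
 * including the trailing word that holds the identity hash code of an
 * object that has been hashed and then moved.
 */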
2052static size_t objectSize(const Object *obj)
2053{
2054 size_t size;
2055
2056    assert(obj != NULL);
2057 assert(obj->clazz != NULL);
2058    if (obj->clazz == gDvm.classJavaLangClass ||
2059 obj->clazz == gDvm.unlinkedJavaLangClass) {
2060 size = dvmClassObjectSize((ClassObject *)obj);
2061 } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
2062 size = dvmArrayObjectSize((ArrayObject *)obj);
2063 } else {
2064        assert(obj->clazz->objectSize != 0);
2065        size = obj->clazz->objectSize;
2066 }
2067    if (LW_HASH_STATE(obj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
2068 size += sizeof(u4);
2069 }
2070    return size;
2071}
2072
2073static void verifyBlock(HeapSource *heapSource, size_t block)
2074{
2075 u1 *cursor;
2076 u1 *end;
2077 size_t size;
2078
2079 // LOGI("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);
2080
2081 assert(heapSource != NULL);
2082 assert(block < heapSource->totalBlocks);
2083 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
2084
2085 cursor = blockToAddress(heapSource, block);
2086 end = cursor + BLOCK_SIZE;
2087 // LOGI("verifyBlock start=%p, end=%p", cursor, end);
2088
2089    /* Parse and verify the current block. */
2090 size = 0;
2091 while (cursor < end) {
2092 u4 word = *(u4 *)cursor;
2093 if (word != 0) {
2094 dvmVerifyObject((Object *)cursor);
2095 size = objectSize((Object *)cursor);
2096 size = alignUp(size, ALLOC_ALIGNMENT);
2097 cursor += size;
2098 } else if (word == 0 && cursor == (u1 *)gDvm.unlinkedJavaLangClass) {
2099 size = sizeof(ClassObject);
2100 cursor += size;
2101 } else {
2102 /* Check for padding. */
2103            while (*(u4 *)cursor == 0) {
2104 cursor += 4;
2105 if (cursor == end) break;
2106 }
2107 /* Punt if something went wrong. */
2108 assert(cursor == end);
2109 }
2110 }
2111}
2112
2113static void describeBlockQueue(const HeapSource *heapSource)
2114{
2115 size_t block, count;
2116 char space;
2117
2118 block = heapSource->queueHead;
2119 count = 0;
2120 LOG_SCAVENGE(">>> describeBlockQueue(heapSource=%p)", heapSource);
2121 /* Count the number of blocks enqueued. */
2122 while (block != QUEUE_TAIL) {
2123 block = heapSource->blockQueue[block];
2124 ++count;
2125 }
2126 LOG_SCAVENGE("blockQueue %zu elements, enqueued %zu",
2127 count, heapSource->queueSize);
2128 block = heapSource->queueHead;
2129 while (block != QUEUE_TAIL) {
2130 space = heapSource->blockSpace[block];
2131        LOG_SCAVENGE("block=%zu@%p,space=%d", block, blockToAddress(heapSource,block), space);
2132 block = heapSource->blockQueue[block];
2133 }
2134
2135 LOG_SCAVENGE("<<< describeBlockQueue(heapSource=%p)", heapSource);
2136}
2137
2138/*
2139 * Blackens promoted objects by scavenging each block on the block queue.
2140 */
2141static void scavengeBlockQueue(void)
2142{
2143 HeapSource *heapSource;
2144 size_t block;
2145
2146 LOG_SCAVENGE(">>> scavengeBlockQueue()");
2147 heapSource = gDvm.gcHeap->heapSource;
2148 describeBlockQueue(heapSource);
2149 while (heapSource->queueHead != QUEUE_TAIL) {
2150 block = heapSource->queueHead;
2151 LOG_SCAVENGE("Dequeueing block %zu\n", block);
2152 scavengeBlock(heapSource, block);
2153 heapSource->queueHead = heapSource->blockQueue[block];
2154        LOG_SCAVENGE("New queue head is %zu\n", heapSource->queueHead);
2155 }
2156 LOG_SCAVENGE("<<< scavengeBlockQueue()");
2157}
2158
2159/*
2160 * Scan the block list and verify all blocks that are marked as being
2161 * in new space. This should be parametrized so we can invoke this
2162 * routine outside of the context of a collection.
2163 */
2164static void verifyNewSpace(void)
2165{
2166 HeapSource *heapSource;
2167 size_t i;
2168 size_t c0, c1, c2, c7;
2169
2170 c0 = c1 = c2 = c7 = 0;
2171 heapSource = gDvm.gcHeap->heapSource;
2172 for (i = 0; i < heapSource->totalBlocks; ++i) {
2173 switch (heapSource->blockSpace[i]) {
2174 case BLOCK_FREE: ++c0; break;
2175 case BLOCK_TO_SPACE: ++c1; break;
2176 case BLOCK_FROM_SPACE: ++c2; break;
2177 case BLOCK_CONTINUED: ++c7; break;
2178 default: assert(!"reached");
2179 }
2180 }
2181 LOG_VERIFY("Block Demographics: "
2182 "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu",
2183 c0, c1, c2, c7);
2184 for (i = 0; i < heapSource->totalBlocks; ++i) {
2185 if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) {
2186 continue;
2187 }
2188 verifyBlock(heapSource, i);
2189 }
2190}
2191
2192static void scavengeGlobals(void)
2193{
2194 scavengeReference((Object **)(void *)&gDvm.classJavaLangClass);
2195 scavengeReference((Object **)(void *)&gDvm.classJavaLangClassArray);
2196 scavengeReference((Object **)(void *)&gDvm.classJavaLangError);
2197 scavengeReference((Object **)(void *)&gDvm.classJavaLangObject);
2198 scavengeReference((Object **)(void *)&gDvm.classJavaLangObjectArray);
2199 scavengeReference((Object **)(void *)&gDvm.classJavaLangRuntimeException);
2200 scavengeReference((Object **)(void *)&gDvm.classJavaLangString);
2201 scavengeReference((Object **)(void *)&gDvm.classJavaLangThread);
2202 scavengeReference((Object **)(void *)&gDvm.classJavaLangVMThread);
2203 scavengeReference((Object **)(void *)&gDvm.classJavaLangThreadGroup);
2204 scavengeReference((Object **)(void *)&gDvm.classJavaLangThrowable);
2205 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElement);
2206 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElementArray);
2207 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArray);
2208 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArrayArray);
2209 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectAccessibleObject);
2210 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructor);
2211 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructorArray);
2212 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectField);
2213 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectFieldArray);
2214 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethod);
2215 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethodArray);
2216 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectProxy);
2217 scavengeReference((Object **)(void *)&gDvm.classJavaLangExceptionInInitializerError);
2218 scavengeReference((Object **)(void *)&gDvm.classJavaLangRefReference);
2219 scavengeReference((Object **)(void *)&gDvm.classJavaNioReadWriteDirectByteBuffer);
2220 scavengeReference((Object **)(void *)&gDvm.classJavaSecurityAccessController);
2221 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
2222 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
2223 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray);
2224 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyNioInternalDirectBuffer);
2225 scavengeReference((Object **)(void *)&gDvm.classArrayBoolean);
2226 scavengeReference((Object **)(void *)&gDvm.classArrayChar);
2227 scavengeReference((Object **)(void *)&gDvm.classArrayFloat);
2228 scavengeReference((Object **)(void *)&gDvm.classArrayDouble);
2229 scavengeReference((Object **)(void *)&gDvm.classArrayByte);
2230 scavengeReference((Object **)(void *)&gDvm.classArrayShort);
2231 scavengeReference((Object **)(void *)&gDvm.classArrayInt);
2232 scavengeReference((Object **)(void *)&gDvm.classArrayLong);
2233}
2234
2235void describeHeap(void)
2236{
2237 HeapSource *heapSource;
2238
2239 heapSource = gDvm.gcHeap->heapSource;
2240 describeBlocks(heapSource);
2241}
2242
2243/*
2244 * The collection interface. Collection has a few distinct phases.
2245 * The first is flipping the heap, also known as condemning or
2246 * whitening it.  The second promotes all objects that are referenced
2247 * by pinned or ambiguous references.  The third traces from the
2248 * stacks, registers, and various globals.  Lastly, the heap is
2249 * verified; this last phase should be optional.
2250 */
2251void dvmScavengeRoots(void) /* Needs a new name badly */
2252{
2253 HeapRefTable *refs;
2254 GcHeap *gcHeap;
2255
2256 {
2257 size_t alloc, unused, total;
2258
2259 room(&alloc, &unused, &total);
2260 LOGI("BEFORE GC: %zu alloc, %zu free, %zu total.",
2261 alloc, unused, total);
2262 }
2263
2264 gcHeap = gDvm.gcHeap;
2265 dvmHeapSourceFlip();
2266
2267 /*
2268 * Promote blocks with stationary objects.
2269 */
2270
2271 // LOGI("Pinning gDvm.threadList");
2272 pinThreadList();
2273
2274 // LOGI("Pinning gDvm.jniGlobalRefTable");
2275 pinReferenceTable(&gDvm.jniGlobalRefTable);
2276
2277 // LOGI("Pinning gDvm.jniPinRefTable");
2278 pinReferenceTable(&gDvm.jniPinRefTable);
2279
2280 // LOGI("Pinning gDvm.gcHeap->nonCollectableRefs");
2281 pinReferenceTable(&gcHeap->nonCollectableRefs);
2282
2283 // LOGI("Pinning gDvm.loadedClasses");
2284 pinHashTableEntries(gDvm.loadedClasses);
2285
2286 // LOGI("Pinning gDvm.primitiveClass");
2287 pinPrimitiveClasses();
2288
2289 // LOGI("Pinning gDvm.internedStrings");
2290 pinInternedStrings();
2291
2292 // describeBlocks(gcHeap->heapSource);
2293
2294 /*
2295 * Create first, open new-space page right here.
2296 */
2297
2298 /* Reset allocation to an unallocated block. */
2299 gDvm.gcHeap->heapSource->allocPtr = allocateBlocks(gDvm.gcHeap->heapSource, 1);
2300 gDvm.gcHeap->heapSource->allocLimit = gDvm.gcHeap->heapSource->allocPtr + BLOCK_SIZE;
2301 /*
2302 * Hack: promote the empty block allocated above. If the
2303 * promotions that occurred above did not actually gray any
2304 * objects, the block queue may be empty. We must force a
2305 * promotion to be safe.
2306 */
2307 promoteBlockByAddr(gDvm.gcHeap->heapSource, gDvm.gcHeap->heapSource->allocPtr);
2308
2309 /*
2310 * Scavenge blocks and relocate movable objects.
2311 */
2312
2313 LOGI("Scavenging gDvm.threadList");
2314 scavengeThreadList();
2315
2316 LOGI("Scavenging gDvm.gcHeap->referenceOperations");
2317 scavengeLargeHeapRefTable(gcHeap->referenceOperations);
2318
2319 LOGI("Scavenging gDvm.gcHeap->pendingFinalizationRefs");
2320 scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs);
2321
2322 LOGI("Scavenging random global stuff");
2323 scavengeReference(&gDvm.outOfMemoryObj);
2324 scavengeReference(&gDvm.internalErrorObj);
2325 scavengeReference(&gDvm.noClassDefFoundErrorObj);
2326
2327 LOGI("Scavenging gDvm.dbgRegistry");
2328 scavengeHashTable(gDvm.dbgRegistry);
2329
2330 // LOGI("Scavenging gDvm.internedString");
2331 scavengeInternedStrings();
2332
2333 LOGI("Root scavenge has completed.");
2334
2335 scavengeBlockQueue();
2336
2337 LOGI("Re-snap global class pointers.");
2338 scavengeGlobals();
2339
2340 LOGI("New space scavenge has completed.");
2341
2342 /*
2343 * Verify the stack and heap.
2344 */
2345
2346 // LOGI("Validating new space.");
2347
2348 verifyInternedStrings();
2349
2350 verifyThreadList();
2351
2352 verifyNewSpace();
2353
2354 // LOGI("New space verify has completed.");
2355
2356 //describeBlocks(gcHeap->heapSource);
2357
2358 clearFromSpace(gcHeap->heapSource);
2359
2360 {
2361 size_t alloc, rem, total;
2362
2363 room(&alloc, &rem, &total);
2364 LOGI("AFTER GC: %zu alloc, %zu free, %zu total.", alloc, rem, total);
2365 }
2366}
2367
2368/*
2369 * Interface compatibility routines.
2370 */
2371
2372void dvmClearWhiteRefs(Object **list)
2373{
2374 /* TODO */
2375 assert(*list == NULL);
2376}
2377
2378void dvmHandleSoftRefs(Object **list)
2379{
2380 /* TODO */
2381 assert(*list == NULL);
2382}
2383
2384bool dvmHeapBeginMarkStep(GcMode mode)
2385{
2386 /* do nothing */
2387 return true;
2388}
2389
2390void dvmHeapFinishMarkStep(void)
2391{
2392 /* do nothing */
2393}
2394
2395void dvmHeapMarkRootSet(void)
2396{
2397 /* do nothing */
2398}
2399
2400void dvmHeapScanMarkedObjects(void)
2401{
2402 dvmScavengeRoots();
2403}
2404
2405void dvmHeapScheduleFinalizations(void)
2406{
2407 /* do nothing */
2408}
2409
2410void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
2411{
2412    /* Nothing is swept, but zero the out-parameters for callers. */
    *numFreed = 0;
    *sizeFreed = 0;
2413}
2414
2415void dvmMarkObjectNonNull(const Object *obj)
2416{
2417 assert(!"implemented");
2418}