Add the ability to treat the zygote heap as a root and collect just the
forked application heap.

Change-Id: I8807897ae426f8274018d950fec44a2182a90525
diff --git a/vm/alloc/Heap.c b/vm/alloc/Heap.c
index f027c60..ed4e4f3 100644
--- a/vm/alloc/Heap.c
+++ b/vm/alloc/Heap.c
@@ -737,7 +737,7 @@
  * way to enforce this is to refuse to GC on an allocation made by the
  * JDWP thread -- we have to expand the heap or fail.
  */
-void dvmCollectGarbageInternal(bool collectSoftReferences, enum GcReason reason)
+void dvmCollectGarbageInternal(bool collectSoftReferences, GcReason reason)
 {
     GcHeap *gcHeap = gDvm.gcHeap;
     Object *softReferences;
@@ -749,6 +749,7 @@
     s8 gcElapsedTime;
     int numFreed;
     size_t sizeFreed;
+    GcMode gcMode;
 
 #if DVM_TRACK_HEAP_MARKING
     /* Since weak and soft references are always cleared,
@@ -770,6 +771,7 @@
         LOGW_HEAP("Attempted recursive GC\n");
         return;
     }
+    gcMode = (reason == GC_FOR_MALLOC) ? GC_PARTIAL : GC_FULL;
     gcHeap->gcRunning = true;
     now = dvmGetRelativeTimeUsec();
     if (gcHeap->gcStartTime != 0) {
@@ -895,7 +897,7 @@
 
     /* Set up the marking context.
      */
-    if (!dvmHeapBeginMarkStep()) {
+    if (!dvmHeapBeginMarkStep(gcMode)) {
         LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting\n");
         dvmAbort();
     }
@@ -997,7 +999,7 @@
     dvmDumpMonitorInfo("before sweep");
 #endif
     LOGD_HEAP("Sweeping...");
-    dvmHeapSweepUnmarkedObjects(&numFreed, &sizeFreed);
+    dvmHeapSweepUnmarkedObjects(gcMode, &numFreed, &sizeFreed);
 #ifdef WITH_DEADLOCK_PREDICTION
     dvmDumpMonitorInfo("after sweep");
 #endif
diff --git a/vm/alloc/Heap.h b/vm/alloc/Heap.h
index ed25baf..5237a56 100644
--- a/vm/alloc/Heap.h
+++ b/vm/alloc/Heap.h
@@ -52,7 +52,14 @@
 size_t dvmObjectSizeInHeap(const Object *obj);
 #endif
 
-enum GcReason {
+typedef enum {
+    /* GC all heaps. */
+    GC_FULL,
+    /* GC just the first heap. */
+    GC_PARTIAL
+} GcMode;
+
+typedef enum {
     /* Not enough space for an "ordinary" Object to be allocated. */
     GC_FOR_MALLOC,
     /* Explicit GC via Runtime.gc(), VMRuntime.gc(), or SIGUSR1. */
@@ -61,7 +68,7 @@
     GC_EXTERNAL_ALLOC,
     /* GC to dump heap contents to a file, only used under WITH_HPROF */
     GC_HPROF_DUMP_HEAP
-};
+} GcReason;
 
 /*
  * Suspend the VM as for a GC, and assert-fail if any object has any
@@ -72,7 +79,6 @@
 /*
  * Run the garbage collector without doing any locking.
  */
-void dvmCollectGarbageInternal(bool collectSoftReferences,
-                               enum GcReason reason);
+void dvmCollectGarbageInternal(bool collectSoftReferences, GcReason reason);
 
 #endif  // _DALVIK_ALLOC_HEAP
diff --git a/vm/alloc/HeapBitmap.h b/vm/alloc/HeapBitmap.h
index ec82a7a..fb0878d 100644
--- a/vm/alloc/HeapBitmap.h
+++ b/vm/alloc/HeapBitmap.h
@@ -30,6 +30,9 @@
 #define HB_INDEX_TO_OFFSET(index_) \
     ((uintptr_t)(index_) * HB_OBJECT_ALIGNMENT * HB_BITS_PER_WORD)
 
+#define HB_OFFSET_TO_BYTE_INDEX(offset_) \
+  (HB_OFFSET_TO_INDEX(offset_) * sizeof(*((HeapBitmap *)0)->bits))
+
 /* Pack the bits in backwards so they come out in address order
  * when using CLZ.
  */
diff --git a/vm/alloc/HeapSource.c b/vm/alloc/HeapSource.c
index 7b36065..b7757c2 100644
--- a/vm/alloc/HeapSource.c
+++ b/vm/alloc/HeapSource.c
@@ -362,13 +362,14 @@
     } else {
         size_t overhead;
 
-        overhead = oldHeapOverhead(hs, true);
+        overhead = ALIGN_UP_TO_PAGE_SIZE(oldHeapOverhead(hs, true));
         if (overhead + HEAP_MIN_FREE >= hs->absoluteMaxSize) {
             LOGE_HEAP("No room to create any more heaps "
                     "(%zd overhead, %zd max)\n",
                     overhead, hs->absoluteMaxSize);
             return false;
         }
+        hs->heaps[0].absoluteMaxSize = overhead;
         heap.absoluteMaxSize = hs->absoluteMaxSize - overhead;
         base = contiguous_mspace_sbrk0(hs->heaps[0].msp);
         hs->heaps[0].limit = base;
@@ -426,7 +427,6 @@
      * among the heaps managed by the garbage collector.
      */
     length = ALIGN_UP_TO_PAGE_SIZE(absoluteMaxSize);
-    length *= HEAP_SOURCE_MAX_HEAP_COUNT;
     fd = ashmem_create_region("the-java-heap", length);
     if (fd == -1) {
         return NULL;
@@ -617,7 +617,8 @@
 
     dst->base = base;
     dst->max = max;
-    dst->bitsLen = max - base;
+    dst->bitsLen = HB_OFFSET_TO_BYTE_INDEX(max - base);
+    dst->allocLen = dst->bitsLen;
     offset = base - src->base;
     assert(HB_OFFSET_TO_MASK(offset) == 1 << 31);
     dst->bits = &src->bits[HB_OFFSET_TO_INDEX(offset)];
@@ -666,6 +667,34 @@
     dvmHeapBitmapZero(&gHs->markBits);
 }
 
+void dvmMarkImmuneObjects(void)
+{
+    char *dst, *src;
+    size_t i, offset, index, length;
+
+    /*
+     * Copy the contents of the live bit vector for the immune object
+     * range into the mark bit vector.
+     */
+    assert(gHs->objBits.base == gHs->markBits.base);
+    assert(gHs->objBits.bitsLen == gHs->markBits.bitsLen);
+    for (i = 1; i < gHs->numHeaps; ++i) {
+        /* Compute the number of words to copy in the bitmap. */
+        index = HB_OFFSET_TO_INDEX((uintptr_t)gHs->heaps[i].base - gHs->objBits.base);
+        /* Compute the starting offset in the live and mark bits. */
+        src = (char *)(gHs->objBits.bits + index);
+        dst = (char *)(gHs->markBits.bits + index);
+        /* Compute the number of bytes of the live bitmap to copy. */
+        length = HB_OFFSET_TO_BYTE_INDEX(gHs->heaps[i].limit - gHs->heaps[i].base);
+        /* Do the copy. */
+        memcpy(dst, src, length);
+        /* Make sure max points to the address of the highest set bit. */
+        if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
+            gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
+        }
+    }
+}
+
 /*
  * Allocates <n> bytes of zeroed data.
  */
@@ -1659,3 +1688,12 @@
 
     return ret;
 }
+
+void *dvmHeapSourceGetImmuneLimit(GcMode mode)
+{
+    if (mode == GC_PARTIAL) {
+        return hs2heap(gHs)->base;
+    } else {
+        return NULL;
+    }
+}
diff --git a/vm/alloc/HeapSource.h b/vm/alloc/HeapSource.h
index 221748c..50f9872 100644
--- a/vm/alloc/HeapSource.h
+++ b/vm/alloc/HeapSource.h
@@ -16,6 +16,7 @@
 #ifndef _DALVIK_HEAP_SOURCE
 #define _DALVIK_HEAP_SOURCE
 
+#include "alloc/Heap.h"
 #include "alloc/HeapInternal.h" // for GcHeap
 
 /* dlmalloc uses one size_t per allocated chunk.
@@ -160,4 +161,16 @@
  */
 void dvmHeapSourceSwapBitmaps(void);
 
+/*
+ * Marks all objects outside the threatened region of the heap.
+ */
+void dvmMarkImmuneObjects(void);
+
+/*
+ * Returns a pointer that demarcates the threatened region of the
+ * heap.  Addresses at or above this pointer are threatened, addresses
+ * below this pointer are not.
+ */
+void *dvmHeapSourceGetImmuneLimit(GcMode mode);
+
 #endif  // _DALVIK_HEAP_SOURCE
diff --git a/vm/alloc/MarkSweep.c b/vm/alloc/MarkSweep.c
index 223448a..eab30ba 100644
--- a/vm/alloc/MarkSweep.c
+++ b/vm/alloc/MarkSweep.c
@@ -134,7 +134,7 @@
     } while (false)
 
 bool
-dvmHeapBeginMarkStep()
+dvmHeapBeginMarkStep(GcMode mode)
 {
     GcMarkContext *mc = &gDvm.gcHeap->markContext;
 
@@ -142,6 +142,7 @@
         return false;
     }
     mc->finger = NULL;
+    mc->immuneLimit = dvmHeapSourceGetImmuneLimit(mode);
     return true;
 }
 
@@ -167,6 +168,10 @@
     assert(dvmIsValidObject(obj));
 #endif
 
+    if ((char *)obj < ctx->immuneLimit) {
+        assert(isMarked(obj, ctx));
+        return;
+    }
     if (!setAndReturnMarkBit(ctx, obj)) {
         /* This object was not previously marked.
          */
@@ -266,6 +271,9 @@
 
     HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_STICKY_CLASS, 0);
 
+    LOG_SCAN("immune objects");
+    dvmMarkImmuneObjects();
+
     LOG_SCAN("root class loader\n");
     dvmGcScanRootClassLoader();
     LOG_SCAN("primitive classes\n");
@@ -1108,13 +1116,13 @@
  * marked and free them.
  */
 void
-dvmHeapSweepUnmarkedObjects(int *numFreed, size_t *sizeFreed)
+dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
 {
     HeapBitmap markBits[HEAP_SOURCE_MAX_HEAP_COUNT];
     HeapBitmap objBits[HEAP_SOURCE_MAX_HEAP_COUNT];
     size_t origObjectsAllocated;
     size_t origBytesAllocated;
-    size_t numBitmaps;
+    size_t numBitmaps, numSweepBitmaps;
 
     /* All reachable objects have been marked.
      * Detach any unreachable interned strings before
@@ -1131,7 +1139,13 @@
 
     numBitmaps = dvmHeapSourceGetNumHeaps();
     dvmHeapSourceGetObjectBitmaps(objBits, markBits, numBitmaps);
-    dvmHeapBitmapXorWalkLists(markBits, objBits, numBitmaps,
+    if (mode == GC_PARTIAL) {
+        numSweepBitmaps = 1;
+        assert(gDvm.gcHeap->markContext.immuneLimit == objBits[0].base);
+    } else {
+        numSweepBitmaps = numBitmaps;
+    }
+    dvmHeapBitmapXorWalkLists(markBits, objBits, numSweepBitmaps,
                               sweepBitmapCallback, NULL);
 
     *numFreed = origObjectsAllocated -
diff --git a/vm/alloc/MarkSweep.h b/vm/alloc/MarkSweep.h
index 2f280bb..e6d55bc 100644
--- a/vm/alloc/MarkSweep.h
+++ b/vm/alloc/MarkSweep.h
@@ -40,6 +40,7 @@
 typedef struct {
     HeapBitmap *bitmap;
     GcMarkStack stack;
+    const char *immuneLimit;
     const void *finger;   // only used while scanning/recursing.
 } GcMarkContext;
 
@@ -50,13 +51,13 @@
     REF_WEAKGLOBAL
 };
 
-bool dvmHeapBeginMarkStep(void);
+bool dvmHeapBeginMarkStep(GcMode mode);
 void dvmHeapMarkRootSet(void);
 void dvmHeapScanMarkedObjects(void);
 void dvmHeapHandleReferences(Object *refListHead, enum RefType refType);
 void dvmHeapScheduleFinalizations(void);
 void dvmHeapFinishMarkStep(void);
 
-void dvmHeapSweepUnmarkedObjects(int *numFreed, size_t *sizeFreed);
+void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed);
 
 #endif  // _DALVIK_ALLOC_MARK_SWEEP