auto import from //depot/cupcake/@135843
diff --git a/vm/alloc/Alloc.c b/vm/alloc/Alloc.c
new file mode 100644
index 0000000..e247413
--- /dev/null
+++ b/vm/alloc/Alloc.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Garbage-collecting memory allocator.
+ */
+#include "Dalvik.h"
+#include "alloc/Heap.h"
+#include "alloc/HeapInternal.h"
+
+#if WITH_HPROF && WITH_HPROF_STACK
+#include "hprof/Hprof.h"
+#endif
+
+
+/*
+ * Initialize the GC universe.
+ *
+ * We're currently using a memory-mapped arena to keep things off of the
+ * main heap.  This needs to be replaced with something real.
+ */
+bool dvmGcStartup(void)
+{
+    dvmInitMutex(&gDvm.gcHeapLock);
+
+    return dvmHeapStartup();
+}
+
+/*
+ * Post-zygote heap initialization, including starting
+ * the HeapWorker thread.
+ */
+bool dvmGcStartupAfterZygote(void)
+{
+    if (!dvmHeapWorkerStartup()) {
+        return false;
+    }
+    return dvmHeapStartupAfterZygote();
+}
+
+/*
+ * Shut the GC down.
+ */
+void dvmGcShutdown(void)
+{
+    //TODO: grab and destroy the lock
+    dvmHeapShutdown();
+}
+
+/*
+ * Do any last-minute preparation before we call fork() for the first time.
+ */
+bool dvmGcPreZygoteFork(void)
+{
+    return dvmHeapSourceStartupBeforeFork();
+}
+
+/*
+ * Create a "stock instance" of an exception class.  These won't have
+ * useful stack traces in them, but they can be thrown when everything
+ * else is not working in a low-memory situation.
+ */
+static Object* createStockException(const char* descriptor)
+{
+    ClassObject* clazz;
+    Method* init;
+    Object* obj;
+
+    clazz = dvmFindSystemClass(descriptor);
+    if (clazz == NULL) {
+        LOGE("Unable to find %s\n", descriptor);
+        return NULL;
+    }
+
+    init = dvmFindDirectMethodByDescriptor(clazz, "<init>", "()V");
+    if (init == NULL) {
+        LOGE("Unable to find nullary constructor for %s\n", descriptor);
+        return NULL;
+    }
+
+    obj = dvmAllocObject(clazz, ALLOC_DEFAULT);
+    if (obj == NULL)
+        return NULL;
+
+    Thread* self = dvmThreadSelf();
+    JValue unused;
+    dvmCallMethod(self, init, obj, &unused);
+    if (dvmCheckException(self))
+        return NULL;
+
+    return obj;
+}
+
+/*
+ * "Late" initialization.  We had to defer this until we were able to
+ * interpret code.
+ */
+bool dvmGcLateInit(void)
+{
+    /*
+     * Pre-allocate some throwables.  These need to be explicitly added
+     * to the root set by the GC.
+     */
+    gDvm.outOfMemoryObj = createStockException("Ljava/lang/OutOfMemoryError;");
+    dvmReleaseTrackedAlloc(gDvm.outOfMemoryObj, NULL);
+    gDvm.internalErrorObj = createStockException("Ljava/lang/InternalError;");
+    dvmReleaseTrackedAlloc(gDvm.internalErrorObj, NULL);
+    if (gDvm.outOfMemoryObj == NULL || gDvm.internalErrorObj == NULL) {
+        LOGW("Unable to create stock exceptions\n");
+        return false;
+    }
+
+    return true;
+}
+
+
+/*
+ * Create an instance of the specified class.
+ *
+ * Returns NULL and throws an exception on failure.
+ */
+Object* dvmAllocObject(ClassObject* clazz, int flags)
+{
+    Object* newObj;
+
+    assert(dvmIsClassInitialized(clazz) || dvmIsClassInitializing(clazz));
+
+    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISFINALIZABLE)) {
+        flags |= ALLOC_FINALIZABLE;
+    }
+
+    /* allocate on GC heap; memory is zeroed out */
+    newObj = dvmMalloc(clazz->objectSize, flags);
+    if (newObj != NULL) {
+        DVM_OBJECT_INIT(newObj, clazz);
+        LOGVV("AllocObject: %s (%d)\n", clazz->descriptor,
+            (int) clazz->objectSize);
+#if WITH_HPROF && WITH_HPROF_STACK
+        hprofFillInStackTrace(newObj);
+#endif
+        dvmTrackAllocation(clazz, clazz->objectSize);
+    }
+
+    return newObj;
+}
+
+/*
+ * Create a copy of an object, for Object.clone().
+ *
+ * We use the size actually allocated, rather than obj->clazz->objectSize,
+ * because the latter doesn't work for array objects.
+ */
+Object* dvmCloneObject(Object* obj)
+{
+    Object* copy;
+    int size;
+    int flags;
+
+    assert(dvmIsValidObject(obj));
+
+    /* Class.java shouldn't let us get here (java.lang.Class is final
+     * and does not implement Cloneable), but make extra sure.
+     * A memcpy() clone will wreak havoc on a ClassObject's "innards".
+     */
+    assert(obj->clazz != gDvm.classJavaLangClass);
+
+    if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE))
+        flags = ALLOC_DEFAULT | ALLOC_FINALIZABLE;
+    else
+        flags = ALLOC_DEFAULT;
+
+//TODO: use clazz->objectSize for non-arrays
+    size = dvmObjectSizeInHeap(obj);
+
+    copy = dvmMalloc(size, flags);
+    if (copy == NULL)
+        return NULL;
+#if WITH_HPROF && WITH_HPROF_STACK
+    hprofFillInStackTrace(copy);
+#endif
+    dvmTrackAllocation(obj->clazz, size);
+
+    memcpy(copy, obj, size);
+    DVM_LOCK_INIT(&copy->lock);
+
+    //LOGV("CloneObject: %p->%p %s (%d)\n", obj, copy, obj->clazz->name, size);
+
+    // TODO: deal with reference classes
+
+    /* don't call dvmReleaseTrackedAlloc -- the caller must do that */
+
+    return copy;
+}
+
+
+/*
+ * Track an object that was allocated internally and isn't yet part of the
+ * VM root set.
+ *
+ * We could do this per-thread or globally.  If it's global we don't have
+ * to do the thread lookup but we do have to synchronize access to the list.
+ *
+ * NOTE: "obj" is not a fully-formed object; in particular, obj->clazz will
+ * usually be NULL since we're being called from dvmMalloc().
+ */
+void dvmAddTrackedAlloc(Object* obj, Thread* self)
+{
+    if (self == NULL)
+        self = dvmThreadSelf();
+
+    //LOGI("TRACK ADD %p\n", obj);
+
+    assert(self != NULL);
+    if (!dvmAddToReferenceTable(&self->internalLocalRefTable, obj)) {
+        LOGE("threadid=%d: unable to add %p to internal ref table\n",
+            self->threadId, obj);
+        dvmDumpThread(self, false);
+        dvmAbort();
+    }
+}
+
+/*
+ * Stop tracking an object.
+ *
+ * We allow attempts to delete NULL "obj" so that callers don't have to wrap
+ * calls with "if != NULL".
+ */
+void dvmReleaseTrackedAlloc(Object* obj, Thread* self)
+{
+    if (obj == NULL)
+        return;
+
+    if (self == NULL)
+        self = dvmThreadSelf();
+    assert(self != NULL);
+
+    //LOGI("TRACK REM %p (%s)\n", obj,
+    //    (obj->clazz != NULL) ? obj->clazz->name : "");
+
+    if (!dvmRemoveFromReferenceTable(&self->internalLocalRefTable,
+            self->internalLocalRefTable.table, obj))
+    {
+        LOGE("threadid=%d: failed to remove %p from internal ref table\n",
+            self->threadId, obj);
+        dvmAbort();
+    }
+}
+
+
+/*
+ * Explicitly initiate garbage collection.
+ */
+void dvmCollectGarbage(bool collectSoftReferences)
+{
+    dvmLockHeap();
+
+    LOGVV("Explicit GC\n");
+    dvmCollectGarbageInternal(collectSoftReferences);
+
+    dvmUnlockHeap();
+}
diff --git a/vm/alloc/Alloc.h b/vm/alloc/Alloc.h
new file mode 100644
index 0000000..0489db7
--- /dev/null
+++ b/vm/alloc/Alloc.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Garbage-collecting allocator.
+ */
+#ifndef _DALVIK_ALLOC_ALLOC
+#define _DALVIK_ALLOC_ALLOC
+
+#include <stdlib.h>
+
+/*
+ * Initialization.
+ */
+bool dvmGcStartup(void);
+bool dvmGcStartupAfterZygote(void);
+void dvmGcShutdown(void);
+bool dvmGcLateInit(void);
+
+/*
+ * Do any last-minute preparation before we call fork() for the first time.
+ */
+bool dvmGcPreZygoteFork(void);
+
+/*
+ * Basic allocation function.
+ *
+ * The new object will be added to the "tracked alloc" table unless
+ * flags is ALLOC_DONT_TRACK or ALLOC_NO_GC.
+ *
+ * Returns NULL and throws an exception on failure.
+ */
+void* dvmMalloc(size_t size, int flags);
+
+/*
+ * Allocate a new object.
+ *
+ * The new object will be added to the "tracked alloc" table unless
+ * flags is ALLOC_DONT_TRACK or ALLOC_NO_GC.
+ *
+ * Returns NULL and throws an exception on failure.
+ */
+Object* dvmAllocObject(ClassObject* clazz, int flags);
+
+/*
+ * Clear flags set by dvmMalloc.  Pass in a bit mask of the flags that
+ * should be cleared.
+ */
+void dvmClearAllocFlags(Object* obj, int mask);
+
+/* flags for dvmMalloc */
+enum {
+    ALLOC_DEFAULT       = 0x00,
+    ALLOC_NO_GC         = 0x01,     /* do not garbage collect this object */
+    ALLOC_DONT_TRACK    = 0x02,     /* don't add to internal tracking list */
+    ALLOC_FINALIZABLE   = 0x04,     /* call finalize() before freeing */
+    // ALLOC_NO_MOVE?
+};
+
+/*
+ * Call when a request is so far off that we can't call dvmMalloc().  Throws
+ * an exception with the specified message.
+ */
+void dvmThrowBadAllocException(const char* msg);
+
+/*
+ * Track an object reference that is currently only visible internally.
+ * This is called automatically by dvmMalloc() unless ALLOC_DONT_TRACK
+ * is set.
+ *
+ * The "self" argument is allowed as an optimization; it may be NULL.
+ */
+void dvmAddTrackedAlloc(Object* obj, Thread* self);
+
+/*
+ * Remove an object from the internal tracking list.
+ *
+ * Does nothing if "obj" is NULL.
+ *
+ * The "self" argument is allowed as an optimization; it may be NULL.
+ */
+void dvmReleaseTrackedAlloc(Object* obj, Thread* self);
+
+/*
+ * Like dvmReleaseTrackedAlloc, but only does the release if "allocFlags"
+ * indicates that it's necessary to do so.
+ */
+INLINE void dvmReleaseTrackedAllocIFN(Object* obj, Thread* self, int allocFlags)
+{
+    if ((allocFlags & (ALLOC_NO_GC|ALLOC_DONT_TRACK)) == 0)
+        dvmReleaseTrackedAlloc(obj, self);
+}
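+
+/* An illustrative sketch, not part of this change: the typical lifecycle
+ * of a tracked allocation using the declarations above.  "clazz" stands
+ * for any initialized class; the helper name is made up.
+ */
+#if 0   // example only
+static void exampleAllocAndPublish(ClassObject* clazz)
+{
+    /* ALLOC_DEFAULT adds the new object to the calling thread's
+     * tracked-alloc table, keeping it alive across a GC until it
+     * becomes reachable from the root set.
+     */
+    Object* obj = dvmAllocObject(clazz, ALLOC_DEFAULT);
+    if (obj == NULL)
+        return;                 /* exception already thrown */
+
+    /* ... publish obj somewhere GC-visible (field, array slot) ... */
+
+    /* Once it's reachable, stop tracking it. */
+    dvmReleaseTrackedAlloc(obj, NULL);
+}
+#endif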
+
+/*
+ * Returns true iff <obj> points to a valid allocated object.
+ */
+bool dvmIsValidObject(const Object* obj);
+
+/*
+ * Create a copy of an object.
+ *
+ * The new object will be added to the "tracked alloc" table.
+ */
+Object* dvmCloneObject(Object* obj);
+
+/*
+ * Validate the object pointer.  Returns "false" and throws an exception if
+ * "obj" is null or invalid.
+ *
+ * This may be used in performance critical areas as a null-pointer check;
+ * anything else here should be for debug builds only.  In particular, for
+ * "release" builds we want to skip the call to dvmIsValidObject() -- the
+ * classfile validation will screen out code that puts invalid data into
+ * object reference registers.
+ */
+INLINE int dvmValidateObject(Object* obj)
+{
+    if (obj == NULL) {
+        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        return false;
+    }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+    if (!dvmIsValidObject(obj)) {
+        //abort();
+        dvmThrowException("Ljava/lang/InternalError;",
+            "VM detected invalid object ptr");
+        return false;
+    }
+#endif
+#ifndef NDEBUG
+    /* check for heap corruption */
+    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+        abort();
+        dvmThrowException("Ljava/lang/InternalError;",
+            "VM detected invalid object class ptr");
+        return false;
+    }
+#endif
+    return true;
+}
+
+/*
+ * Determine the exact number of GC heap bytes used by an object.  (Internal
+ * to heap code except for debugging.)
+ */
+size_t dvmObjectSizeInHeap(const Object* obj);
+
+/*
+ * Gets the current ideal heap utilization, represented as a number
+ * between zero and one.
+ */
+float dvmGetTargetHeapUtilization(void);
+
+/*
+ * Sets the new ideal heap utilization, represented as a number
+ * between zero and one.
+ */
+void dvmSetTargetHeapUtilization(float newTarget);
+
+/*
+ * If set is true, sets the new minimum heap size to size; always
+ * returns the current (or previous) size.  If size is zero,
+ * removes the current minimum constraint (if present).
+ */
+size_t dvmMinimumHeapSize(size_t size, bool set);
+
+/*
+ * Updates the internal count of externally-allocated memory.  If there's
+ * enough room for that memory, returns true.  If not, returns false and
+ * does not update the count.
+ *
+ * May cause a GC as a side-effect.
+ */
+bool dvmTrackExternalAllocation(size_t n);
+
+/*
+ * Reduces the internal count of externally-allocated memory.
+ */
+void dvmTrackExternalFree(size_t n);
+
+/*
+ * Returns the number of externally-allocated bytes being tracked by
+ * dvmTrackExternalAllocation/Free().
+ */
+size_t dvmGetExternalBytesAllocated(void);
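+
+/* An illustrative sketch, not part of this change: the reserve/commit
+ * pattern implied by the three declarations above, as native code might
+ * use it when backing a Java object with a native buffer.  The helper
+ * name and error handling are made up.
+ */
+#if 0   // example only
+static void* exampleExternalAlloc(size_t n)
+{
+    /* Reserve room in the external budget first; this may trigger a
+     * GC as a side-effect, and returns false if there's no room.
+     */
+    if (!dvmTrackExternalAllocation(n))
+        return NULL;
+
+    void* ptr = malloc(n);
+    if (ptr == NULL) {
+        dvmTrackExternalFree(n);   /* give the reservation back */
+    }
+    return ptr;
+}
+#endif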
+
+#endif /*_DALVIK_ALLOC_ALLOC*/
diff --git a/vm/alloc/DdmHeap.c b/vm/alloc/DdmHeap.c
new file mode 100644
index 0000000..78da6cd
--- /dev/null
+++ b/vm/alloc/DdmHeap.c
@@ -0,0 +1,487 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * DDM-related heap functions
+ */
+#include <sys/time.h>
+#include <time.h>
+
+#include "Dalvik.h"
+#include "alloc/Heap.h"
+#include "alloc/HeapInternal.h"
+#include "alloc/DdmHeap.h"
+#include "alloc/HeapSource.h"
+
+#define DEFAULT_HEAP_ID  1
+
+enum HpifWhen {
+    HPIF_WHEN_NEVER = 0,
+    HPIF_WHEN_NOW = 1,
+    HPIF_WHEN_NEXT_GC = 2,
+    HPIF_WHEN_EVERY_GC = 3
+};
+
+/*
+ * Chunk HPIF (client --> server)
+ * 
+ * Heap Info. General information about the heap,
+ * suitable for a summary display.
+ * 
+ *   [u4]: number of heaps
+ * 
+ *   For each heap:
+ *     [u4]: heap ID
+ *     [u8]: timestamp in ms since Unix epoch
+ *     [u1]: capture reason (same as 'when' value from server)
+ *     [u4]: max heap size in bytes (-Xmx)
+ *     [u4]: current heap size in bytes
+ *     [u4]: current number of bytes allocated
+ *     [u4]: current number of objects allocated
+ */
+#define HPIF_SIZE(numHeaps) \
+        (sizeof(u4) + (numHeaps) * (5 * sizeof(u4) + sizeof(u1) + sizeof(u8)))
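+/* Worked example (not in the original): for a single heap, HPIF_SIZE(1)
+ * is 4 + (5*4 + 1 + 8) = 33 bytes, matching the field list above.
+ */
+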
+void
+dvmDdmSendHeapInfo(int reason, bool shouldLock)
+{
+    struct timeval now;
+    u8 nowMs;
+    u1 *buf, *b;
+
+    buf = (u1 *)malloc(HPIF_SIZE(1));
+    if (buf == NULL) {
+        return;
+    }
+    b = buf;
+
+    /* If there's a one-shot 'when', reset it.
+     */
+    if (reason == gDvm.gcHeap->ddmHpifWhen) {
+        if (shouldLock && ! dvmLockHeap()) {
+            LOGW("%s(): can't lock heap to clear when\n", __func__);
+            goto skip_when;
+        }
+        if (reason == gDvm.gcHeap->ddmHpifWhen) {
+            if (gDvm.gcHeap->ddmHpifWhen == HPIF_WHEN_NEXT_GC) {
+                gDvm.gcHeap->ddmHpifWhen = HPIF_WHEN_NEVER;
+            }
+        }
+        if (shouldLock) {
+            dvmUnlockHeap();
+        }
+    }
+skip_when:
+
+    /* The current time, in milliseconds since 0:00 GMT, 1/1/70.
+     */
+    if (gettimeofday(&now, NULL) < 0) {
+        nowMs = 0;
+    } else {
+        nowMs = (u8)now.tv_sec * 1000 + now.tv_usec / 1000;
+    }
+
+    /* number of heaps */
+    set4BE(b, 1); b += 4;
+
+    /* For each heap (of which there is one) */
+    {
+        /* heap ID */
+        set4BE(b, DEFAULT_HEAP_ID); b += 4;
+
+        /* timestamp */
+        set8BE(b, nowMs); b += 8;
+
+        /* 'when' value */
+        *b++ = (u1)reason;
+
+        /* max allowed heap size in bytes */
+        set4BE(b, gDvm.heapSizeMax); b += 4;
+
+        /* current heap size in bytes */
+        set4BE(b, dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0)); b += 4;
+
+        /* number of bytes allocated */
+        set4BE(b, dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0)); b += 4;
+
+        /* number of objects allocated */
+        set4BE(b, dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0)); b += 4;
+    }
+    assert((intptr_t)b == (intptr_t)buf + (intptr_t)HPIF_SIZE(1));
+
+    dvmDbgDdmSendChunk(CHUNK_TYPE("HPIF"), b - buf, buf);
+}
+
+bool
+dvmDdmHandleHpifChunk(int when)
+{
+    switch (when) {
+    case HPIF_WHEN_NOW:
+        dvmDdmSendHeapInfo(when, true);
+        break;
+    case HPIF_WHEN_NEVER:
+    case HPIF_WHEN_NEXT_GC:
+    case HPIF_WHEN_EVERY_GC:
+        if (dvmLockHeap()) {
+            gDvm.gcHeap->ddmHpifWhen = when;
+            dvmUnlockHeap();
+        } else {
+            LOGI("%s(): can't lock heap to set when\n", __func__);
+            return false;
+        }
+        break;
+    default:
+        LOGI("%s(): bad when value 0x%08x\n", __func__, when);
+        return false;
+    }
+
+    return true;
+}
+
+enum HpsgSolidity {
+    SOLIDITY_FREE = 0,
+    SOLIDITY_HARD = 1,
+    SOLIDITY_SOFT = 2,
+    SOLIDITY_WEAK = 3,
+    SOLIDITY_PHANTOM = 4,
+    SOLIDITY_FINALIZABLE = 5,
+    SOLIDITY_SWEEP = 6,
+};
+
+enum HpsgKind {
+    KIND_OBJECT = 0,
+    KIND_CLASS_OBJECT = 1,
+    KIND_ARRAY_1 = 2,
+    KIND_ARRAY_2 = 3,
+    KIND_ARRAY_4 = 4,
+    KIND_ARRAY_8 = 5,
+    KIND_UNKNOWN = 6,
+    KIND_NATIVE = 7,
+};
+
+#define HPSG_PARTIAL (1<<7)
+#define HPSG_STATE(solidity, kind) \
+    ((u1)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
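+
+/* Worked example (not in the original): HPSG_STATE(SOLIDITY_HARD,
+ * KIND_ARRAY_4) == ((4 & 0x7) << 3) | (1 & 0x7) == 0x21; the kind lives
+ * in bits 3-5 and the solidity in bits 0-2 of each state byte.
+ */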
+
+typedef struct HeapChunkContext {
+    u1 *buf;
+    u1 *p;
+    u1 *pieceLenField;
+    size_t bufLen;
+    size_t totalAllocationUnits;
+    int type;
+    bool merge;
+    bool needHeader;
+} HeapChunkContext;
+
+#define ALLOCATION_UNIT_SIZE 8
+
+static void
+flush_hpsg_chunk(HeapChunkContext *ctx)
+{
+    /* Patch the "length of piece" field.
+     */
+    assert(ctx->buf <= ctx->pieceLenField &&
+            ctx->pieceLenField <= ctx->p);
+    set4BE(ctx->pieceLenField, ctx->totalAllocationUnits);
+
+    /* Send the chunk.
+     */
+    dvmDbgDdmSendChunk(ctx->type, ctx->p - ctx->buf, ctx->buf);
+
+    /* Reset the context.
+     */
+    ctx->p = ctx->buf;
+    ctx->totalAllocationUnits = 0;
+    ctx->needHeader = true;
+    ctx->pieceLenField = NULL;
+}
+
+static void
+heap_chunk_callback(const void *chunkptr, size_t chunklen,
+                    const void *userptr, size_t userlen, void *arg)
+{
+    HeapChunkContext *ctx = (HeapChunkContext *)arg;
+    u1 state;
+
+    UNUSED_PARAMETER(userlen);
+
+    assert((chunklen & (ALLOCATION_UNIT_SIZE-1)) == 0);
+
+    /* Make sure there's enough room left in the buffer.
+     * We need to use two bytes for every fractional 256
+     * allocation units used by the chunk.
+     */
+    {
+        size_t bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
+        if (bytesLeft < (((chunklen/ALLOCATION_UNIT_SIZE + 255) / 256) * 2)) {
+            flush_hpsg_chunk(ctx);
+        }
+    }
+
+//TODO: notice when there's a gap and start a new heap, or at least a new range.
+    if (ctx->needHeader) {
+        /*
+         * Start a new HPSx chunk.
+         */
+
+        /* [u4]: heap ID */
+        set4BE(ctx->p, DEFAULT_HEAP_ID); ctx->p += 4;
+
+        /* [u1]: size of allocation unit, in bytes */
+        *ctx->p++ = 8;
+
+        /* [u4]: virtual address of segment start */
+        set4BE(ctx->p, (uintptr_t)chunkptr); ctx->p += 4;
+
+        /* [u4]: offset of this piece (relative to the virtual address) */
+        set4BE(ctx->p, 0); ctx->p += 4;
+
+        /* [u4]: length of piece, in allocation units
+         * We won't know this until we're done, so save the offset
+         * and stuff in a dummy value.
+         */
+        ctx->pieceLenField = ctx->p;
+        set4BE(ctx->p, 0x55555555); ctx->p += 4;
+
+        ctx->needHeader = false;
+    }
+
+    /* Determine the type of this chunk.
+     */
+    if (userptr == NULL) {
+        /* It's a free chunk.
+         */
+        state = HPSG_STATE(SOLIDITY_FREE, 0);
+    } else {
+        const DvmHeapChunk *hc = (const DvmHeapChunk *)userptr;
+        const Object *obj = chunk2ptr(hc);
+        /* If we're looking at the native heap, we'll just return 
+         * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
+         */
+        bool native = ctx->type == CHUNK_TYPE("NHSG");
+
+        /* It's an allocated chunk.  Figure out what it is.
+         */
+//TODO: if ctx.merge, see if this chunk is different from the last chunk.
+//      If it's the same, we should combine them.
+        if (!native && dvmIsValidObject(obj)) {
+            ClassObject *clazz = obj->clazz;
+            if (clazz == NULL) {
+                /* The object was probably just created
+                 * but hasn't been initialized yet.
+                 */
+                state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
+            } else if (clazz == gDvm.unlinkedJavaLangClass ||
+                       clazz == gDvm.classJavaLangClass)
+            {
+                state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
+            } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
+                if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
+                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
+                } else {
+                    switch (clazz->elementClass->primitiveType) {
+                    case PRIM_BOOLEAN:
+                    case PRIM_BYTE:
+                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
+                        break;
+                    case PRIM_CHAR:
+                    case PRIM_SHORT:
+                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
+                        break;
+                    case PRIM_INT:
+                    case PRIM_FLOAT:
+                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
+                        break;
+                    case PRIM_DOUBLE:
+                    case PRIM_LONG:
+                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
+                        break;
+                    default:
+                        assert(!"Unknown GC heap object type");
+                        state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
+                        break;
+                    }
+                }
+            } else {
+                state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
+            }
+        } else {
+            obj = NULL; // it's not actually an object
+            state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
+        }
+    }
+
+    /* Write out the chunk description.
+     */
+    chunklen /= ALLOCATION_UNIT_SIZE;   // convert to allocation units
+    ctx->totalAllocationUnits += chunklen;
+    while (chunklen > 256) {
+        *ctx->p++ = state | HPSG_PARTIAL;
+        *ctx->p++ = 255;     // length - 1
+        chunklen -= 256;
+    }
+    *ctx->p++ = state;
+    *ctx->p++ = chunklen - 1;
+}
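+
+/* Worked example (not in the original): a 2000-unit allocated chunk is
+ * encoded as seven (state|HPSG_PARTIAL, 255) pairs covering 256 units
+ * each, then a final (state, 207) pair for the remaining 208 units.
+ */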
+
+enum HpsgWhen {
+    HPSG_WHEN_NEVER = 0,
+    HPSG_WHEN_EVERY_GC = 1,
+};
+enum HpsgWhat {
+    HPSG_WHAT_MERGED_OBJECTS = 0,
+    HPSG_WHAT_DISTINCT_OBJECTS = 1,
+};
+
+#define HPSx_CHUNK_SIZE (4096 - 16)
+
+void dlmalloc_walk_heap(void(*)(const void*, size_t, const void*, size_t, void*),void*);
+
+static void
+walkHeap(bool merge, bool native)
+{
+    HeapChunkContext ctx;
+    
+    memset(&ctx, 0, sizeof(ctx));
+    ctx.bufLen = HPSx_CHUNK_SIZE;
+    ctx.buf = (u1 *)malloc(ctx.bufLen);
+    if (ctx.buf == NULL) {
+        return;
+    }
+
+    ctx.merge = merge;
+    if (native) {
+        ctx.type = CHUNK_TYPE("NHSG");
+    } else {
+        if (ctx.merge) {
+            ctx.type = CHUNK_TYPE("HPSG");
+        } else {
+            ctx.type = CHUNK_TYPE("HPSO");
+        }
+    }
+
+    ctx.p = ctx.buf;
+    ctx.needHeader = true;
+    if (native) {
+        dlmalloc_walk_heap(heap_chunk_callback, (void *)&ctx);
+    } else {
+        dvmHeapSourceWalk(heap_chunk_callback, (void *)&ctx);
+    }
+    if (ctx.p > ctx.buf) {
+        flush_hpsg_chunk(&ctx);
+    }
+
+    free(ctx.buf);
+}
+
+void
+dvmDdmSendHeapSegments(bool shouldLock, bool native)
+{
+    u1 heapId[sizeof(u4)];
+    GcHeap *gcHeap = gDvm.gcHeap;
+    int when, what;
+    bool merge;
+
+    /* Don't even grab the lock if there's nothing to do when we're called.
+     */
+    if (!native) {
+        when = gcHeap->ddmHpsgWhen;
+        what = gcHeap->ddmHpsgWhat;
+        if (when == HPSG_WHEN_NEVER) {
+            return;
+        }
+    } else {
+        when = gcHeap->ddmNhsgWhen;
+        what = gcHeap->ddmNhsgWhat;
+        if (when == HPSG_WHEN_NEVER) {
+            return;
+        }
+    }
+    if (shouldLock && !dvmLockHeap()) {
+        LOGW("Can't lock heap for DDM HPSx dump\n");
+        return;
+    }
+
+    /* Figure out what kind of chunks we'll be sending.
+     */
+    if (what == HPSG_WHAT_MERGED_OBJECTS) {
+        merge = true;
+    } else if (what == HPSG_WHAT_DISTINCT_OBJECTS) {
+        merge = false;
+    } else {
+        assert(!"bad HPSG.what value");
+        return;
+    }
+
+    /* First, send a heap start chunk.
+     */
+    set4BE(heapId, DEFAULT_HEAP_ID);
+    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
+        sizeof(u4), heapId);
+
+    /* Send a series of heap segment chunks.
+     */
+    walkHeap(merge, native);
+
+    /* Finally, send a heap end chunk.
+     */
+    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
+        sizeof(u4), heapId);
+
+    if (shouldLock) {
+        dvmUnlockHeap();
+    }
+}
+
+bool
+dvmDdmHandleHpsgNhsgChunk(int when, int what, bool native)
+{
+    LOGI("dvmDdmHandleHpsgChunk(when %d, what %d, heap %d)\n", when, what,
+         native);
+    switch (when) {
+    case HPSG_WHEN_NEVER:
+    case HPSG_WHEN_EVERY_GC:
+        break;
+    default:
+        LOGI("%s(): bad when value 0x%08x\n", __func__, when);
+        return false;
+    }
+
+    switch (what) {
+    case HPSG_WHAT_MERGED_OBJECTS:
+    case HPSG_WHAT_DISTINCT_OBJECTS:
+        break;
+    default:
+        LOGI("%s(): bad what value 0x%08x\n", __func__, what);
+        return false;
+    }
+
+    if (dvmLockHeap()) {
+        if (!native) {
+            gDvm.gcHeap->ddmHpsgWhen = when;
+            gDvm.gcHeap->ddmHpsgWhat = what;
+        } else {
+            gDvm.gcHeap->ddmNhsgWhen = when;
+            gDvm.gcHeap->ddmNhsgWhat = what;
+        }
+//TODO: if what says we should dump immediately, signal (or do) it from here
+        dvmUnlockHeap();
+    } else {
+        LOGI("%s(): can't lock heap to set when/what\n", __func__);
+        return false;
+    }
+
+    return true;
+}
diff --git a/vm/alloc/DdmHeap.h b/vm/alloc/DdmHeap.h
new file mode 100644
index 0000000..c3e11dc
--- /dev/null
+++ b/vm/alloc/DdmHeap.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * DDM-specific internal heap functions.
+ */
+#ifndef _DALVIK_ALLOC_DDMHEAP
+#define _DALVIK_ALLOC_DDMHEAP
+
+/*
+ * Sends the current heap info to the DDM server.
+ * Should be called after a GC when gcHeap->ddmHpifWhen
+ * is non-zero.
+ */
+void dvmDdmSendHeapInfo(int reason, bool shouldLock);
+
+/*
+ * Walks through the heap and sends a series of
+ * HPST/NHST, HPSG/HPSO/NHSG, and HPEN/NHEN chunks that describe
+ * the contents of the GC or native heap.
+ *
+ * @param shouldLock If true, grab the heap lock.  If false,
+ *                   the heap lock must already be held.
+ * @param native     If false, dump the GC heap; if true, dump the
+ *                   native heap.
+ */
+void dvmDdmSendHeapSegments(bool shouldLock, bool native);
+
+#endif  // _DALVIK_ALLOC_DDMHEAP
diff --git a/vm/alloc/Float12.h b/vm/alloc/Float12.h
new file mode 100644
index 0000000..324cc51
--- /dev/null
+++ b/vm/alloc/Float12.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _DALVIK_FLOAT12_H
+#define _DALVIK_FLOAT12_H
+
+/* Encodes a 32-bit number in 12 bits with +/-1.5% error,
+ * though the majority (80%) are within +/-0.25%.
+ *
+ * The encoding looks like:
+ *
+ *     EEEMMMMM MMMM
+ *     ba987654 3210
+ *
+ * where EEE is a base-16 exponent and MMMMMMMMM is the 9-bit mantissa.
+ * The output value is (MMMM * 16^EEE), or (MMMM << (EEE * 4)).
+ *
+ * TODO: do this in a less brain-dead way.  I'm sure we can do
+ *       it without all of these loops.
+ */
+inline unsigned short intToFloat12(unsigned int val)
+{
+    int oval = val;
+    int shift = 0;
+
+    /* Shift off the precision we don't care about.
+     * Don't round here; it biases the values too high
+     * (such that the encoded value is always greater
+     * than the actual value)
+     */
+    unsigned int pval = val;
+    while (val > 0x1ff) {
+        pval = val;
+        val >>= 1;
+        shift++;
+    }
+    if (shift > 0 && (pval & 1)) {
+        /* Round based on the last bit we shifted off.
+         */
+        val++;
+        if (val > 0x1ff) {
+            val = (val + 1) >> 1;
+            shift++;
+        }
+    }
+
+    /* Shift off enough bits to create a valid exponent.
+     * Since we care about the bits we're losing, be sure
+     * to round them.
+     */
+    while (shift % 4 != 0) {
+        val = (val + 1) >> 1;
+        shift++;
+    }
+
+    /* In the end, only round by the most-significant lost bit.
+     * This centers the values around the closest match.
+     * All of the rounding we did above guarantees that this
+     * round won't overflow past 0x1ff.
+     */
+    if (shift > 0) {
+        val = ((oval >> (shift - 1)) + 1) >> 1;
+    }
+
+    val |= (shift / 4) << 9;
+    return val;
+}
+
+inline unsigned int float12ToInt(unsigned short f12)
+{
+    return (f12 & 0x1ff) << ((f12 >> 9) * 4);
+}
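+
+/* Worked examples (not in the original): values of nine bits or fewer
+ * encode exactly, e.g. intToFloat12(0x1ff) == 0x1ff and
+ * float12ToInt(0x1ff) == 0x1ff.  At the top of the range,
+ * float12ToInt(0x7ff) == 0x1ff << 12 == 0x1ff000 (2,093,056).
+ */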
+
+#if 0   // testing
+
+#include <stdio.h>
+int main(int argc, char *argv[])
+{
+    if (argc != 3) {
+        fprintf(stderr, "usage: %s <min> <max>\n", argv[0]);
+        return 1;
+    }
+
+    unsigned int min = atoi(argv[1]);
+    unsigned int max = atoi(argv[2]);
+    if (min > max) {
+        unsigned int t = min;
+        min = max;
+        max = t;
+    } else if (min == max) {
+        max++;
+    }
+
+    while (min < max) {
+        unsigned int out;
+        unsigned short sf;
+
+        sf = intToFloat12(min);
+        out = float12ToInt(sf);
+//        printf("%d 0x%03x / 0x%03x %d (%d)\n", min, min, sf, out, (int)min - (int)out);
+        printf("%6.6f %d %d\n", ((float)(int)(min - out)) / (float)(int)min, min, out);
+        if (min <= 8192) {
+            min++;
+        } else if (min < 10000) {
+            min += 10;
+        } else if (min < 100000) {
+            min += 1000;
+        } else {
+            min += 10000;
+        }
+    }
+    return 0;
+}
+
+#endif  // testing
+
+#endif  // _DALVIK_FLOAT12_H
diff --git a/vm/alloc/GC.h b/vm/alloc/GC.h
new file mode 100644
index 0000000..62e9aa6
--- /dev/null
+++ b/vm/alloc/GC.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Garbage collector
+ */
+#ifndef _DALVIK_ALLOC_GC
+#define _DALVIK_ALLOC_GC
+
+/*
+ * Initiate garbage collection.
+ *
+ * This usually happens automatically, but can also be caused by Runtime.gc().
+ */
+void dvmCollectGarbage(bool collectSoftRefs);
+
+/****
+ **** NOTE: The functions after this point will (should) only be called
+ ****       during GC.
+ ****/
+
+/*
+ * Functions that mark an object.
+ *
+ * Currently implemented in Heap.c.
+ */
+
+/*
+ * Mark an object and schedule it to be scanned for
+ * references to other objects.
+ *
+ * @param obj must be a valid object
+ */
+void dvmMarkObjectNonNull(const Object *obj);
+
+/*
+ * Mark an object and schedule it to be scanned for
+ * references to other objects.
+ *
+ * @param obj must be a valid object or NULL
+ */
+#define dvmMarkObject(obj) \
+    do { \
+        Object *DMO_obj_ = (Object *)(obj); \
+        if (DMO_obj_ != NULL) { \
+            dvmMarkObjectNonNull(DMO_obj_); \
+        } \
+    } while (false)
+
+/*
+ * If obj points to a valid object, mark it and
+ * schedule it to be scanned for references to other
+ * objects.
+ *
+ * @param obj any pointer that may be an Object, or NULL
+TODO: check for alignment, too (would require knowledge of heap chunks)
+ */
+#define dvmMarkIfObject(obj) \
+    do { \
+        Object *DMIO_obj_ = (Object *)(obj); \
+        if (DMIO_obj_ != NULL && dvmIsValidObject(DMIO_obj_)) { \
+            dvmMarkObjectNonNull(DMIO_obj_); \
+        } \
+    } while (false)
+
+/*
+ * Functions that handle scanning various objects for references.
+ */
+
+/*
+ * Mark all class objects loaded by the root class loader;
+ * most of these are the java.* classes.
+ *
+ * Currently implemented in Class.c.
+ */
+void dvmGcScanRootClassLoader(void);
+
+/*
+ * Mark all root ThreadGroup objects, guaranteeing that
+ * all live Thread objects will eventually be scanned.
+ *
+ * NOTE: this is a misnomer, because the current implementation
+ * actually only scans the internal list of VM threads, which
+ * will mark all VM-reachable Thread objects.  Someone else
+ * must scan the root class loader, which will mark java/lang/ThreadGroup.
+ * The ThreadGroup class object has static members pointing to
+ * the root ThreadGroups, and these will be marked as a side-effect
+ * of marking the class object.
+ *
+ * Currently implemented in Thread.c.
+ */
+void dvmGcScanRootThreadGroups(void);
+
+/*
+ * Mark all interned string objects.
+ *
+ * Currently implemented in Intern.c.
+ */
+void dvmGcScanInternedStrings(void);
+
+/*
+ * Remove any unmarked interned string objects from the table.
+ *
+ * Currently implemented in Intern.c.
+ */
+void dvmGcDetachDeadInternedStrings(int (*isUnmarkedObject)(void *));
+
+/*
+ * Mark all primitive class objects.
+ *
+ * Currently implemented in Array.c.
+ */
+void dvmGcScanPrimitiveClasses(void);
+
+/*
+ * Mark all JNI global references.
+ *
+ * Currently implemented in JNI.c.
+ */
+void dvmGcMarkJniGlobalRefs(void);
+
+/*
+ * Mark all debugger references.
+ *
+ * Currently implemented in Debugger.c.
+ */
+void dvmGcMarkDebuggerRefs(void);
+
+/*
+ * Optional heap profiling.
+ */
+#if WITH_HPROF && !defined(_DALVIK_HPROF_HPROF)
+#include "hprof/Hprof.h"
+#define HPROF_SET_GC_SCAN_STATE(tag_, thread_) \
+    dvmHeapSetHprofGcScanState((tag_), (thread_))
+#define HPROF_CLEAR_GC_SCAN_STATE() \
+    dvmHeapSetHprofGcScanState(0, 0)
+#else
+#define HPROF_SET_GC_SCAN_STATE(tag_, thread_)  do {} while (false)
+#define HPROF_CLEAR_GC_SCAN_STATE()  do {} while (false)
+#endif
+
+#endif  // _DALVIK_ALLOC_GC
diff --git a/vm/alloc/Heap.c b/vm/alloc/Heap.c
new file mode 100644
index 0000000..9ddc8be
--- /dev/null
+++ b/vm/alloc/Heap.c
@@ -0,0 +1,1068 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Garbage-collecting memory allocator.
+ */
+#include "Dalvik.h"
+#include "alloc/HeapTable.h"
+#include "alloc/Heap.h"
+#include "alloc/HeapInternal.h"
+#include "alloc/DdmHeap.h"
+#include "alloc/HeapSource.h"
+#include "alloc/MarkSweep.h"
+
+#include "utils/threads.h"      // need Android thread priorities
+#define kInvalidPriority        10000
+
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <limits.h>
+#include <errno.h>
+
+#define kNonCollectableRefDefault   16
+#define kFinalizableRefDefault      128
+
+/*
+ * Initialize the GC heap.
+ *
+ * Returns true if successful, false otherwise.
+ */
+bool dvmHeapStartup()
+{
+    GcHeap *gcHeap;
+
+#if defined(WITH_ALLOC_LIMITS)
+    gDvm.checkAllocLimits = false;
+    gDvm.allocationLimit = -1;
+#endif
+
+    gcHeap = dvmHeapSourceStartup(gDvm.heapSizeStart, gDvm.heapSizeMax);
+    if (gcHeap == NULL) {
+        return false;
+    }
+    gcHeap->heapWorkerCurrentObject = NULL;
+    gcHeap->heapWorkerCurrentMethod = NULL;
+    gcHeap->heapWorkerInterpStartTime = 0LL;
+    gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
+    gcHeap->softReferenceHeapSizeThreshold = gDvm.heapSizeStart;
+    gcHeap->ddmHpifWhen = 0;
+    gcHeap->ddmHpsgWhen = 0;
+    gcHeap->ddmHpsgWhat = 0;
+    gcHeap->ddmNhsgWhen = 0;
+    gcHeap->ddmNhsgWhat = 0;
+#if WITH_HPROF
+    gcHeap->hprofDumpOnGc = false;
+    gcHeap->hprofContext = NULL;
+#endif
+
+    /* This needs to be set before we call dvmHeapInitHeapRefTable().
+     */
+    gDvm.gcHeap = gcHeap;
+
+    /* Set up the table we'll use for ALLOC_NO_GC.
+     */
+    if (!dvmHeapInitHeapRefTable(&gcHeap->nonCollectableRefs,
+                           kNonCollectableRefDefault))
+    {
+        LOGE_HEAP("Can't allocate GC_NO_ALLOC table\n");
+        goto fail;
+    }
+
+    /* Set up the lists and lock we'll use for finalizable
+     * and reference objects.
+     */
+    dvmInitMutex(&gDvm.heapWorkerListLock);
+    gcHeap->finalizableRefs = NULL;
+    gcHeap->pendingFinalizationRefs = NULL;
+    gcHeap->referenceOperations = NULL;
+
+    /* Initialize the HeapWorker locks and other state
+     * that the GC uses.
+     */
+    dvmInitializeHeapWorkerState();
+
+    return true;
+
+fail:
+    gDvm.gcHeap = NULL;
+    dvmHeapSourceShutdown(gcHeap);
+    return false;
+}
+
+bool dvmHeapStartupAfterZygote()
+{
+    /* Update our idea of the last GC start time so that we
+     * don't use the last time that Zygote happened to GC.
+     */
+    gDvm.gcHeap->gcStartTime = dvmGetRelativeTimeUsec();
+
+    return dvmHeapSourceStartupAfterZygote();
+}
+
+void dvmHeapShutdown()
+{
+//TODO: make sure we're locked
+    if (gDvm.gcHeap != NULL) {
+        GcHeap *gcHeap;
+
+        gcHeap = gDvm.gcHeap;
+        gDvm.gcHeap = NULL;
+
+        /* Tables are allocated on the native heap;
+         * they need to be cleaned up explicitly.
+         * The process may stick around, so we don't
+         * want to leak any native memory.
+         */
+        dvmHeapFreeHeapRefTable(&gcHeap->nonCollectableRefs);
+
+        dvmHeapFreeLargeTable(gcHeap->finalizableRefs);
+        gcHeap->finalizableRefs = NULL;
+
+        dvmHeapFreeLargeTable(gcHeap->pendingFinalizationRefs);
+        gcHeap->pendingFinalizationRefs = NULL;
+
+        dvmHeapFreeLargeTable(gcHeap->referenceOperations);
+        gcHeap->referenceOperations = NULL;
+
+        /* Destroy the heap.  Any outstanding pointers
+         * will point to unmapped memory (unless/until
+         * someone else maps it).  This frees gcHeap
+         * as a side-effect.
+         */
+        dvmHeapSourceShutdown(gcHeap);
+    }
+}
+
+/*
+ * We've been asked to allocate something we can't, e.g. an array so
+ * large that (length * elementWidth) is larger than 2^31.  We want to
+ * throw an OutOfMemoryError, but doing so implies that certain other
+ * actions have taken place (like clearing soft references).
+ *
+ * TODO: for now we just throw an InternalError.
+ */
+void dvmThrowBadAllocException(const char* msg)
+{
+    dvmThrowException("Ljava/lang/InternalError;", msg);
+}
+
+/*
+ * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
+ * we're going to have to wait on the mutex.
+ */
+bool dvmLockHeap()
+{
+    if (pthread_mutex_trylock(&gDvm.gcHeapLock) != 0) {
+        Thread *self;
+        ThreadStatus oldStatus;
+        int cc;
+
+        self = dvmThreadSelf();
+        if (self != NULL) {
+            oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
+        } else {
+            oldStatus = -1; // shut up gcc
+        }
+
+        cc = pthread_mutex_lock(&gDvm.gcHeapLock);
+        assert(cc == 0);
+
+        if (self != NULL) {
+            dvmChangeStatus(self, oldStatus);
+        }
+    }
+
+    return true;
+}
+
+void dvmUnlockHeap()
+{
+    dvmUnlockMutex(&gDvm.gcHeapLock);
+}
+
+/* Pop an object from the list of pending finalizations and
+ * reference clears/enqueues, and return the object.
+ * The caller must call dvmReleaseTrackedAlloc()
+ * on the object when finished.
+ *
+ * Typically only called by the heap worker thread.
+ */
+Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op)
+{
+    Object *obj;
+    LargeHeapRefTable *table;
+    GcHeap *gcHeap = gDvm.gcHeap;
+
+    assert(op != NULL);
+
+    obj = NULL;
+
+    dvmLockMutex(&gDvm.heapWorkerListLock);
+
+    /* We must handle reference operations before finalizations.
+     * If:
+     *     a) Someone subclasses WeakReference and overrides clear()
+     *     b) A reference of this type is the last reference to
+     *        a finalizable object
+     * then we need to guarantee that the overridden clear() is called
+     * on the reference before finalize() is called on the referent.
+     * Both of these operations will always be scheduled at the same
+     * time, so handling reference operations first will guarantee
+     * the required order.
+     */
+    obj = dvmHeapGetNextObjectFromLargeTable(&gcHeap->referenceOperations);
+    if (obj != NULL) {
+        uintptr_t workBits;
+
+        workBits = (uintptr_t)obj & (WORKER_CLEAR | WORKER_ENQUEUE);
+        assert(workBits != 0);
+        obj = (Object *)((uintptr_t)obj & ~(WORKER_CLEAR | WORKER_ENQUEUE));
+
+        *op = workBits;
+    } else {
+        obj = dvmHeapGetNextObjectFromLargeTable(
+                &gcHeap->pendingFinalizationRefs);
+        if (obj != NULL) {
+            *op = WORKER_FINALIZE;
+        }
+    }
+
+    if (obj != NULL) {
+        /* Don't let the GC collect the object until the
+         * worker thread is done with it.
+         *
+         * This call is safe;  it uses thread-local storage
+         * and doesn't acquire any locks.
+         */
+        dvmAddTrackedAlloc(obj, NULL);
+    }
+
+    dvmUnlockMutex(&gDvm.heapWorkerListLock);
+
+    return obj;
+}
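+
+/* Note (not in the original): the tables hold tagged pointers.  Since
+ * heap allocations are 8-byte aligned, the low bits of each entry are
+ * free to carry the WORKER_CLEAR/WORKER_ENQUEUE operation bits, which
+ * the code above masks off before returning the real Object pointer.
+ */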
+
+/* Used for a heap size change hysteresis to avoid collecting
+ * SoftReferences when the heap only grows by a small amount.
+ */
+#define SOFT_REFERENCE_GROWTH_SLACK (128 * 1024)
+
+/* Whenever the effective heap size may have changed,
+ * this function must be called.
+ */
+void dvmHeapSizeChanged()
+{
+    GcHeap *gcHeap = gDvm.gcHeap;
+    size_t currentHeapSize;
+
+    currentHeapSize = dvmHeapSourceGetIdealFootprint();
+
+    /* See if the heap size has changed enough that we should care
+     * about it.
+     */
+    if (currentHeapSize <= gcHeap->softReferenceHeapSizeThreshold -
+            4 * SOFT_REFERENCE_GROWTH_SLACK)
+    {
+        /* The heap has shrunk enough that we'll use this as a new
+         * threshold.  Since we're doing better on space, there's
+         * no need to collect any SoftReferences.
+         *
+         * This is 4x the growth hysteresis because we don't want
+         * to snap down so easily after a shrink.  If we just cleared
+         * up a bunch of SoftReferences, we don't want to disallow
+         * any new ones from being created.
+         * TODO: determine if the 4x is important, needed, or even good
+         */
+        gcHeap->softReferenceHeapSizeThreshold = currentHeapSize;
+        gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
+    } else if (currentHeapSize >= gcHeap->softReferenceHeapSizeThreshold +
+            SOFT_REFERENCE_GROWTH_SLACK)
+    {
+        /* The heap has grown enough to warrant collecting SoftReferences.
+         */
+        gcHeap->softReferenceHeapSizeThreshold = currentHeapSize;
+        gcHeap->softReferenceCollectionState = SR_COLLECT_SOME;
+    }
+}
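+
+/* Worked example (not in the original): with a 4MB threshold, growing
+ * the ideal footprint to 4MB + 128KB (one slack unit) switches the state
+ * to SR_COLLECT_SOME, but the footprint must drop to 4MB - 512KB (four
+ * slack units) before the state snaps back to SR_COLLECT_NONE.
+ */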
+
+
+/* Do a full garbage collection, which may grow the
+ * heap as a side-effect if the live set is large.
+ */
+static void gcForMalloc(bool collectSoftReferences)
+{
+#ifdef WITH_PROFILER
+    if (gDvm.allocProf.enabled) {
+        Thread* self = dvmThreadSelf();
+        gDvm.allocProf.gcCount++;
+        if (self != NULL) {
+            self->allocProf.gcCount++;
+        }
+    }
+#endif
+    /* This may adjust the soft limit as a side-effect.
+     */
+    LOGD_HEAP("dvmMalloc initiating GC%s\n",
+            collectSoftReferences ? "(collect SoftReferences)" : "");
+    dvmCollectGarbageInternal(collectSoftReferences);
+}
+
+/* Try as hard as possible to allocate some memory.
+ */
+static DvmHeapChunk *tryMalloc(size_t size)
+{
+    DvmHeapChunk *hc;
+
+    /* Don't try too hard if there's no way the allocation is
+     * going to succeed.  We have to collect SoftReferences before
+     * throwing an OOME, though.
+     */
+    if (size >= gDvm.heapSizeMax) {
+        LOGW_HEAP("dvmMalloc(%zu/0x%08zx): "
+                "someone's allocating a huge buffer\n", size, size);
+        hc = NULL;
+        goto collect_soft_refs;
+    }
+
+//TODO: figure out better heuristics
+//    There will be a lot of churn if someone allocates a bunch of
+//    big objects in a row, and we hit the frag case each time.
+//    A full GC for each.
+//    Maybe we grow the heap in bigger leaps
+//    Maybe we skip the GC if the size is large and we did one recently
+//      (number of allocations ago) (watch for thread effects)
+//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
+//      (or, at least, there are only 0-5 objects swept each time)
+
+    hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
+    if (hc != NULL) {
+        return hc;
+    }
+
+    /* The allocation failed.  Free up some space by doing
+     * a full garbage collection.  This may grow the heap
+     * if the live set is sufficiently large.
+     */
+    gcForMalloc(false);
+    hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
+    if (hc != NULL) {
+        return hc;
+    }
+
+    /* Even that didn't work;  this is an exceptional state.
+     * Try harder, growing the heap if necessary.
+     */
+    hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
+    dvmHeapSizeChanged();
+    if (hc != NULL) {
+        size_t newHeapSize;
+
+        newHeapSize = dvmHeapSourceGetIdealFootprint();
+//TODO: may want to grow a little bit more so that the amount of free
+//      space is equal to the old free space + the utilization slop for
+//      the new allocation.
+        LOGI_HEAP("Grow heap (frag case) to "
+                "%zu.%03zuMB for %zu-byte allocation\n",
+                FRACTIONAL_MB(newHeapSize), size);
+        return hc;
+    }
+
+    /* Most allocations should have succeeded by now, so the heap
+     * is really full, really fragmented, or the requested size is
+     * really big.  Do another GC, collecting SoftReferences this
+     * time.  The VM spec requires that all SoftReferences have
+     * been collected and cleared before throwing an OOME.
+     */
+//TODO: wait for the finalizers from the previous GC to finish
+collect_soft_refs:
+    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation\n",
+            size);
+    gcForMalloc(true);
+    hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
+    dvmHeapSizeChanged();
+    if (hc != NULL) {
+        return hc;
+    }
+//TODO: maybe wait for finalizers and try one last time
+
+    LOGE_HEAP("Out of memory on a %zd-byte allocation.\n", size);
+//TODO: tell the HeapSource to dump its state
+    dvmDumpThread(dvmThreadSelf(), false);
+
+    return NULL;
+}
+
+/* Throw an OutOfMemoryError if there's a thread to attach it to.
+ * Avoid recursing.
+ *
+ * The caller must not be holding the heap lock, or else the allocations
+ * in dvmThrowException() will deadlock.
+ */
+static void throwOOME()
+{
+    Thread *self;
+
+    if ((self = dvmThreadSelf()) != NULL) {
+        /* If the current (failing) dvmMalloc() happened as part of thread
+         * creation/attachment before the thread became part of the root set,
+         * we can't rely on the thread-local trackedAlloc table, so
+         * we can't keep track of a real allocated OOME object.  But, since
+         * the thread is in the process of being created, it won't have
+         * a useful stack anyway, so we may as well make things easier
+         * by throwing the (stackless) pre-built OOME.
+         */
+        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
+            /* Let ourselves know that we tried to throw an OOM
+             * error in the normal way in case we run out of
+             * memory trying to allocate it inside dvmThrowException().
+             */
+            self->throwingOOME = true;
+
+            /* Don't include a description string;
+             * one fewer allocation.
+             */
+            dvmThrowException("Ljava/lang/OutOfMemoryError;", NULL);
+        } else {
+            /*
+             * This thread has already tried to throw an OutOfMemoryError,
+             * which probably means that we're running out of memory
+             * while recursively trying to throw.
+             *
+             * To avoid any more allocation attempts, "throw" a pre-built
+             * OutOfMemoryError object (which won't have a useful stack trace).
+             *
+             * Note that since this call can't possibly allocate anything,
+             * we don't care about the state of self->throwingOOME
+             * (which will usually already be set).
+             */
+            dvmSetException(self, gDvm.outOfMemoryObj);
+        }
+        /* We're done with the possible recursion.
+         */
+        self->throwingOOME = false;
+    }
+}
+
+/*
+ * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
+ *
+ * The new storage is zeroed out.
+ *
+ * Note that, in rare cases, this could get called while a GC is in
+ * progress.  If a non-VM thread tries to attach itself through JNI,
+ * it will need to allocate some objects.  If this becomes annoying to
+ * deal with, we can block it at the source, but holding the allocation
+ * mutex should be enough.
+ *
+ * In rare circumstances (JNI AttachCurrentThread) we can be called
+ * from a non-VM thread.
+ *
+ * We implement ALLOC_NO_GC by maintaining an internal list of objects
+ * that should not be collected.  This requires no actual flag storage in
+ * the object itself, which is good, but makes flag queries expensive.
+ *
+ * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
+ * (because it's being done for the interpreter "new" operation and will
+ * be part of the root set immediately) or we can't (because this allocation
+ * is for a brand new thread).
+ *
+ * Returns NULL and throws an exception on failure.
+ *
+ * TODO: don't do a GC if the debugger thinks all threads are suspended
+ */
+void* dvmMalloc(size_t size, int flags)
+{
+    GcHeap *gcHeap = gDvm.gcHeap;
+    DvmHeapChunk *hc;
+    void *ptr;
+    bool triedGc, triedGrowing;
+
+#if 0
+    /* handy for spotting large allocations */
+    if (size >= 100000) {
+        LOGI("dvmMalloc(%d):\n", size);
+        dvmDumpThread(dvmThreadSelf(), false);
+    }
+#endif
+
+#if defined(WITH_ALLOC_LIMITS)
+    /*
+     * See if they've exceeded the allocation limit for this thread.
+     *
+     * A limit value of -1 means "no limit".
+     *
+     * This is enabled at compile time because it requires us to do a
+     * TLS lookup for the Thread pointer.  This has enough of a performance
+     * impact that we don't want to do it if we don't have to.  (Now that
+     * we're using gDvm.checkAllocLimits we may want to reconsider this,
+     * but it's probably still best to just compile the check out of
+     * production code -- one less thing to hit on every allocation.)
+     */
+    if (gDvm.checkAllocLimits) {
+        Thread* self = dvmThreadSelf();
+        if (self != NULL) {
+            int count = self->allocLimit;
+            if (count > 0) {
+                self->allocLimit--;
+            } else if (count == 0) {
+                /* fail! */
+                assert(!gDvm.initializing);
+                self->allocLimit = -1;
+                dvmThrowException("Ldalvik/system/AllocationLimitError;",
+                    "thread allocation limit exceeded");
+                return NULL;
+            }
+        }
+    }
+
+    if (gDvm.allocationLimit >= 0) {
+        assert(!gDvm.initializing);
+        gDvm.allocationLimit = -1;
+        dvmThrowException("Ldalvik/system/AllocationLimitError;",
+            "global allocation limit exceeded");
+        return NULL;
+    }
+#endif
+
+    dvmLockHeap();
+
+    /* Try as hard as possible to allocate some memory.
+     */
+    hc = tryMalloc(size);
+    if (hc != NULL) {
+alloc_succeeded:
+        /* We've got the memory.
+         */
+        if ((flags & ALLOC_FINALIZABLE) != 0) {
+            /* This object is an instance of a class that
+             * overrides finalize().  Add it to the finalizable list.
+             *
+             * Note that until DVM_OBJECT_INIT() is called on this
+             * object, its clazz will be NULL.  Since the object is
+             * in this table, it will be scanned as part of the root
+             * set.  scanObject() explicitly deals with the NULL clazz.
+             */
+            if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs,
+                                    (Object *)hc->data))
+            {
+                LOGE_HEAP("dvmMalloc(): no room for any more "
+                        "finalizable objects\n");
+                dvmAbort();
+            }
+        }
+
+#if WITH_OBJECT_HEADERS
+        hc->header = OBJECT_HEADER;
+        hc->birthGeneration = gGeneration;
+#endif
+        ptr = hc->data;
+
+        /* The caller may not want us to collect this object.
+         * If not, throw it in the nonCollectableRefs table, which
+         * will be added to the root set when we GC.
+         *
+         * Note that until DVM_OBJECT_INIT() is called on this
+         * object, its clazz will be NULL.  Since the object is
+         * in this table, it will be scanned as part of the root
+         * set.  scanObject() explicitly deals with the NULL clazz.
+         */
+        if ((flags & ALLOC_NO_GC) != 0) {
+            if (!dvmHeapAddToHeapRefTable(&gcHeap->nonCollectableRefs, ptr)) {
+                LOGE_HEAP("dvmMalloc(): no room for any more "
+                        "ALLOC_NO_GC objects: %zd\n",
+                        dvmHeapNumHeapRefTableEntries(
+                                &gcHeap->nonCollectableRefs));
+                dvmAbort();
+            }
+        }
+
+#ifdef WITH_PROFILER
+        if (gDvm.allocProf.enabled) {
+            Thread* self = dvmThreadSelf();
+            gDvm.allocProf.allocCount++;
+            gDvm.allocProf.allocSize += size;
+            if (self != NULL) {
+                self->allocProf.allocCount++;
+                self->allocProf.allocSize += size;
+            }
+        }
+#endif
+    } else {
+        /* The allocation failed.
+         */
+        ptr = NULL;
+
+#ifdef WITH_PROFILER
+        if (gDvm.allocProf.enabled) {
+            Thread* self = dvmThreadSelf();
+            gDvm.allocProf.failedAllocCount++;
+            gDvm.allocProf.failedAllocSize += size;
+            if (self != NULL) {
+                self->allocProf.failedAllocCount++;
+                self->allocProf.failedAllocSize += size;
+            }
+        }
+#endif
+    }
+
+    dvmUnlockHeap();
+
+    if (ptr != NULL) {
+        /*
+         * If this block is immediately GCable, and they haven't asked us not
+         * to track it, add it to the internal tracking list.
+         *
+         * If there's no "self" yet, we can't track it.  Calls made before
+         * the Thread exists should use ALLOC_NO_GC.
+         */
+        if ((flags & (ALLOC_DONT_TRACK | ALLOC_NO_GC)) == 0) {
+            dvmAddTrackedAlloc(ptr, NULL);
+        }
+    } else {
+        /* 
+         * The allocation failed; throw an OutOfMemoryError.
+         */
+        throwOOME();
+    }
+
+    return ptr;
+}
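+
+/* A minimal usage sketch (illustrative only, compiled out): a caller
+ * that runs before the current Thread exists can't use the tracked
+ * allocation table, so it pins the object with ALLOC_NO_GC instead.
+ * "earlyAlloc" is a hypothetical function, not part of this file.
+ */
+#if 0
+static void* earlyAlloc(size_t size)
+{
+    void* ptr = dvmMalloc(size, ALLOC_NO_GC);
+    if (ptr == NULL) {
+        return NULL;    /* dvmMalloc() has thrown OutOfMemoryError */
+    }
+    /* ... initialize the object ... */
+
+    /* Once the object is reachable through normal roots, the pin
+     * can be dropped:
+     *
+     *     dvmClearAllocFlags((Object*)ptr, ALLOC_NO_GC);
+     */
+    return ptr;
+}
+#endif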
+
+/*
+ * Returns true iff <obj> points to a valid allocated object.
+ */
+bool dvmIsValidObject(const Object* obj)
+{
+    const DvmHeapChunk *hc;
+
+    /* Don't bother if it's NULL or not 8-byte aligned.
+     */
+    hc = ptr2chunk(obj);
+    if (obj != NULL && ((uintptr_t)hc & (8-1)) == 0) {
+        /* Even if the heap isn't locked, this shouldn't return
+         * any false negatives.  The only mutation that could
+         * be happening is allocation, which means that another
+         * thread could be in the middle of a read-modify-write
+         * to add a new bit for a new object.  However, that
+         * RMW will have completed by the time any other thread
+         * could possibly see the new pointer, so there is no
+         * danger of dvmIsValidObject() being called on a valid
+         * pointer whose bit isn't set.
+         *
+         * Freeing will only happen during the sweep phase, which
+         * only happens while the heap is locked.
+         */
+        return dvmHeapSourceContains(hc);
+    }
+    return false;
+}
+
+/*
+ * Clear flags that were passed into dvmMalloc() et al.
+ * e.g., ALLOC_NO_GC, ALLOC_DONT_TRACK.
+ */
+void dvmClearAllocFlags(Object *obj, int mask)
+{
+    if ((mask & ALLOC_NO_GC) != 0) {
+        dvmLockHeap();
+        if (dvmIsValidObject(obj)) {
+            if (!dvmHeapRemoveFromHeapRefTable(&gDvm.gcHeap->nonCollectableRefs,
+                                               obj))
+            {
+                LOGE_HEAP("dvmMalloc(): failed to remove ALLOC_NO_GC bit from "
+                        "object 0x%08x\n", (uintptr_t)obj);
+                dvmAbort();
+            }
+//TODO: shrink if the table is very empty
+        }
+        dvmUnlockHeap();
+    }
+
+    if ((mask & ALLOC_DONT_TRACK) != 0) {
+        dvmReleaseTrackedAlloc(obj, NULL);
+    }
+}
+
+size_t dvmObjectSizeInHeap(const Object *obj)
+{
+    return dvmHeapSourceChunkSize(ptr2chunk(obj)) - sizeof(DvmHeapChunk);
+}
+
+/*
+ * Initiate garbage collection.
+ *
+ * NOTES:
+ * - If we don't hold gDvm.threadListLock, it's possible for a thread to
+ *   be added to the thread list while we work.  The thread should NOT
+ *   start executing, so this is only interesting when we start chasing
+ *   thread stacks.  (Before we do so, grab the lock.)
+ *
+ * We are not allowed to GC when the debugger has suspended the VM, which
+ * is awkward because debugger requests can cause allocations.  The easiest
+ * way to enforce this is to refuse to GC on an allocation made by the
+ * JDWP thread -- we have to expand the heap or fail.
+ */
+void dvmCollectGarbageInternal(bool collectSoftReferences)
+{
+    GcHeap *gcHeap = gDvm.gcHeap;
+    Object *softReferences;
+    Object *weakReferences;
+    Object *phantomReferences;
+
+    u8 now;
+    s8 timeSinceLastGc;
+    s8 gcElapsedTime;
+    int numFreed;
+    size_t sizeFreed;
+
+#if DVM_TRACK_HEAP_MARKING
+    /* Since weak and soft references are always cleared,
+     * they don't require any marking.
+     * (Soft are lumped into strong when they aren't cleared.)
+     */
+    size_t strongMarkCount = 0;
+    size_t strongMarkSize = 0;
+    size_t finalizeMarkCount = 0;
+    size_t finalizeMarkSize = 0;
+    size_t phantomMarkCount = 0;
+    size_t phantomMarkSize = 0;
+#endif
+
+    /* The heap lock must be held.
+     */
+
+    if (gcHeap->gcRunning) {
+        LOGW_HEAP("Attempted recursive GC\n");
+        return;
+    }
+    gcHeap->gcRunning = true;
+    now = dvmGetRelativeTimeUsec();
+    if (gcHeap->gcStartTime != 0) {
+        timeSinceLastGc = (now - gcHeap->gcStartTime) / 1000;
+    } else {
+        timeSinceLastGc = 0;
+    }
+    gcHeap->gcStartTime = now;
+
+    LOGV_HEAP("GC starting -- suspending threads\n");
+
+    dvmSuspendAllThreads(SUSPEND_FOR_GC);
+
+    /* Get the priority (the "nice" value) of the current thread.  The
+     * getpriority() call can legitimately return -1, so we have to
+     * explicitly test errno.
+     */
+    errno = 0;
+    int oldThreadPriority = kInvalidPriority;
+    int priorityResult = getpriority(PRIO_PROCESS, 0);
+    if (errno != 0) {
+        LOGI_HEAP("getpriority(self) failed: %s\n", strerror(errno));
+    } else if (priorityResult > ANDROID_PRIORITY_NORMAL) {
+        /* Current value is numerically greater than "normal", which
+         * in backward UNIX terms means lower priority.
+         */
+        if (setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL) != 0) {
+            LOGI_HEAP("Unable to elevate priority from %d to %d\n",
+                priorityResult, ANDROID_PRIORITY_NORMAL);
+        } else {
+            /* priority elevated; save value so we can restore it later */
+            LOGD_HEAP("Elevating priority from %d to %d\n",
+                priorityResult, ANDROID_PRIORITY_NORMAL);
+            oldThreadPriority = priorityResult;
+        }
+    }
+
+    /* Wait for the HeapWorker thread to block.
+     * (It may also already be suspended in interp code,
+     * in which case it's not holding heapWorkerLock.)
+     */
+    dvmLockMutex(&gDvm.heapWorkerLock);
+
+    /* Make sure that the HeapWorker thread hasn't become
+     * wedged inside interp code.  If it has, this call will
+     * print a message and abort the VM.
+     */
+    dvmAssertHeapWorkerThreadRunning();
+
+    /* Lock the pendingFinalizationRefs list.
+     *
+     * Acquire the lock after suspending so the finalizer
+     * thread can't block in the RUNNING state while
+     * we try to suspend.
+     */
+    dvmLockMutex(&gDvm.heapWorkerListLock);
+
+#ifdef WITH_PROFILER
+    dvmMethodTraceGCBegin();
+#endif
+
+#if WITH_HPROF
+
+/* Set DUMP_HEAP_ON_DDMS_UPDATE to 1 to enable heap dumps
+ * whenever DDMS requests a heap update (HPIF chunk).
+ * The output files will appear in /data/misc, which must
+ * already exist.
+ * You must define "WITH_HPROF := true" in your buildspec.mk
+ * and recompile libdvm for this to work.
+ *
+ * To enable stack traces for each allocation, define
+ * "WITH_HPROF_STACK := true" in buildspec.mk.  This option slows down
+ * allocations and also requires 8 additional bytes per object on the
+ * GC heap.
+ */
+#define DUMP_HEAP_ON_DDMS_UPDATE 0
+#if DUMP_HEAP_ON_DDMS_UPDATE
+    gcHeap->hprofDumpOnGc |= (gcHeap->ddmHpifWhen != 0);
+#endif
+
+    if (gcHeap->hprofDumpOnGc) {
+        char nameBuf[128];
+
+        if (gcHeap->hprofFileName == NULL) {
+            /* no filename was provided; invent one */
+            sprintf(nameBuf, "/data/misc/heap-dump-tm%d-pid%d.hprof",
+                (int) time(NULL), (int) getpid());
+            gcHeap->hprofFileName = nameBuf;
+        }
+        gcHeap->hprofContext = hprofStartup(gcHeap->hprofFileName);
+        if (gcHeap->hprofContext != NULL) {
+            hprofStartHeapDump(gcHeap->hprofContext);
+        }
+        gcHeap->hprofDumpOnGc = false;
+        gcHeap->hprofFileName = NULL;
+    }
+#endif
+
+    if (timeSinceLastGc < 10000) {
+        LOGD_HEAP("GC! (%dms since last GC)\n",
+                (int)timeSinceLastGc);
+    } else {
+        LOGD_HEAP("GC! (%d sec since last GC)\n",
+                (int)(timeSinceLastGc / 1000));
+    }
+#if DVM_TRACK_HEAP_MARKING
+    gcHeap->markCount = 0;
+    gcHeap->markSize = 0;
+#endif
+
+    /* Set up the marking context.
+     */
+    dvmHeapBeginMarkStep();
+
+    /* Mark the set of objects that are strongly reachable from the roots.
+     */
+    LOGD_HEAP("Marking...");
+    dvmHeapMarkRootSet();
+
+    /* dvmHeapScanMarkedObjects() will build the lists of known
+     * instances of the Reference classes.
+     */
+    gcHeap->softReferences = NULL;
+    gcHeap->weakReferences = NULL;
+    gcHeap->phantomReferences = NULL;
+
+    /* Make sure that we don't hard-mark the referents of Reference
+     * objects by default.
+     */
+    gcHeap->markAllReferents = false;
+
+    /* Don't mark SoftReferences if our caller wants us to collect them.
+     * This has to be set before calling dvmHeapScanMarkedObjects().
+     */
+    if (collectSoftReferences) {
+        gcHeap->softReferenceCollectionState = SR_COLLECT_ALL;
+    }
+
+    /* Recursively mark any objects that marked objects point to strongly.
+     * If we're not collecting soft references, soft-reachable
+     * objects will also be marked.
+     */
+    LOGD_HEAP("Recursing...");
+    dvmHeapScanMarkedObjects();
+#if DVM_TRACK_HEAP_MARKING
+    strongMarkCount = gcHeap->markCount;
+    strongMarkSize = gcHeap->markSize;
+    gcHeap->markCount = 0;
+    gcHeap->markSize = 0;
+#endif
+
+    /* Latch these so that the other calls to dvmHeapScanMarkedObjects() don't
+     * mess with them.
+     */
+    softReferences = gcHeap->softReferences;
+    weakReferences = gcHeap->weakReferences;
+    phantomReferences = gcHeap->phantomReferences;
+
+    /* All strongly-reachable objects have now been marked.
+     */
+    if (gcHeap->softReferenceCollectionState != SR_COLLECT_NONE) {
+        LOGD_HEAP("Handling soft references...");
+        dvmHeapHandleReferences(softReferences, REF_SOFT);
+        // markCount always zero
+
+        /* Now that we've tried collecting SoftReferences,
+         * fall back to not collecting them.  If the heap
+         * grows, we will start collecting again.
+         */
+        gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
+    } // else dvmHeapScanMarkedObjects() already marked the soft-reachable set
+    LOGD_HEAP("Handling weak references...");
+    dvmHeapHandleReferences(weakReferences, REF_WEAK);
+    // markCount always zero
+
+    /* Once all weak-reachable objects have been taken
+     * care of, any remaining unmarked objects can be finalized.
+     */
+    LOGD_HEAP("Finding finalizations...");
+    dvmHeapScheduleFinalizations();
+#if DVM_TRACK_HEAP_MARKING
+    finalizeMarkCount = gcHeap->markCount;
+    finalizeMarkSize = gcHeap->markSize;
+    gcHeap->markCount = 0;
+    gcHeap->markSize = 0;
+#endif
+
+    /* Any remaining objects that are not pending finalization
+     * could be phantom-reachable.  This will mark any phantom-reachable
+     * objects, as well as enqueue their references.
+     */
+    LOGD_HEAP("Handling phantom references...");
+    dvmHeapHandleReferences(phantomReferences, REF_PHANTOM);
+#if DVM_TRACK_HEAP_MARKING
+    phantomMarkCount = gcHeap->markCount;
+    phantomMarkSize = gcHeap->markSize;
+    gcHeap->markCount = 0;
+    gcHeap->markSize = 0;
+#endif
+
+//TODO: take care of JNI weak global references
+
+#if DVM_TRACK_HEAP_MARKING
+    LOGI_HEAP("Marked objects: %dB strong, %dB final, %dB phantom\n",
+            strongMarkSize, finalizeMarkSize, phantomMarkSize);
+#endif
+
+#ifdef WITH_DEADLOCK_PREDICTION
+    dvmDumpMonitorInfo("before sweep");
+#endif
+    LOGD_HEAP("Sweeping...");
+    dvmHeapSweepUnmarkedObjects(&numFreed, &sizeFreed);
+#ifdef WITH_DEADLOCK_PREDICTION
+    dvmDumpMonitorInfo("after sweep");
+#endif
+
+    LOGD_HEAP("Cleaning up...");
+    dvmHeapFinishMarkStep();
+
+    LOGD_HEAP("Done.");
+
+    /* Now's a good time to adjust the heap size, since
+     * we know what our utilization is.
+     *
+     * This doesn't actually resize any memory;
+     * it just lets the heap grow more when necessary.
+     */
+    dvmHeapSourceGrowForUtilization();
+    dvmHeapSizeChanged();
+
+#if WITH_HPROF
+    if (gcHeap->hprofContext != NULL) {
+        hprofFinishHeapDump(gcHeap->hprofContext);
+//TODO: write a HEAP_SUMMARY record
+        hprofShutdown(gcHeap->hprofContext);
+        gcHeap->hprofContext = NULL;
+    }
+#endif
+
+    /* Now that we've freed up the GC heap, return any large
+     * free chunks back to the system.  They'll get paged back
+     * in the next time they're used.  Don't do it immediately,
+     * though; if the process is still allocating a bunch of
+     * memory, we'll be taking a ton of page faults that we don't
+     * necessarily need to take.
+     *
+     * Cancel any old scheduled trims, and schedule a new one.
+     */
+    dvmScheduleHeapSourceTrim(5);  // in seconds
+
+#ifdef WITH_PROFILER
+    dvmMethodTraceGCEnd();
+#endif
+    LOGV_HEAP("GC finished -- resuming threads\n");
+
+    gcHeap->gcRunning = false;
+
+    dvmUnlockMutex(&gDvm.heapWorkerListLock);
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+
+    dvmResumeAllThreads(SUSPEND_FOR_GC);
+    if (oldThreadPriority != kInvalidPriority) {
+        if (setpriority(PRIO_PROCESS, 0, oldThreadPriority) != 0) {
+            LOGW_HEAP("Unable to reset priority to %d: %s\n",
+                oldThreadPriority, strerror(errno));
+        } else {
+            LOGD_HEAP("Reset priority to %d\n", oldThreadPriority);
+        }
+    }
+    gcElapsedTime = (dvmGetRelativeTimeUsec() - gcHeap->gcStartTime) / 1000;
+    if (gcElapsedTime < 10000) {
+        LOGD("GC freed %d objects / %zd bytes in %dms\n",
+                numFreed, sizeFreed, (int)gcElapsedTime);
+    } else {
+        LOGD("GC freed %d objects / %zd bytes in %d sec\n",
+                numFreed, sizeFreed, (int)(gcElapsedTime / 1000));
+    }
+    dvmLogGcStats(numFreed, sizeFreed, gcElapsedTime);
+
+    if (gcHeap->ddmHpifWhen != 0) {
+        LOGD_HEAP("Sending VM heap info to DDM\n");
+        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
+    }
+    if (gcHeap->ddmHpsgWhen != 0) {
+        LOGD_HEAP("Dumping VM heap to DDM\n");
+        dvmDdmSendHeapSegments(false, false);
+    }
+    if (gcHeap->ddmNhsgWhen != 0) {
+        LOGD_HEAP("Dumping native heap to DDM\n");
+        dvmDdmSendHeapSegments(false, true);
+    }
+}
+
+#if WITH_HPROF
+/*
+ * Perform garbage collection, writing heap information to the specified file.
+ *
+ * If "fileName" is NULL, a suitable name will be generated automatically.
+ */
+void hprofDumpHeap(const char* fileName)
+{
+    dvmLockMutex(&gDvm.gcHeapLock);
+
+    gDvm.gcHeap->hprofDumpOnGc = true;
+    gDvm.gcHeap->hprofFileName = fileName;
+    dvmCollectGarbageInternal(false);
+
+    dvmUnlockMutex(&gDvm.gcHeapLock);
+}
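+
+/* Usage sketch (illustrative only): request a collection that also
+ * writes an hprof dump.  Passing NULL lets the GC invent a
+ * /data/misc/heap-dump-* name; the explicit path below is purely
+ * hypothetical.
+ */
+#if 0
+    hprofDumpHeap(NULL);
+    hprofDumpHeap("/data/misc/example.hprof");
+#endif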
+
+void dvmHeapSetHprofGcScanState(hprof_heap_tag_t state, u4 threadSerialNumber)
+{
+    if (gDvm.gcHeap->hprofContext != NULL) {
+        hprofSetGcScanState(gDvm.gcHeap->hprofContext, state,
+                threadSerialNumber);
+    }
+}
+#endif
diff --git a/vm/alloc/Heap.h b/vm/alloc/Heap.h
new file mode 100644
index 0000000..cc29c40
--- /dev/null
+++ b/vm/alloc/Heap.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Internal heap functions
+ */
+#ifndef _DALVIK_ALLOC_HEAP
+#define _DALVIK_ALLOC_HEAP
+
+/*
+ * Initialize the GC heap.
+ *
+ * Returns true if successful, false otherwise.
+ */
+bool dvmHeapStartup(void);
+
+/*
+ * Initialization that needs to wait until after leaving zygote mode.
+ * This needs to be called before the first allocation or GC that
+ * happens after forking.
+ */
+bool dvmHeapStartupAfterZygote(void);
+
+/*
+ * Tear down the GC heap.
+ *
+ * Frees all memory allocated via dvmMalloc() as
+ * a side-effect.
+ */
+void dvmHeapShutdown(void);
+
+#if 0       // needs to be in Alloc.h so debug code can find it.
+/*
+ * Returns a number of bytes greater than or
+ * equal to the size of the named object in the heap.
+ *
+ * Specifically, it returns the size of the heap
+ * chunk which contains the object.
+ */
+size_t dvmObjectSizeInHeap(const Object *obj);
+#endif
+
+/*
+ * Run the garbage collector without doing any locking.
+ */
+void dvmCollectGarbageInternal(bool collectSoftReferences);
+
+#endif  // _DALVIK_ALLOC_HEAP
diff --git a/vm/alloc/HeapBitmap.c b/vm/alloc/HeapBitmap.c
new file mode 100644
index 0000000..2c75678
--- /dev/null
+++ b/vm/alloc/HeapBitmap.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "HeapBitmap.h"
+#include "clz.h"
+#include <limits.h>     // for ULONG_MAX
+#include <sys/mman.h>   // for madvise(), mmap()
+#include <cutils/ashmem.h>
+
+#define HB_ASHMEM_NAME "dalvik-heap-bitmap"
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+#define ALIGN_UP_TO_PAGE_SIZE(p) \
+    (((size_t)(p) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))
+
+#define LIKELY(exp)     (__builtin_expect((exp) != 0, true))
+#define UNLIKELY(exp)   (__builtin_expect((exp) != 0, false))
+
+/*
+ * Initialize a HeapBitmap so that it points to a bitmap large
+ * enough to cover a heap at <base> of <maxSize> bytes, where
+ * objects are guaranteed to be HB_OBJECT_ALIGNMENT-aligned.
+ */
+bool
+dvmHeapBitmapInit(HeapBitmap *hb, const void *base, size_t maxSize,
+        const char *name)
+{
+    void *bits;
+    size_t bitsLen;
+    size_t allocLen;
+    int fd;
+    char nameBuf[ASHMEM_NAME_LEN] = HB_ASHMEM_NAME;
+
+    assert(hb != NULL);
+
+    bitsLen = HB_OFFSET_TO_INDEX(maxSize) * sizeof(*hb->bits);
+    allocLen = ALIGN_UP_TO_PAGE_SIZE(bitsLen);   // required by ashmem
+
+    if (name != NULL) {
+        snprintf(nameBuf, sizeof(nameBuf), HB_ASHMEM_NAME "/%s", name);
+    }
+    fd = ashmem_create_region(nameBuf, allocLen);
+    if (fd < 0) {
+        LOGE("Could not create %zu-byte ashmem region \"%s\" to cover "
+                "%zu-byte heap (%d)\n",
+                allocLen, nameBuf, maxSize, fd);
+        return false;
+    }
+
+    bits = mmap(NULL, bitsLen, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+    close(fd);
+    if (bits == MAP_FAILED) {
+        LOGE("Could not mmap %d-byte ashmem region \"%s\"\n",
+                bitsLen, nameBuf);
+        return false;
+    }
+
+    memset(hb, 0, sizeof(*hb));
+    hb->bits = bits;
+    hb->bitsLen = bitsLen;
+    hb->base = (uintptr_t)base;
+    hb->max = hb->base - 1;
+
+    return true;
+}
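+
+/* Sizing note (informative): with HB_OBJECT_ALIGNMENT == 8, one bit
+ * covers eight bytes of heap, so bitsLen is maxSize/64 -- e.g. a
+ * 16 MiB heap needs a 256 KiB bitmap, rounded up to a whole page
+ * for the ashmem region.
+ */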
+
+/*
+ * Initialize <hb> so that it covers the same extent as <templateBitmap>.
+ */
+bool
+dvmHeapBitmapInitFromTemplate(HeapBitmap *hb, const HeapBitmap *templateBitmap,
+        const char *name)
+{
+    return dvmHeapBitmapInit(hb,
+            (void *)templateBitmap->base, HB_MAX_OFFSET(templateBitmap), name);
+}
+
+/*
+ * Initialize the bitmaps in <out> so that they cover the same extent as
+ * the corresponding bitmaps in <templates>.
+ */
+bool
+dvmHeapBitmapInitListFromTemplates(HeapBitmap out[], HeapBitmap templates[],
+    size_t numBitmaps, const char *name)
+{
+    size_t i;
+    char fullName[PATH_MAX];
+
+    fullName[sizeof(fullName)-1] = '\0';
+    for (i = 0; i < numBitmaps; i++) {
+        bool ok;
+
+        /* If two ashmem regions have the same name, only one gets
+         * the name when looking at the maps.
+         */
+        snprintf(fullName, sizeof(fullName)-1, "%s/%zd", name, i);
+
+        ok = dvmHeapBitmapInitFromTemplate(&out[i], &templates[i], fullName);
+        if (!ok) {
+            dvmHeapBitmapDeleteList(out, i);
+            return false;
+        }
+    }
+    return true;
+}
+
+/*
+ * Clean up any resources associated with the bitmap.
+ */
+void
+dvmHeapBitmapDelete(HeapBitmap *hb)
+{
+    assert(hb != NULL);
+
+    if (hb->bits != NULL) {
+        // Re-calculate the size we passed to mmap().
+        size_t allocLen = ALIGN_UP_TO_PAGE_SIZE(hb->bitsLen);
+        munmap((char *)hb->bits, allocLen);
+    }
+    memset(hb, 0, sizeof(*hb));
+}
+
+/*
+ * Clean up any resources associated with the bitmaps.
+ */
+void
+dvmHeapBitmapDeleteList(HeapBitmap hbs[], size_t numBitmaps)
+{
+    size_t i;
+
+    for (i = 0; i < numBitmaps; i++) {
+        dvmHeapBitmapDelete(&hbs[i]);
+    }
+}
+
+/*
+ * Fill the bitmap with zeroes.  Returns the bitmap's memory to
+ * the system as a side-effect.
+ */
+void
+dvmHeapBitmapZero(HeapBitmap *hb)
+{
+    assert(hb != NULL);
+
+    if (hb->bits != NULL) {
+        /* This returns the memory to the system.
+         * Successive page faults will return zeroed memory.
+         */
+        madvise(hb->bits, hb->bitsLen, MADV_DONTNEED);
+        hb->max = hb->base - 1;
+    }
+}
+
+/*
+ * Walk through the bitmaps in increasing address order, and find the
+ * object pointers that correspond to places where the bitmaps differ.
+ * Call <callback> zero or more times with lists of these object pointers.
+ *
+ * The <finger> argument to the callback indicates the next-highest
+ * address that hasn't been visited yet; bits set for objects whose
+ * addresses are less than <finger> are not guaranteed to be seen by
+ * the current XorWalk.  <finger> will be set to ULONG_MAX when the
+ * end of the bitmap is reached.
+ */
+bool
+dvmHeapBitmapXorWalk(const HeapBitmap *hb1, const HeapBitmap *hb2,
+        bool (*callback)(size_t numPtrs, void **ptrs,
+                         const void *finger, void *arg),
+        void *callbackArg)
+{
+    static const size_t kPointerBufSize = 128;
+    void *pointerBuf[kPointerBufSize];
+    void **pb = pointerBuf;
+    size_t index;
+    size_t i;
+
+#define FLUSH_POINTERBUF(finger_) \
+    do { \
+        if (!callback(pb - pointerBuf, (void **)pointerBuf, \
+                (void *)(finger_), callbackArg)) \
+        { \
+            LOGW("dvmHeapBitmapXorWalk: callback failed\n"); \
+            return false; \
+        } \
+        pb = pointerBuf; \
+    } while (false)
+
+#define DECODE_BITS(hb_, bits_, update_index_) \
+    do { \
+        if (UNLIKELY(bits_ != 0)) { \
+            static const unsigned long kHighBit = \
+                    (unsigned long)1 << (HB_BITS_PER_WORD - 1); \
+            const uintptr_t ptrBase = HB_INDEX_TO_OFFSET(i) + hb_->base; \
+/*TODO: hold onto ptrBase so we can shrink max later if possible */ \
+/*TODO: see if this is likely or unlikely */ \
+            while (bits_ != 0) { \
+                const int rshift = CLZ(bits_); \
+                bits_ &= ~(kHighBit >> rshift); \
+                *pb++ = (void *)(ptrBase + rshift * HB_OBJECT_ALIGNMENT); \
+            } \
+            /* Make sure that there are always enough slots available */ \
+            /* for an entire word of 1s. */ \
+            if (kPointerBufSize - (pb - pointerBuf) < HB_BITS_PER_WORD) { \
+                FLUSH_POINTERBUF(ptrBase + \
+                        HB_BITS_PER_WORD * HB_OBJECT_ALIGNMENT); \
+                if (update_index_) { \
+                    /* The callback may have caused hb_->max to grow. */ \
+                    index = HB_OFFSET_TO_INDEX(hb_->max - hb_->base); \
+                } \
+            } \
+        } \
+    } while (false)
+
+    assert(hb1 != NULL);
+    assert(hb1->bits != NULL);
+    assert(hb2 != NULL);
+    assert(hb2->bits != NULL);
+    assert(callback != NULL);
+
+    if (hb1->base != hb2->base) {
+        LOGW("dvmHeapBitmapXorWalk: bitmaps cover different heaps "
+                "(0x%08x != 0x%08x)\n",
+                (uintptr_t)hb1->base, (uintptr_t)hb2->base);
+        return false;
+    }
+    if (hb1->bitsLen != hb2->bitsLen) {
+        LOGW("dvmHeapBitmapXorWalk: size of bitmaps differ (%zd != %zd)\n",
+                hb1->bitsLen, hb2->bitsLen);
+        return false;
+    }
+    if (hb1->max < hb1->base && hb2->max < hb2->base) {
+        /* Easy case; both are obviously empty.
+         */
+        return true;
+    }
+
+    /* First, walk along the section of the bitmaps that may be the same.
+     */
+    if (hb1->max >= hb1->base && hb2->max >= hb2->base) {
+        unsigned long int *p1, *p2;
+        uintptr_t offset;
+
+        offset = ((hb1->max < hb2->max) ? hb1->max : hb2->max) - hb1->base;
+//TODO: keep track of which (and whether) one is longer for later
+        index = HB_OFFSET_TO_INDEX(offset);
+
+        p1 = hb1->bits;
+        p2 = hb2->bits;
+        for (i = 0; i <= index; i++) {
+//TODO: unroll this. pile up a few in locals?
+            unsigned long int diff = *p1++ ^ *p2++;
+            DECODE_BITS(hb1, diff, false);
+//BUG: if the callback was called, either max could have changed.
+        }
+        /* The next index to look at.
+         */
+        index++;
+    } else {
+        /* One of the bitmaps is empty.
+         */
+        index = 0;
+    }
+
+    /* If one bitmap's max is larger, walk through the rest of the
+     * set bits.
+     */
+    const HeapBitmap *longHb;
+    unsigned long int *p;
+    //TODO: may be the same size, in which case this is wasted work
+    longHb = (hb1->max > hb2->max) ? hb1 : hb2;
+    i = index;
+    index = HB_OFFSET_TO_INDEX(longHb->max - longHb->base);
+    p = longHb->bits + i;
+    for (/* i = i */; i <= index; i++) {
+//TODO: unroll this
+        unsigned long bits = *p++;
+        DECODE_BITS(longHb, bits, true);
+    }
+
+    if (pb > pointerBuf) {
+        /* Set the finger to the end of the heap (rather than longHb->max)
+         * so that the callback doesn't expect to be called again
+         * if it happens to change the current max.
+         */
+        FLUSH_POINTERBUF(longHb->base + HB_MAX_OFFSET(longHb));
+    }
+
+    return true;
+
+#undef FLUSH_POINTERBUF
+#undef DECODE_BITS
+}
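+
+/* A minimal callback sketch, assuming the caller only wants to count
+ * the objects whose bits differ; "liveHb"/"markHb" below are
+ * hypothetical bitmaps.
+ */
+#if 0
+static bool countDiffCallback(size_t numPtrs, void **ptrs,
+        const void *finger, void *arg)
+{
+    /* ptrs[0..numPtrs-1] hold object addresses in ascending order;
+     * finger is the next unvisited address (ULONG_MAX at the end).
+     */
+    *(size_t *)arg += numPtrs;
+    return true;    /* returning false aborts the walk */
+}
+
+/*  size_t diffCount = 0;
+ *  dvmHeapBitmapXorWalk(&liveHb, &markHb, countDiffCallback, &diffCount);
+ */
+#endif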
+
+/*
+ * Fills outIndexList with indices so that for all i:
+ *
+ *   hb[outIndexList[i]].base < hb[outIndexList[i+1]].base
+ */
+static void
+createSortedBitmapIndexList(const HeapBitmap hbs[], size_t numBitmaps,
+        size_t outIndexList[])
+{
+    int i, j;
+
+    /* numBitmaps is usually 2 or 3, so use a simple sort */
+    for (i = 0; i < (int) numBitmaps; i++) {
+        outIndexList[i] = i;
+        for (j = 0; j < i; j++) {
+            /* Compare through the index list; earlier swaps may have
+             * reordered entries, so outIndexList[j] need not equal j.
+             */
+            if (hbs[outIndexList[j]].base > hbs[outIndexList[i]].base) {
+                size_t tmp = outIndexList[i];
+                outIndexList[i] = outIndexList[j];
+                outIndexList[j] = tmp;
+            }
+        }
+    }
+}
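+
+/* e.g. bases {0x300000, 0x100000, 0x200000} yield
+ * outIndexList = {1, 2, 0}: the heaps are then visited in
+ * ascending base-address order.
+ */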
+
+/*
+ * Similar to dvmHeapBitmapXorWalk(), but compare multiple bitmaps.
+ * Regardless of the order of the arrays, the bitmaps will be visited
+ * in address order, so that finger will increase monotonically.
+ */
+bool
+dvmHeapBitmapXorWalkLists(const HeapBitmap hbs1[], const HeapBitmap hbs2[],
+        size_t numBitmaps,
+        bool (*callback)(size_t numPtrs, void **ptrs,
+                         const void *finger, void *arg),
+        void *callbackArg)
+{
+    size_t indexList[numBitmaps];
+    size_t i;
+
+    /* Sort the bitmaps by address.  Assume that the two lists contain
+     * congruent bitmaps.
+     */
+    createSortedBitmapIndexList(hbs1, numBitmaps, indexList);
+
+    /* Walk each pair of bitmaps, lowest address first.
+     */
+    for (i = 0; i < numBitmaps; i++) {
+        bool ok;
+
+        ok = dvmHeapBitmapXorWalk(&hbs1[indexList[i]], &hbs2[indexList[i]],
+                callback, callbackArg);
+        if (!ok) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+/*
+ * Similar to dvmHeapBitmapXorWalk(), but visit the set bits
+ * in a single bitmap.
+ */
+bool
+dvmHeapBitmapWalk(const HeapBitmap *hb,
+        bool (*callback)(size_t numPtrs, void **ptrs,
+                         const void *finger, void *arg),
+        void *callbackArg)
+{
+    /* Create an empty bitmap with the same extent as <hb>.
+     * Don't actually allocate any memory.
+     */
+    HeapBitmap emptyHb = *hb;
+    emptyHb.max = emptyHb.base - 1; // empty
+    emptyHb.bits = (void *)1;       // non-NULL but intentionally bad
+
+    return dvmHeapBitmapXorWalk(hb, &emptyHb, callback, callbackArg);
+}
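+
+/* The bogus .bits pointer above is safe: because emptyHb.max is less
+ * than emptyHb.base, dvmHeapBitmapXorWalk() never dereferences the
+ * empty bitmap's words; the non-NULL value only satisfies its assert().
+ */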
+
+/*
+ * Similar to dvmHeapBitmapXorWalkList(), but visit the set bits
+ * in a single list of bitmaps.  Regardless of the order of the array,
+ * the bitmaps will be visited in address order, so that finger will
+ * increase monotonically.
+ */
+bool dvmHeapBitmapWalkList(const HeapBitmap hbs[], size_t numBitmaps,
+        bool (*callback)(size_t numPtrs, void **ptrs,
+                         const void *finger, void *arg),
+        void *callbackArg)
+{
+    size_t indexList[numBitmaps];
+    size_t i;
+
+    /* Sort the bitmaps by address.
+     */
+    createSortedBitmapIndexList(hbs, numBitmaps, indexList);
+
+    /* Walk each bitmap, lowest address first.
+     */
+    for (i = 0; i < numBitmaps; i++) {
+        bool ok;
+
+        ok = dvmHeapBitmapWalk(&hbs[indexList[i]], callback, callbackArg);
+        if (!ok) {
+            return false;
+        }
+    }
+
+    return true;
+}
diff --git a/vm/alloc/HeapBitmap.h b/vm/alloc/HeapBitmap.h
new file mode 100644
index 0000000..b3e61c5
--- /dev/null
+++ b/vm/alloc/HeapBitmap.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _DALVIK_HEAP_BITMAP
+#define _DALVIK_HEAP_BITMAP
+
+#include <stdint.h>
+#include <assert.h>
+
+#define HB_OBJECT_ALIGNMENT 8
+#define HB_BITS_PER_WORD    (sizeof (unsigned long int) * 8)
+
+/* <offset> is the difference from .base to a pointer address.
+ * <index> is the index of .bits that contains the bit representing
+ *         <offset>.
+ */
+#define HB_OFFSET_TO_INDEX(offset_) \
+    ((uintptr_t)(offset_) / HB_OBJECT_ALIGNMENT / HB_BITS_PER_WORD)
+#define HB_INDEX_TO_OFFSET(index_) \
+    ((uintptr_t)(index_) * HB_OBJECT_ALIGNMENT * HB_BITS_PER_WORD)
+
+/* Pack the bits in backwards so they come out in address order
+ * when using CLZ.
+ */
+#define HB_OFFSET_TO_MASK(offset_) \
+    (1 << \
+        (31-(((uintptr_t)(offset_) / HB_OBJECT_ALIGNMENT) % HB_BITS_PER_WORD)))
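+
+/* Worked example, assuming 32-bit words: offset 0 lands in index 0
+ * with mask 1<<31; offset 8 in index 0 with mask 1<<30; offset
+ * 0x1000 in index 16 with mask 1<<31.  CLZ therefore peels set bits
+ * off a word in ascending address order.
+ */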
+
+/* Return the maximum offset (exclusive) that <hb> can represent.
+ */
+#define HB_MAX_OFFSET(hb_) \
+    HB_INDEX_TO_OFFSET((hb_)->bitsLen / sizeof(*(hb_)->bits))
+
+#define HB_INLINE_PROTO(p) \
+    static inline p __attribute__((always_inline)); \
+    static inline p
+
+
+typedef struct {
+    /* The bitmap data, which points to an mmap()ed area of zeroed
+     * anonymous memory.
+     */
+    unsigned long int *bits;
+
+    /* The size of the memory pointed to by bits, in bytes.
+     */
+    size_t bitsLen;
+
+    /* The base address, which corresponds to the first bit in
+     * the bitmap.
+     */
+    uintptr_t base;
+
+    /* The highest pointer value ever returned by an allocation
+     * from this heap.  I.e., the highest address that may correspond
+     * to a set bit.  If there are no bits set, (max < base).
+     */
+    uintptr_t max;
+} HeapBitmap;
+
+
+/*
+ * Initialize a HeapBitmap so that it points to a bitmap large
+ * enough to cover a heap at <base> of <maxSize> bytes, where
+ * objects are guaranteed to be HB_OBJECT_ALIGNMENT-aligned.
+ */
+bool dvmHeapBitmapInit(HeapBitmap *hb, const void *base, size_t maxSize,
+        const char *name);
+
+/*
+ * Initialize <hb> so that it covers the same extent as <templateBitmap>.
+ */
+bool dvmHeapBitmapInitFromTemplate(HeapBitmap *hb,
+        const HeapBitmap *templateBitmap, const char *name);
+
+/*
+ * Initialize the bitmaps in <out> so that they cover the same extent as
+ * the corresponding bitmaps in <templates>.
+ */
+bool dvmHeapBitmapInitListFromTemplates(HeapBitmap out[],
+    HeapBitmap templates[], size_t numBitmaps, const char *name);
+
+/*
+ * Clean up any resources associated with the bitmap.
+ */
+void dvmHeapBitmapDelete(HeapBitmap *hb);
+
+/*
+ * Clean up any resources associated with the bitmaps.
+ */
+void dvmHeapBitmapDeleteList(HeapBitmap hbs[], size_t numBitmaps);
+
+/*
+ * Fill the bitmap with zeroes.  Returns the bitmap's memory to
+ * the system as a side-effect.
+ */
+void dvmHeapBitmapZero(HeapBitmap *hb);
+
+/*
+ * Walk through the bitmaps in increasing address order, and find the
+ * object pointers that correspond to places where the bitmaps differ.
+ * Call <callback> zero or more times with lists of these object pointers.
+ *
+ * The <finger> argument to the callback indicates the next-highest
+ * address that hasn't been visited yet; bits set for objects whose
+ * addresses are less than <finger> are not guaranteed to be seen by
+ * the current XorWalk.  <finger> will be set to ULONG_MAX when the
+ * end of the bitmap is reached.
+ */
+bool dvmHeapBitmapXorWalk(const HeapBitmap *hb1, const HeapBitmap *hb2,
+        bool (*callback)(size_t numPtrs, void **ptrs,
+                         const void *finger, void *arg),
+        void *callbackArg);
+
+/*
+ * Similar to dvmHeapBitmapXorWalk(), but compare multiple bitmaps.
+ * Regardless of the order of the arrays, the bitmaps will be visited
+ * in address order, so that finger will increase monotonically.
+ */
+bool dvmHeapBitmapXorWalkLists(const HeapBitmap hbs1[], const HeapBitmap hbs2[],
+        size_t numBitmaps,
+        bool (*callback)(size_t numPtrs, void **ptrs,
+                         const void *finger, void *arg),
+        void *callbackArg);
+
+/*
+ * Similar to dvmHeapBitmapXorWalk(), but visit the set bits
+ * in a single bitmap.
+ */
+bool dvmHeapBitmapWalk(const HeapBitmap *hb,
+        bool (*callback)(size_t numPtrs, void **ptrs,
+                         const void *finger, void *arg),
+        void *callbackArg);
+
+/*
+ * Similar to dvmHeapBitmapXorWalkList(), but visit the set bits
+ * in a single list of bitmaps.  Regardless of the order of the array,
+ * the bitmaps will be visited in address order, so that finger will
+ * increase monotonically.
+ */
+bool dvmHeapBitmapWalkList(const HeapBitmap hbs[], size_t numBitmaps,
+        bool (*callback)(size_t numPtrs, void **ptrs,
+                         const void *finger, void *arg),
+        void *callbackArg);
+
+/*
+ * Return true iff <obj> is within the range of pointers that
+ * have had corresponding bits set in this bitmap.
+ */
+HB_INLINE_PROTO(
+    bool
+    dvmHeapBitmapMayContainObject(const HeapBitmap *hb,
+            const void *obj)
+)
+{
+    const uintptr_t p = (const uintptr_t)obj;
+
+    assert((p & (HB_OBJECT_ALIGNMENT - 1)) == 0);
+
+    return p >= hb->base && p <= hb->max;
+}
+
+/*
+ * Return true iff <obj> is within the range of pointers that this
+ * bitmap could potentially cover, even if a bit has not been set
+ * for it.
+ */
+HB_INLINE_PROTO(
+    bool
+    dvmHeapBitmapCoversAddress(const HeapBitmap *hb, const void *obj)
+)
+{
+    assert(hb != NULL);
+
+    if (obj != NULL) {
+        const uintptr_t offset = (uintptr_t)obj - hb->base;
+        const size_t index = HB_OFFSET_TO_INDEX(offset);
+        return index < hb->bitsLen / sizeof(*hb->bits);
+    }
+    return false;
+}
+
+/*
+ * Internal function; do not call directly.
+ */
+HB_INLINE_PROTO(
+    unsigned long int
+    _heapBitmapModifyObjectBit(HeapBitmap *hb, const void *obj,
+            bool setBit, bool returnOld)
+)
+{
+    const uintptr_t offset = (uintptr_t)obj - hb->base;
+    const size_t index = HB_OFFSET_TO_INDEX(offset);
+    const unsigned long int mask = HB_OFFSET_TO_MASK(offset);
+
+#ifndef NDEBUG
+    assert(hb->bits != NULL);
+    assert((uintptr_t)obj >= hb->base);
+    assert(index < hb->bitsLen / sizeof(*hb->bits));
+#endif
+
+    if (setBit) {
+        if ((uintptr_t)obj > hb->max) {
+            hb->max = (uintptr_t)obj;
+        }
+        if (returnOld) {
+            unsigned long int *p = hb->bits + index;
+            const unsigned long int word = *p;
+            *p |= mask;
+            return word & mask;
+        } else {
+            hb->bits[index] |= mask;
+        }
+    } else {
+        hb->bits[index] &= ~mask;
+    }
+    return false;
+}
+
+/*
+ * Sets the bit corresponding to <obj>, and returns the previous value
+ * of that bit (as zero or non-zero). Does no range checking to see if
+ * <obj> is outside of the coverage of the bitmap.
+ *
+ * NOTE: casting this value to a bool is dangerous, because higher
+ * set bits will be lost.
+ */
+HB_INLINE_PROTO(
+    unsigned long int
+    dvmHeapBitmapSetAndReturnObjectBit(HeapBitmap *hb, const void *obj)
+)
+{
+    return _heapBitmapModifyObjectBit(hb, obj, true, true);
+}
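+
+/* e.g. compare the result against zero rather than casting:
+ *
+ *     if (dvmHeapBitmapSetAndReturnObjectBit(hb, obj) != 0) {
+ *         ... the object was already marked ...
+ *     }
+ */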
+
+/*
+ * Like dvmHeapBitmapSetAndReturnObjectBit(), but sets/returns the bit
+ * in the appropriate bitmap.  Results are undefined if <obj> is not
+ * covered by any bitmap.
+ */
+HB_INLINE_PROTO(
+    unsigned long int
+    dvmHeapBitmapSetAndReturnObjectBitInList(HeapBitmap hbs[],
+            size_t numBitmaps, const void *obj)
+)
+{
+    size_t i;
+
+    for (i = 0; i < numBitmaps; i++) {
+        if (dvmHeapBitmapCoversAddress(&hbs[i], obj)) {
+            return dvmHeapBitmapSetAndReturnObjectBit(&hbs[i], obj);
+        }
+    }
+
+    assert(!"object not covered by any bitmap");
+    return false;
+}
+
+/*
+ * Sets the bit corresponding to <obj>, and widens the range of seen
+ * pointers if necessary.  Does no range checking.
+ */
+HB_INLINE_PROTO(
+    void
+    dvmHeapBitmapSetObjectBit(HeapBitmap *hb, const void *obj)
+)
+{
+    (void)_heapBitmapModifyObjectBit(hb, obj, true, false);
+}
+
+/*
+ * Clears the bit corresponding to <obj>.  Does no range checking.
+ */
+HB_INLINE_PROTO(
+    void
+    dvmHeapBitmapClearObjectBit(HeapBitmap *hb, const void *obj)
+)
+{
+    (void)_heapBitmapModifyObjectBit(hb, obj, false, false);
+}
+
+/*
+ * Returns the current value of the bit corresponding to <obj>,
+ * as zero or non-zero.  Does no range checking.
+ *
+ * NOTE: casting this value to a bool is dangerous, because higher
+ * set bits will be lost.
+ */
+HB_INLINE_PROTO(
+    unsigned long int
+    dvmHeapBitmapIsObjectBitSet(const HeapBitmap *hb, const void *obj)
+)
+{
+    assert(dvmHeapBitmapCoversAddress(hb, obj));
+    assert(hb->bits != NULL);
+    assert((uintptr_t)obj >= hb->base);
+
+    if ((uintptr_t)obj <= hb->max) {
+        const uintptr_t offset = (uintptr_t)obj - hb->base;
+        return hb->bits[HB_OFFSET_TO_INDEX(offset)] & HB_OFFSET_TO_MASK(offset);
+    } else {
+        return 0;
+    }
+}
+
+/*
+ * Looks through the list of bitmaps and returns the current value of the
+ * bit corresponding to <obj>, which may be covered by any of the bitmaps.
+ * Does no range checking.
+ */
+HB_INLINE_PROTO(
+    unsigned long int
+    dvmHeapBitmapIsObjectBitSetInList(const HeapBitmap hbs[], size_t numBitmaps,
+            const void *obj)
+)
+{
+    size_t i;
+
+    for (i = 0; i < numBitmaps; i++) {
+        if (dvmHeapBitmapCoversAddress(&hbs[i], obj)) {
+            return dvmHeapBitmapIsObjectBitSet(&hbs[i], obj);
+        }
+    }
+    return false;
+}
+
+#undef HB_INLINE_PROTO
+
+#endif  // _DALVIK_HEAP_BITMAP
diff --git a/vm/alloc/HeapDebug.c b/vm/alloc/HeapDebug.c
new file mode 100644
index 0000000..fc3655f
--- /dev/null
+++ b/vm/alloc/HeapDebug.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fcntl.h>
+#include <malloc.h>
+
+#include "Dalvik.h"
+#include "HeapInternal.h"
+#include "HeapSource.h"
+#include "Float12.h"
+
+int dvmGetHeapDebugInfo(HeapDebugInfoType info)
+{
+    switch (info) {
+    case kVirtualHeapSize:
+        return (int)dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);
+    case kVirtualHeapAllocated:
+        return (int)dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
+    default:
+        return -1;
+    }
+}
+
+/* Looks up the cmdline for the process and tries to find
+ * the most descriptive five characters, then inserts the
+ * short name into the provided event value.
+ */
+#define PROC_NAME_LEN 5
+static void insertProcessName(long long *ep)
+{
+    static bool foundRealName = false;
+    static char name[PROC_NAME_LEN] = { 'X', 'X', 'X', 'X', 'X' };
+    long long event = *ep;
+
+    if (!foundRealName) {
+        int fd = open("/proc/self/cmdline", O_RDONLY);
+        if (fd > 0) {
+            char buf[128];
+            ssize_t n = read(fd, buf, sizeof(buf) - 1);
+            close(fd);
+            if (n > 0) {
+                memset(name, 0, sizeof(name));
+                if (n <= PROC_NAME_LEN) {
+                    // The whole name fits.
+                    memcpy(name, buf, n);
+                } else {
+                    /* We need to truncate.  The name will look something
+                     * like "com.android.home".  Favor the characters
+                     * immediately following the last dot.
+                     */
+                    buf[n] = '\0';
+                    char *dot = strrchr(buf, '.');
+                    if (dot == NULL) {
+                        /* Or, look for a slash, in case it's something like
+                         * "/system/bin/runtime".
+                         */
+                        dot = strrchr(buf, '/');
+                    }
+                    if (dot != NULL) {
+                        dot++;  // Skip the dot
+                        size_t dotlen = strlen(dot);
+                        if (dotlen < PROC_NAME_LEN) {
+                            /* Use all available characters.  We know that
+                             * n > PROC_NAME_LEN from the check above.
+                             */
+                            dot -= PROC_NAME_LEN - dotlen;
+                        }
+                        strncpy(name, dot, PROC_NAME_LEN);
+                    } else {
+                        // No dot; just use the leading characters.
+                        memcpy(name, buf, PROC_NAME_LEN);
+                    }
+                }
+                if (strcmp(buf, "zygote") != 0) {
+                    /* If the process is no longer called "zygote",
+                     * cache this name.
+                     */
+                    foundRealName = true;
+                }
+            }
+        }
+    }
+
+    event &= ~(0xffffffffffLL << 24);
+    event |= (long long)name[0] << 56;
+    event |= (long long)name[1] << 48;
+    event |= (long long)name[2] << 40;
+    event |= (long long)name[3] << 32;
+    event |= (long long)name[4] << 24;
+
+    *ep = event;
+}
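+
+/* e.g. "com.android.home" becomes ".home": the last dot is found,
+ * and since "home" is one character short of PROC_NAME_LEN the
+ * window is widened one character to the left.
+ */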
+
+// See device/data/etc/event-log-tags
+#define EVENT_LOG_TAG_dvm_gc_info 20001
+#define EVENT_LOG_TAG_dvm_gc_madvise_info 20002
+
+void dvmLogGcStats(size_t numFreed, size_t sizeFreed, size_t gcTimeMs)
+{
+    const GcHeap *gcHeap = gDvm.gcHeap;
+    size_t perHeapActualSize[HEAP_SOURCE_MAX_HEAP_COUNT],
+           perHeapAllowedSize[HEAP_SOURCE_MAX_HEAP_COUNT],
+           perHeapNumAllocated[HEAP_SOURCE_MAX_HEAP_COUNT],
+           perHeapSizeAllocated[HEAP_SOURCE_MAX_HEAP_COUNT];
+    unsigned char eventBuf[1 + (1 + sizeof(long long)) * 4];
+    size_t actualSize, allowedSize, numAllocated, sizeAllocated;
+    size_t i;
+    size_t softLimit = dvmHeapSourceGetIdealFootprint();
+    size_t nHeaps = dvmHeapSourceGetNumHeaps();
+
+    /* Enough to quiet gcc's uninitialized-variable check */
+    perHeapActualSize[0] = perHeapAllowedSize[0] = perHeapNumAllocated[0] =
+                           perHeapSizeAllocated[0] = 0;
+    actualSize = dvmHeapSourceGetValue(HS_FOOTPRINT, perHeapActualSize, 
+                                       HEAP_SOURCE_MAX_HEAP_COUNT);
+    allowedSize = dvmHeapSourceGetValue(HS_ALLOWED_FOOTPRINT, 
+                      perHeapAllowedSize, HEAP_SOURCE_MAX_HEAP_COUNT);
+    numAllocated = dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED,
+                      perHeapNumAllocated, HEAP_SOURCE_MAX_HEAP_COUNT);
+    sizeAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED,
+                      perHeapSizeAllocated, HEAP_SOURCE_MAX_HEAP_COUNT);
+
+    /*
+     * Construct the first 64-bit value to write to the log.
+     * Global information:
+     *
+     * [63   ] Must be zero
+     * [62-24] ASCII process identifier
+     * [23-12] GC time in ms
+     * [11- 0] Bytes freed
+     *
+     */
+    long long event0;
+    event0 = 0LL << 63 |
+            (long long)intToFloat12(gcTimeMs) << 12 |
+            (long long)intToFloat12(sizeFreed);
+    insertProcessName(&event0);
+
+    /*
+     * Aggregated heap stats:
+     *
+     * [63-62] 10
+     * [61-60] Reserved; must be zero
+     * [59-48] Objects freed
+     * [47-36] Actual size (current footprint)
+     * [35-24] Allowed size (current hard max)
+     * [23-12] Objects allocated
+     * [11- 0] Bytes allocated
+     */
+    long long event1;
+    event1 = 2LL << 62 |
+            (long long)intToFloat12(numFreed) << 48 |
+            (long long)intToFloat12(actualSize) << 36 |
+            (long long)intToFloat12(allowedSize) << 24 |
+            (long long)intToFloat12(numAllocated) << 12 |
+            (long long)intToFloat12(sizeAllocated);
+
+    /*
+     * Report the current state of the zygote heap(s).
+     *
+     * The active heap is always heap[0].  We can be in one of three states
+     * at present:
+     *
+     *  (1) Still in the zygote.  Zygote using heap[0].
+     *  (2) In the zygote, when the first child is started.  We created a
+     *      new heap just before the first fork() call, so the original
+     *      "zygote heap" is now heap[1], and we have a small heap[0] for
+     *      anything we do from here on.
+     *  (3) In an app process.  The app gets a new heap[0], and can also
+     *      see the two zygote heaps [1] and [2] (probably unwise to
+     *      assume any specific ordering).
+     *
+     * So if nHeaps == 1, we want the stats from heap[0]; else we want
+     * the sum of the values from heap[1] to heap[nHeaps-1].
+     *
+     *
+     * Zygote heap stats (except for the soft limit, which belongs to the
+     * active heap):
+     *
+     * [63-62] 11
+     * [61-60] Reserved; must be zero
+     * [59-48] Soft Limit (for the active heap)
+     * [47-36] Actual size (current footprint)
+     * [35-24] Allowed size (current hard max)
+     * [23-12] Objects allocated
+     * [11- 0] Bytes allocated
+     */
+    long long event2;
+    size_t zActualSize, zAllowedSize, zNumAllocated, zSizeAllocated;
+    int firstHeap = (nHeaps == 1) ? 0 : 1;
+    size_t hh;
+
+    zActualSize = zAllowedSize = zNumAllocated = zSizeAllocated = 0;
+    for (hh = firstHeap; hh < nHeaps; hh++) {
+        zActualSize += perHeapActualSize[hh];
+        zAllowedSize += perHeapAllowedSize[hh];
+        zNumAllocated += perHeapNumAllocated[hh];
+        zSizeAllocated += perHeapSizeAllocated[hh];
+    }
+    event2 = 3LL << 62 |
+            (long long)intToFloat12(softLimit) << 48 |
+            (long long)intToFloat12(zActualSize) << 36 |
+            (long long)intToFloat12(zAllowedSize) << 24 |
+            (long long)intToFloat12(zNumAllocated) << 12 |
+            (long long)intToFloat12(zSizeAllocated);
+
+    /*
+     * Report the current external allocation stats and the native heap
+     * summary.
+     *
+     * [63-48] Reserved; must be zero (TODO: put new data in these slots)
+     * [47-36] dlmalloc_footprint
+     * [35-24] mallinfo: total allocated space
+     * [23-12] External byte limit
+     * [11- 0] External bytes allocated
+     */
+    long long event3;
+    size_t externalLimit, externalBytesAllocated;
+    size_t uordblks, footprint;
+
+#if 0
+    /*
+     * This adds 2-5msec to the GC cost on a DVT, or about 2-3% of the cost
+     * of a GC, so it's not horribly expensive but it's not free either.
+     */
+    extern size_t dlmalloc_footprint(void);
+    struct mallinfo mi;
+    //u8 start, end;
+
+    //start = dvmGetRelativeTimeNsec();
+    mi = mallinfo();
+    uordblks = mi.uordblks;
+    footprint = dlmalloc_footprint();
+    //end = dvmGetRelativeTimeNsec();
+    //LOGD("mallinfo+footprint took %dusec; used=%zd footprint=%zd\n",
+    //    (int)((end - start) / 1000), mi.uordblks, footprint);
+#else
+    uordblks = footprint = 0;
+#endif
+
+    externalLimit =
+            dvmHeapSourceGetValue(HS_EXTERNAL_LIMIT, NULL, 0);
+    externalBytesAllocated =
+            dvmHeapSourceGetValue(HS_EXTERNAL_BYTES_ALLOCATED, NULL, 0);
+    event3 =
+            (long long)intToFloat12(footprint) << 36 |
+            (long long)intToFloat12(uordblks) << 24 |
+            (long long)intToFloat12(externalLimit) << 12 |
+            (long long)intToFloat12(externalBytesAllocated);
+
+    /* Build the event data.
+     * [ 0: 0] item count (4)
+     * [ 1: 1] EVENT_TYPE_LONG
+     * [ 2: 9] event0
+     * [10:10] EVENT_TYPE_LONG
+     * [11:18] event1
+     * [19:19] EVENT_TYPE_LONG
+     * [20:27] event2
+     * [28:28] EVENT_TYPE_LONG
+     * [29:36] event3
+     */
+    unsigned char *c = eventBuf;
+    *c++ = 4;
+    *c++ = EVENT_TYPE_LONG;
+    memcpy(c, &event0, sizeof(event0));
+    c += sizeof(event0);
+    *c++ = EVENT_TYPE_LONG;
+    memcpy(c, &event1, sizeof(event1));
+    c += sizeof(event1);
+    *c++ = EVENT_TYPE_LONG;
+    memcpy(c, &event2, sizeof(event2));
+    c += sizeof(event2);
+    *c++ = EVENT_TYPE_LONG;
+    memcpy(c, &event3, sizeof(event3));
+
+    (void) android_btWriteLog(EVENT_LOG_TAG_dvm_gc_info, EVENT_TYPE_LIST,
+            eventBuf, sizeof(eventBuf));
+}
+
+void dvmLogMadviseStats(size_t madvisedSizes[], size_t arrayLen)
+{
+    unsigned char eventBuf[1 + (1 + sizeof(int)) * 2];
+    size_t total, zyg;
+    size_t firstHeap, i;
+    size_t nHeaps = dvmHeapSourceGetNumHeaps();
+
+    assert(arrayLen >= nHeaps);
+
+    firstHeap = nHeaps > 1 ? 1 : 0;
+    total = 0;
+    zyg = 0;
+    for (i = 0; i < nHeaps; i++) {
+        total += madvisedSizes[i];
+        if (i >= firstHeap) {
+            zyg += madvisedSizes[i];
+        }
+    }
+
+    /* Build the event data.
+     * [ 0: 0] item count (2)
+     * [ 1: 1] EVENT_TYPE_INT
+     * [ 2: 5] total madvise byte count
+     * [ 6: 6] EVENT_TYPE_INT
+     * [ 7:10] zygote heap madvise byte count
+     */
+    unsigned char *c = eventBuf;
+    *c++ = 2;
+    *c++ = EVENT_TYPE_INT;
+    memcpy(c, &total, sizeof(total));
+    c += sizeof(total);
+    *c++ = EVENT_TYPE_INT;
+    memcpy(c, &zyg, sizeof(zyg));
+    c += sizeof(zyg);
+
+    (void) android_btWriteLog(EVENT_LOG_TAG_dvm_gc_madvise_info,
+            EVENT_TYPE_LIST, eventBuf, sizeof(eventBuf));
+}
+
+#if 0
+#include <errno.h>
+#include <stdio.h>
+
+typedef struct HeapDumpContext {
+    FILE *fp;
+    void *chunkStart;
+    size_t chunkLen;
+    bool chunkFree;
+} HeapDumpContext;
+
+static void
+dump_context(const HeapDumpContext *ctx)
+{
+    fprintf(ctx->fp, "0x%08x %12.12zd %s\n", (uintptr_t)ctx->chunkStart,
+            ctx->chunkLen, ctx->chunkFree ? "FREE" : "USED");
+}
+
+static void
+heap_chunk_callback(const void *chunkptr, size_t chunklen,
+                    const void *userptr, size_t userlen, void *arg)
+{
+    HeapDumpContext *ctx = (HeapDumpContext *)arg;
+    bool chunkFree = (userptr == NULL);
+
+    if (chunkFree != ctx->chunkFree ||
+            ((char *)ctx->chunkStart + ctx->chunkLen) != chunkptr)
+    {
+        /* The new chunk is of a different type or isn't
+         * contiguous with the current chunk.  Dump the
+         * old one and start a new one.
+         */
+        if (ctx->chunkStart != NULL) {
+            /* It's not the first chunk. */
+            dump_context(ctx);
+        }
+        ctx->chunkStart = (void *)chunkptr;
+        ctx->chunkLen = chunklen;
+        ctx->chunkFree = chunkFree;
+    } else {
+        /* Extend the current chunk.
+         */
+        ctx->chunkLen += chunklen;
+    }
+}
+
+/* Dumps free and used ranges, as text, to the named file.
+ */
+void dvmDumpHeapToFile(const char *fileName)
+{
+    HeapDumpContext ctx;
+    FILE *fp;
+
+    fp = fopen(fileName, "w+");
+    if (fp == NULL) {
+        LOGE("Can't open %s for writing: %s\n", fileName, strerror(errno));
+        return;
+    }
+    LOGW("Dumping heap to %s...\n", fileName);
+
+    fprintf(fp, "==== Dalvik heap dump ====\n");
+    memset(&ctx, 0, sizeof(ctx));
+    ctx.fp = fp;
+    dvmHeapSourceWalk(heap_chunk_callback, (void *)&ctx);
+    dump_context(&ctx);
+    fprintf(fp, "==== end heap dump ====\n");
+
+    LOGW("Dumped heap to %s.\n", fileName);
+
+    fclose(fp);
+}
+#endif
diff --git a/vm/alloc/HeapDebug.h b/vm/alloc/HeapDebug.h
new file mode 100644
index 0000000..19f4b45
--- /dev/null
+++ b/vm/alloc/HeapDebug.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _DALVIK_HEAPDEBUG
+#define _DALVIK_HEAPDEBUG
+
+typedef enum HeapDebugInfoType {
+    kVirtualHeapSize = 0,
+    kNativeHeapSize = 1,
+    kVirtualHeapAllocated = 2,
+    kNativeHeapAllocated = 3,
+} HeapDebugInfoType;
+
+/* Return the specified value.
+ * Returns -1 if the type is unknown.
+ */
+int dvmGetHeapDebugInfo(HeapDebugInfoType info);
+
+#endif  // _DALVIK_HEAPDEBUG
diff --git a/vm/alloc/HeapInternal.h b/vm/alloc/HeapInternal.h
new file mode 100644
index 0000000..7851983
--- /dev/null
+++ b/vm/alloc/HeapInternal.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Types and macros used internally by the heap.
+ */
+#ifndef _DALVIK_ALLOC_HEAP_INTERNAL
+#define _DALVIK_ALLOC_HEAP_INTERNAL
+
+#include <time.h>  // for struct timespec
+
+#include "HeapTable.h"
+#include "MarkSweep.h"
+
+#define SCHEDULED_REFERENCE_MAGIC   ((Object*)0x87654321)
+
+#define ptr2chunk(p)    (((DvmHeapChunk *)(p)) - 1)
+#define chunk2ptr(p)    ((void *)(((DvmHeapChunk *)(p)) + 1))
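+
+/* Illustrative: for a chunk header DvmHeapChunk *hc, chunk2ptr(hc)
+ * yields the object pointer handed out to the VM, and
+ * ptr2chunk(chunk2ptr(hc)) == hc recovers the header.
+ */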
+
+#define WITH_OBJECT_HEADERS 0
+#if WITH_OBJECT_HEADERS
+#define OBJECT_HEADER   0x11335577
+extern u2 gGeneration;
+#endif
+
+typedef struct DvmHeapChunk {
+#if WITH_OBJECT_HEADERS
+    u4 header;
+    const Object *parent;
+    const Object *parentOld;
+    const Object *markFinger;
+    const Object *markFingerOld;
+    u2 birthGeneration;
+    u2 markCount;
+    u2 scanCount;
+    u2 oldMarkGeneration;
+    u2 markGeneration;
+    u2 oldScanGeneration;
+    u2 scanGeneration;
+#endif
+#if WITH_HPROF && WITH_HPROF_STACK
+    u4 stackTraceSerialNumber;
+#endif
+    u8 data[0];
+} DvmHeapChunk;
+
+struct GcHeap {
+    HeapSource      *heapSource;
+
+    /* List of heap objects that the GC should never collect.
+     * These should be included in the root set of objects.
+     */
+    HeapRefTable    nonCollectableRefs;
+
+    /* List of heap objects that will require finalization when
+     * collected.  I.e., instance objects
+     *
+     *     a) whose class definitions override java.lang.Object.finalize()
+     *
+     * *** AND ***
+     *
+     *     b) that have never been finalized.
+     *
+     * Note that this does not exclude non-garbage objects;  this
+     * is not the list of pending finalizations, but of objects that
+     * potentially have finalization in their futures.
+     */
+    LargeHeapRefTable  *finalizableRefs;
+
+    /* The list of objects that need to have finalize() called
+     * on themselves.  These references are part of the root set.
+     *
+     * This table is protected by gDvm.heapWorkerListLock, which must
+     * be acquired after the heap lock.
+     */
+    LargeHeapRefTable  *pendingFinalizationRefs;
+
+    /* Linked lists of subclass instances of java/lang/ref/Reference
+     * that we find while recursing.  The "next" pointers are hidden
+     * in the objects' <code>int Reference.vmData</code> fields.
+     * These lists are cleared and rebuilt each time the GC runs.
+     */
+    Object         *softReferences;
+    Object         *weakReferences;
+    Object         *phantomReferences;
+
+    /* The list of Reference objects that need to be cleared and/or
+     * enqueued.  The bottom two bits of the object pointers indicate
+     * whether they should be cleared and/or enqueued.
+     *
+     * This table is protected by gDvm.heapWorkerListLock, which must
+     * be acquired after the heap lock.
+     */
+    LargeHeapRefTable  *referenceOperations;
+
+    /* If non-null, the object and method that the HeapWorker is
+     * currently executing.
+     */
+    Object *heapWorkerCurrentObject;
+    Method *heapWorkerCurrentMethod;
+
+    /* If heapWorkerCurrentObject is non-null, this gives the time when
+     * HeapWorker started executing that method.  The time value must come
+     * from dvmGetRelativeTimeUsec().
+     *
+     * The "Cpu" entry tracks the per-thread CPU timer (when available).
+     */
+    u8 heapWorkerInterpStartTime;
+    u8 heapWorkerInterpCpuStartTime;
+
+    /* If any fields are non-zero, indicates the next (absolute) time that
+     * the HeapWorker thread should call dvmHeapSourceTrim().
+     */
+    struct timespec heapWorkerNextTrim;
+
+    /* The current state of the mark step.
+     * Only valid during a GC.
+     */
+    GcMarkContext   markContext;
+
+    /* Set to dvmGetRelativeTimeUsec() whenever a GC begins.
+     * The value is preserved between GCs, so it can be used
+     * to determine the time between successive GCs.
+     * Initialized to zero before the first GC.
+     */
+    u8              gcStartTime;
+
+    /* Is the GC running?  Used to avoid recursive calls to GC.
+     */
+    bool            gcRunning;
+
+    /* Set at the end of a GC to indicate the collection policy
+     * for SoftReferences during the following GC.
+     */
+    enum { SR_COLLECT_NONE, SR_COLLECT_SOME, SR_COLLECT_ALL }
+                    softReferenceCollectionState;
+
+    /* The size of the heap is compared against this value
+     * to determine when to start collecting SoftReferences.
+     */
+    size_t          softReferenceHeapSizeThreshold;
+
+    /* A value that will increment every time we see a SoftReference
+     * whose referent isn't marked (during SR_COLLECT_SOME).
+     * The absolute value is meaningless, and does not need to
+     * be reset or initialized at any point.
+     */
+    int             softReferenceColor;
+
+    /* Indicates whether or not the object scanner should bother
+     * keeping track of any references.  If markAllReferents is
+     * true, referents will be hard-marked.  If false, normal
+     * reference following is used.
+     */
+    bool            markAllReferents;
+
+#if DVM_TRACK_HEAP_MARKING
+    /* Every time an unmarked object becomes marked, markCount
+     * is incremented and markSize increases by the size of
+     * that object.
+     */
+    size_t          markCount;
+    size_t          markSize;
+#endif
+
+    /*
+     * Debug control values
+     */
+
+    int             ddmHpifWhen;
+    int             ddmHpsgWhen;
+    int             ddmHpsgWhat;
+    int             ddmNhsgWhen;
+    int             ddmNhsgWhat;
+
+#if WITH_HPROF
+    bool            hprofDumpOnGc;
+    const char*     hprofFileName;
+    hprof_context_t *hprofContext;
+#endif
+};
+
+bool dvmLockHeap(void);
+void dvmUnlockHeap(void);
+void dvmLogGcStats(size_t numFreed, size_t sizeFreed, size_t gcTimeMs);
+void dvmLogMadviseStats(size_t madvisedSizes[], size_t arrayLen);
+void dvmHeapSizeChanged(void);
+
+/*
+ * Logging helpers
+ */
+
+#define HEAP_LOG_TAG      LOG_TAG "-heap"
+
+#if LOG_NDEBUG
+#define LOGV_HEAP(...)    ((void)0)
+#define LOGD_HEAP(...)    ((void)0)
+#else
+#define LOGV_HEAP(...)    LOG(LOG_VERBOSE, HEAP_LOG_TAG, __VA_ARGS__)
+#define LOGD_HEAP(...)    LOG(LOG_DEBUG, HEAP_LOG_TAG, __VA_ARGS__)
+#endif
+#define LOGI_HEAP(...)    LOG(LOG_INFO, HEAP_LOG_TAG, __VA_ARGS__)
+#define LOGW_HEAP(...)    LOG(LOG_WARN, HEAP_LOG_TAG, __VA_ARGS__)
+#define LOGE_HEAP(...)    LOG(LOG_ERROR, HEAP_LOG_TAG, __VA_ARGS__)
+
+#define QUIET_ZYGOTE_GC 1
+#if QUIET_ZYGOTE_GC
+#undef LOGI_HEAP
+#define LOGI_HEAP(...) \
+    do { \
+        if (!gDvm.zygote) { \
+            LOG(LOG_INFO, HEAP_LOG_TAG, __VA_ARGS__); \
+        } \
+    } while (false)
+#endif
+
+#define FRACTIONAL_MB(n)    (n) / (1024 * 1024), \
+                            ((((n) % (1024 * 1024)) / 1024) * 1000) / 1024
+#define FRACTIONAL_PCT(n,max)    ((n) * 100) / (max), \
+                                 (((n) * 1000) / (max)) % 10
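+
+/* Illustrative: these expand to two values for a "%zd.%03zd" format
+ * pair; e.g. "%zd.%03zdMB" with FRACTIONAL_MB(2621440) prints "2.500MB".
+ */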
+
+#endif  // _DALVIK_ALLOC_HEAP_INTERNAL
diff --git a/vm/alloc/HeapSource.c b/vm/alloc/HeapSource.c
new file mode 100644
index 0000000..d97290f
--- /dev/null
+++ b/vm/alloc/HeapSource.c
@@ -0,0 +1,1580 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cutils/mspace.h>
+#include <limits.h>     // for INT_MAX
+#include <sys/mman.h>
+#include <errno.h>
+
+#include "Dalvik.h"
+#include "alloc/Heap.h"
+#include "alloc/HeapInternal.h"
+#include "alloc/HeapSource.h"
+#include "alloc/HeapBitmap.h"
+
+// TODO: find a real header file for these.
+extern int dlmalloc_trim(size_t);
+extern void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);
+
+static void snapIdealFootprint(void);
+static void setIdealFootprint(size_t max);
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+#define ALIGN_UP_TO_PAGE_SIZE(p) \
+    (((size_t)(p) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))
+#define ALIGN_DOWN_TO_PAGE_SIZE(p) \
+    ((size_t)(p) & ~(PAGE_SIZE - 1))
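+/* E.g., with 4096-byte pages, ALIGN_UP_TO_PAGE_SIZE(4097) == 8192 and
+ * ALIGN_DOWN_TO_PAGE_SIZE(4097) == 4096.
+ */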
+
+#define HEAP_UTILIZATION_MAX        1024
+#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
+#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
+#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)
+
+#define HS_BOILERPLATE() \
+    do { \
+        assert(gDvm.gcHeap != NULL); \
+        assert(gDvm.gcHeap->heapSource != NULL); \
+        assert(gHs == gDvm.gcHeap->heapSource); \
+    } while (0)
+
+#define DEBUG_HEAP_SOURCE 0
+#if DEBUG_HEAP_SOURCE
+#define HSTRACE(...)  LOG(LOG_INFO, LOG_TAG "-hs", __VA_ARGS__)
+#else
+#define HSTRACE(...)  /**/
+#endif
+
+/*
+=======================================================
+=======================================================
+=======================================================
+
+How will this be used?
+allocating/freeing: Heap.c just wants to say "alloc(n)" and get a ptr
+    - if allocating in large doesn't work, try allocating from small
+Heap.c will use HeapSource.h; HeapSource.c will do the right thing
+    between small and large
+    - some operations should be abstracted; put in a structure
+
+How do we manage the size trade-offs?
+- keep mspace max footprint clamped to actual footprint
+- if small-alloc returns null, adjust large vs. small ratio
+    - give small all available slack and retry
+    - success or fail, snap back to actual footprint and give rest to large
+
+managed as "small actual" + "large actual" + "delta to allowed total footprint"
+- when allocating from one source or the other, give the delta to the
+    active source, but snap back afterwards
+- that may not work so great for a gc heap, because small will always consume.
+    - but we need to use the memory, and the current max is the amount we
+      need to fill before a GC.
+
+Find a way to permanently steal pages from the middle of the heap
+    - segment tricks?
+
+Allocate String and char[] in a separate heap?
+
+Maybe avoid growing small heap, even if there's slack?  Look at
+live ratio of small heap after a gc; scale it based on that.
+
+=======================================================
+=======================================================
+=======================================================
+*/
+
+typedef struct {
+    /* The mspace to allocate from.
+     */
+    mspace *msp;
+
+    /* The bitmap that keeps track of where objects are in the heap.
+     */
+    HeapBitmap objectBitmap;
+
+    /* The largest size that this heap is allowed to grow to.
+     */
+    size_t absoluteMaxSize;
+
+    /* Number of bytes allocated from this mspace for objects,
+     * including any overhead.  This value is NOT exact, and
+     * should only be used as an input for certain heuristics.
+     */
+    size_t bytesAllocated;
+
+    /* Number of objects currently allocated from this mspace.
+     */
+    size_t objectsAllocated;
+} Heap;
+
+struct HeapSource {
+    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
+     */
+    size_t targetUtilization;
+
+    /* Requested minimum heap size, or zero if there is no minimum.
+     */
+    size_t minimumSize;
+
+    /* The starting heap size.
+     */
+    size_t startSize;
+
+    /* The largest that the heap source as a whole is allowed to grow.
+     */
+    size_t absoluteMaxSize;
+
+    /* The desired max size of the heap source as a whole.
+     */
+    size_t idealSize;
+
+    /* The maximum number of bytes allowed to be allocated from the
+     * active heap before a GC is forced.  This is used to "shrink" the
+     * heap in lieu of actual compaction.
+     */
+    size_t softLimit;
+
+    /* The heaps; heaps[0] is always the active heap,
+     * which new objects should be allocated from.
+     */
+    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];
+
+    /* The current number of heaps.
+     */
+    size_t numHeaps;
+
+    /* External allocation count.
+     */
+    size_t externalBytesAllocated;
+
+    /* The maximum number of external bytes that may be allocated.
+     */
+    size_t externalLimit;
+
+    /* True if zygote mode was active when the HeapSource was created.
+     */
+    bool sawZygote;
+};
+
+#define hs2heap(hs_) (&((hs_)->heaps[0]))
+
+/*
+ * Returns true iff a soft limit is in effect for the active heap.
+ */
+static inline bool
+softLimited(const HeapSource *hs)
+{
+    /* softLimit will be either INT_MAX or the limit for the
+     * active mspace.  idealSize can be greater than softLimit
+     * if there is more than one heap.  If there is only one
+     * heap, a non-INT_MAX softLimit should always be the same
+     * as idealSize.
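+     *
+     * E.g. (illustrative): softLimit == idealSize == 4MB means the
+     * active heap is soft-limited; softLimit == INT_MAX means it is not.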
+     */
+    return hs->softLimit <= hs->idealSize;
+}
+
+/*
+ * Returns the current footprint of all heaps.  If includeActive
+ * is false, don't count the heap at index 0.
+ */
+static inline size_t
+oldHeapOverhead(const HeapSource *hs, bool includeActive)
+{
+    size_t footprint = 0;
+    size_t i;
+
+    if (includeActive) {
+        i = 0;
+    } else {
+        i = 1;
+    }
+    for (/* i = i */; i < hs->numHeaps; i++) {
+//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
+        footprint += mspace_footprint(hs->heaps[i].msp);
+    }
+    return footprint;
+}
+
+/*
+ * Returns the heap that <ptr> could have come from, or NULL
+ * if it could not have come from any heap.
+ */
+static inline Heap *
+ptr2heap(const HeapSource *hs, const void *ptr)
+{
+    const size_t numHeaps = hs->numHeaps;
+    size_t i;
+
+//TODO: unroll this to HEAP_SOURCE_MAX_HEAP_COUNT
+    if (ptr != NULL) {
+        for (i = 0; i < numHeaps; i++) {
+            const Heap *const heap = &hs->heaps[i];
+
+            if (dvmHeapBitmapMayContainObject(&heap->objectBitmap, ptr)) {
+                return (Heap *)heap;
+            }
+        }
+    }
+    return NULL;
+}
+
+/*
+ * Functions to update heapSource->bytesAllocated when an object
+ * is allocated or freed.  mspace_usable_size() will give
+ * us a much more accurate picture of heap utilization than
+ * the requested byte sizes would.
+ *
+ * These aren't exact, and should not be treated as such.
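+ *
+ * E.g. (illustrative) a 20-byte request may be counted as
+ * mspace_usable_size() == 24 bytes plus HEAP_SOURCE_CHUNK_OVERHEAD.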
+ */
+static inline void
+countAllocation(Heap *heap, const void *ptr, bool isObj)
+{
+    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
+
+    heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
+            HEAP_SOURCE_CHUNK_OVERHEAD;
+    if (isObj) {
+        heap->objectsAllocated++;
+        dvmHeapBitmapSetObjectBit(&heap->objectBitmap, ptr);
+    }
+
+    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
+}
+
+static inline void
+countFree(Heap *heap, const void *ptr, bool isObj)
+{
+    size_t delta;
+
+    delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
+    assert(delta > 0);
+    if (delta < heap->bytesAllocated) {
+        heap->bytesAllocated -= delta;
+    } else {
+        heap->bytesAllocated = 0;
+    }
+    if (isObj) {
+        dvmHeapBitmapClearObjectBit(&heap->objectBitmap, ptr);
+        if (heap->objectsAllocated > 0) {
+            heap->objectsAllocated--;
+        }
+    }
+}
+
+static HeapSource *gHs = NULL;
+
+static mspace *
+createMspace(size_t startSize, size_t absoluteMaxSize, size_t id)
+{
+    mspace *msp;
+    char name[PATH_MAX];
+
+    /* If two ashmem regions have the same name, only one gets
+     * the name when looking at the maps.
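+     * E.g. heap 1 created while in zygote mode is named
+     * "dalvik-heap/zygote/1"; otherwise it would be "dalvik-heap/1".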
+     */
+    snprintf(name, sizeof(name)-1, "dalvik-heap%s/%zd",
+        gDvm.zygote ? "/zygote" : "", id);
+    name[sizeof(name)-1] = '\0';
+
+    /* Create an unlocked dlmalloc mspace to use as
+     * a small-object heap source.
+     *
+     * We start off reserving heapSizeStart/2 bytes but
+     * letting the heap grow to heapSizeStart.  This saves
+     * memory in the case where a process uses even less
+     * than the starting size.
+     */
+    LOGV_HEAP("Creating VM heap of size %u\n", startSize);
+    errno = 0;
+    msp = create_contiguous_mspace_with_name(startSize/2,
+            absoluteMaxSize, /*locked=*/false, name);
+    if (msp != NULL) {
+        /* Don't let the heap grow past the starting size without
+         * our intervention.
+         */
+        mspace_set_max_allowed_footprint(msp, startSize);
+    } else {
+        /* There's no guarantee that errno has meaning when the call
+         * fails, but it often does.
+         */
+        LOGE_HEAP("Can't create VM heap of size (%u,%u) (errno=%d)\n",
+            startSize/2, absoluteMaxSize, errno);
+    }
+
+    return msp;
+}
+
+static bool
+addNewHeap(HeapSource *hs, mspace *msp, size_t mspAbsoluteMaxSize)
+{
+    Heap heap;
+
+    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
+        LOGE("Attempt to create too many heaps (%zd >= %zd)\n",
+                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
+        dvmAbort();
+        return false;
+    }
+
+    memset(&heap, 0, sizeof(heap));
+
+    if (msp != NULL) {
+        heap.msp = msp;
+        heap.absoluteMaxSize = mspAbsoluteMaxSize;
+    } else {
+        size_t overhead;
+
+        overhead = oldHeapOverhead(hs, true);
+        if (overhead + HEAP_MIN_FREE >= hs->absoluteMaxSize) {
+            LOGE_HEAP("No room to create any more heaps "
+                    "(%zd overhead, %zd max)\n",
+                    overhead, hs->absoluteMaxSize);
+            return false;
+        }
+        heap.absoluteMaxSize = hs->absoluteMaxSize - overhead;
+        heap.msp = createMspace(HEAP_MIN_FREE, heap.absoluteMaxSize,
+                hs->numHeaps);
+        if (heap.msp == NULL) {
+            return false;
+        }
+    }
+    if (!dvmHeapBitmapInit(&heap.objectBitmap,
+                           (void *)ALIGN_DOWN_TO_PAGE_SIZE(heap.msp),
+                           heap.absoluteMaxSize,
+                           "objects"))
+    {
+        LOGE_HEAP("Can't create objectBitmap\n");
+        goto fail;
+    }
+
+    /* Don't let the soon-to-be-old heap grow any further.
+     */
+    if (hs->numHeaps > 0) {
+        mspace *msp = hs->heaps[0].msp;
+        mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));
+    }
+
+    /* Put the new heap in the list, at heaps[0].
+     * Shift existing heaps down.
+     */
+    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
+    hs->heaps[0] = heap;
+    hs->numHeaps++;
+
+    return true;
+
+fail:
+    if (msp == NULL) {
+        destroy_contiguous_mspace(heap.msp);
+    }
+    return false;
+}
+
+/*
+ * Initializes the heap source; must be called before any other
+ * dvmHeapSource*() functions.  Returns a GcHeap structure
+ * allocated from the heap source.
+ */
+GcHeap *
+dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
+{
+    GcHeap *gcHeap;
+    HeapSource *hs;
+    mspace *msp;
+
+    assert(gHs == NULL);
+
+    if (startSize > absoluteMaxSize) {
+        LOGE("Bad heap parameters (start=%d, max=%d)\n",
+           startSize, absoluteMaxSize);
+        return NULL;
+    }
+
+    /* Create an unlocked dlmalloc mspace to use as
+     * the small object heap source.
+     */
+    msp = createMspace(startSize, absoluteMaxSize, 0);
+    if (msp == NULL) {
+        return NULL;
+    }
+
+    /* Allocate a descriptor from the heap we just created.
+     */
+    gcHeap = mspace_malloc(msp, sizeof(*gcHeap));
+    if (gcHeap == NULL) {
+        LOGE_HEAP("Can't allocate heap descriptor\n");
+        goto fail;
+    }
+    memset(gcHeap, 0, sizeof(*gcHeap));
+
+    hs = mspace_malloc(msp, sizeof(*hs));
+    if (hs == NULL) {
+        LOGE_HEAP("Can't allocate heap source\n");
+        goto fail;
+    }
+    memset(hs, 0, sizeof(*hs));
+
+    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
+    hs->minimumSize = 0;
+    hs->startSize = startSize;
+    hs->absoluteMaxSize = absoluteMaxSize;
+    hs->idealSize = startSize;
+    hs->softLimit = INT_MAX;    // no soft limit at first
+    hs->numHeaps = 0;
+    hs->sawZygote = gDvm.zygote;
+    if (!addNewHeap(hs, msp, absoluteMaxSize)) {
+        LOGE_HEAP("Can't add initial heap\n");
+        goto fail;
+    }
+
+    gcHeap->heapSource = hs;
+
+    countAllocation(hs2heap(hs), gcHeap, false);
+    countAllocation(hs2heap(hs), hs, false);
+
+    gHs = hs;
+    return gcHeap;
+
+fail:
+    destroy_contiguous_mspace(msp);
+    return NULL;
+}
+
+/*
+ * If the HeapSource was created while in zygote mode, this
+ * will create a new heap for post-zygote allocations.
+ * Having a separate heap should maximize the number of pages
+ * that a given app_process shares with the zygote process.
+ */
+bool
+dvmHeapSourceStartupAfterZygote()
+{
+    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"
+
+    HS_BOILERPLATE();
+
+    assert(!gDvm.zygote);
+
+    if (hs->sawZygote) {
+        /* Create a new heap for post-zygote allocations.
+         */
+        return addNewHeap(hs, NULL, 0);
+    }
+    return true;
+}
+
+/*
+ * This is called while in zygote mode, right before we fork() for the
+ * first time.  We create a heap for all future zygote process allocations,
+ * in an attempt to avoid touching pages in the zygote heap.  (This would
+ * probably be unnecessary if we had a compacting GC -- the source of our
+ * troubles is small allocations filling in the gaps from larger ones.)
+ */
+bool
+dvmHeapSourceStartupBeforeFork()
+{
+    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"
+
+    HS_BOILERPLATE();
+
+    assert(gDvm.zygote);
+
+    if (!gDvm.newZygoteHeapAllocated) {
+        /* Create a new heap for post-fork zygote allocations.  We only
+         * try once, even if it fails.
+         */
+        LOGI("Splitting out new zygote heap\n");
+        gDvm.newZygoteHeapAllocated = true;
+        return addNewHeap(hs, NULL, 0);
+    }
+    return true;
+}
+
+/*
+ * Tears down the heap source and frees any resources associated with it.
+ */
+void
+dvmHeapSourceShutdown(GcHeap *gcHeap)
+{
+    if (gcHeap != NULL && gcHeap->heapSource != NULL) {
+        HeapSource *hs;
+        size_t numHeaps;
+        size_t i;
+
+        hs = gcHeap->heapSource;
+        gHs = NULL;
+
+        /* Cache numHeaps because hs will be invalid after the last
+         * heap is freed.
+         */
+        numHeaps = hs->numHeaps;
+
+        for (i = 0; i < numHeaps; i++) {
+            Heap *heap = &hs->heaps[i];
+
+            dvmHeapBitmapDelete(&heap->objectBitmap);
+            destroy_contiguous_mspace(heap->msp);
+        }
+        /* The last heap is the original one, which contains the
+         * HeapSource object itself.
+         */
+    }
+}
+
+/*
+ * Returns the requested value. If the per-heap stats are requested, fill
+ * them as well.
+ *
+ * Caller must hold the heap lock.
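+ *
+ * Illustrative call:
+ *     size_t stats[HEAP_SOURCE_MAX_HEAP_COUNT];
+ *     size_t total = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, stats,
+ *             HEAP_SOURCE_MAX_HEAP_COUNT);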
+ */
+size_t
+dvmHeapSourceGetValue(enum HeapSourceValueSpec spec, size_t perHeapStats[],
+                      size_t arrayLen)
+{
+    HeapSource *hs = gHs;
+    size_t value = 0;
+    size_t total = 0;
+    size_t i;
+
+    HS_BOILERPLATE();
+
+    switch (spec) {
+    case HS_EXTERNAL_BYTES_ALLOCATED:
+        return hs->externalBytesAllocated;
+    case HS_EXTERNAL_LIMIT:
+        return hs->externalLimit;
+    default:
+        // look at all heaps.
+        ;
+    }
+
+    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
+    for (i = 0; i < hs->numHeaps; i++) {
+        Heap *const heap = &hs->heaps[i];
+
+        switch (spec) {
+        case HS_FOOTPRINT:
+            value = mspace_footprint(heap->msp);
+            break;
+        case HS_ALLOWED_FOOTPRINT:
+            value = mspace_max_allowed_footprint(heap->msp);
+            break;
+        case HS_BYTES_ALLOCATED:
+            value = heap->bytesAllocated;
+            break;
+        case HS_OBJECTS_ALLOCATED:
+            value = heap->objectsAllocated;
+            break;
+        default:
+            // quiet gcc
+            break;
+        }
+        if (perHeapStats) {
+            perHeapStats[i] = value;
+        }
+        total += value;
+    }
+    return total;
+}
+
+/*
+ * Writes shallow copies of the currently-used bitmaps into outBitmaps,
+ * returning the number of bitmaps written.  Returns <0 if the array
+ * was not long enough.
+ */
+ssize_t
+dvmHeapSourceGetObjectBitmaps(HeapBitmap outBitmaps[], size_t maxBitmaps)
+{
+    HeapSource *hs = gHs;
+
+    HS_BOILERPLATE();
+
+    if (maxBitmaps >= hs->numHeaps) {
+        size_t i;
+
+        for (i = 0; i < hs->numHeaps; i++) {
+            outBitmaps[i] = hs->heaps[i].objectBitmap;
+        }
+        return i;
+    }
+    return -1;
+}
+
+/*
+ * Replaces the object location HeapBitmaps with the elements of
+ * <objectBitmaps>.  The elements of <objectBitmaps> are overwritten
+ * with shallow copies of the old bitmaps.
+ *
+ * Returns false if the number of bitmaps doesn't match the number
+ * of heaps.
+ */
+bool
+dvmHeapSourceReplaceObjectBitmaps(HeapBitmap objectBitmaps[], size_t nBitmaps)
+{
+    HeapSource *hs = gHs;
+    size_t i;
+
+    HS_BOILERPLATE();
+
+    if (nBitmaps != hs->numHeaps) {
+        return false;
+    }
+
+    for (i = 0; i < hs->numHeaps; i++) {
+        Heap *heap = &hs->heaps[i];
+        HeapBitmap swap;
+
+        swap = heap->objectBitmap;
+        heap->objectBitmap = objectBitmaps[i];
+        objectBitmaps[i] = swap;
+    }
+    return true;
+}
+
+/*
+ * Allocates <n> bytes of zeroed data.
+ */
+void *
+dvmHeapSourceAlloc(size_t n)
+{
+    HeapSource *hs = gHs;
+    Heap *heap;
+    void *ptr;
+
+    HS_BOILERPLATE();
+    heap = hs2heap(hs);
+
+    if (heap->bytesAllocated + n <= hs->softLimit) {
+// TODO: allocate large blocks (>64k?) as separate mmap regions so that
+//       they don't increase the high-water mark when they're freed.
+// TODO: zero out large objects using madvise
+        ptr = mspace_calloc(heap->msp, 1, n);
+        if (ptr != NULL) {
+            countAllocation(heap, ptr, true);
+        }
+    } else {
+        /* This allocation would push us over the soft limit;
+         * act as if the heap is full.
+         */
+        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation\n",
+                FRACTIONAL_MB(hs->softLimit), n);
+        ptr = NULL;
+    }
+    return ptr;
+}
+
+/* Remove any hard limits, try to allocate, and shrink back down.
+ * Last resort when trying to allocate an object.
+ */
+static void *
+heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
+{
+    void *ptr;
+    size_t max;
+
+    /* Grow as much as possible, but don't let the real footprint
+     * plus external allocations go over the absolute max.
+     */
+    max = heap->absoluteMaxSize;
+    if (max > hs->externalBytesAllocated) {
+        max -= hs->externalBytesAllocated;
+
+        mspace_set_max_allowed_footprint(heap->msp, max);
+        ptr = dvmHeapSourceAlloc(n);
+
+        /* Shrink back down as small as possible.  Our caller may
+         * readjust max_allowed to a more appropriate value.
+         */
+        mspace_set_max_allowed_footprint(heap->msp,
+                mspace_footprint(heap->msp));
+    } else {
+        ptr = NULL;
+    }
+
+    return ptr;
+}
+
+/*
+ * Allocates <n> bytes of zeroed data, growing as much as possible
+ * if necessary.
+ */
+void *
+dvmHeapSourceAllocAndGrow(size_t n)
+{
+    HeapSource *hs = gHs;
+    Heap *heap;
+    void *ptr;
+    size_t oldIdealSize;
+
+    HS_BOILERPLATE();
+    heap = hs2heap(hs);
+
+    ptr = dvmHeapSourceAlloc(n);
+    if (ptr != NULL) {
+        return ptr;
+    }
+
+    oldIdealSize = hs->idealSize;
+    if (softLimited(hs)) {
+        /* We're soft-limited.  Try removing the soft limit to
+         * see if we can allocate without actually growing.
+         */
+        hs->softLimit = INT_MAX;
+        ptr = dvmHeapSourceAlloc(n);
+        if (ptr != NULL) {
+            /* Removing the soft limit worked;  fix things up to
+             * reflect the new effective ideal size.
+             */
+            snapIdealFootprint();
+            return ptr;
+        }
+        // softLimit intentionally left at INT_MAX.
+    }
+
+    /* We're not soft-limited.  Grow the heap to satisfy the request.
+     * If this call fails, no footprints will have changed.
+     */
+    ptr = heapAllocAndGrow(hs, heap, n);
+    if (ptr != NULL) {
+        /* The allocation succeeded.  Fix up the ideal size to
+         * reflect any footprint modifications that had to happen.
+         */
+        snapIdealFootprint();
+    } else {
+        /* We just couldn't do it.  Restore the original ideal size,
+         * fixing up softLimit if necessary.
+         */
+        setIdealFootprint(oldIdealSize);
+    }
+    return ptr;
+}
+
+/*
+ * Frees the memory pointed to by <ptr>, which may be NULL.
+ */
+void
+dvmHeapSourceFree(void *ptr)
+{
+    Heap *heap;
+
+    HS_BOILERPLATE();
+
+    heap = ptr2heap(gHs, ptr);
+    if (heap != NULL) {
+        countFree(heap, ptr, true);
+        /* Only free objects that are in the active heap.
+         * Touching old heaps would pull pages into this process.
+         */
+        if (heap == gHs->heaps) {
+            mspace_free(heap->msp, ptr);
+        }
+    }
+}
+
+/*
+ * Returns true iff <ptr> was allocated from the heap source.
+ */
+bool
+dvmHeapSourceContains(const void *ptr)
+{
+    Heap *heap;
+
+    HS_BOILERPLATE();
+
+    heap = ptr2heap(gHs, ptr);
+    if (heap != NULL) {
+        return dvmHeapBitmapIsObjectBitSet(&heap->objectBitmap, ptr) != 0;
+    }
+    return false;
+}
+
+/*
+ * Returns the value of the requested flag.
+ */
+bool
+dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
+{
+    if (ptr == NULL) {
+        return false;
+    }
+
+    if (flag == HS_CONTAINS) {
+        return dvmHeapSourceContains(ptr);
+    } else if (flag == HS_ALLOCATED_IN_ZYGOTE) {
+        HeapSource *hs = gHs;
+
+        HS_BOILERPLATE();
+
+        if (hs->sawZygote) {
+            Heap *heap;
+
+            heap = ptr2heap(hs, ptr);
+            if (heap != NULL) {
+                /* If the object is not in the active heap, we assume that
+                 * it was allocated as part of zygote.
+                 */
+                return heap != hs->heaps;
+            }
+        }
+        /* The pointer is outside of any known heap, or we are not
+         * running in zygote mode.
+         */
+        return false;
+    }
+
+    return false;
+}
+
+/*
+ * Returns the number of usable bytes in an allocated chunk; the size
+ * may be larger than the size passed to dvmHeapSourceAlloc().
+ */
+size_t
+dvmHeapSourceChunkSize(const void *ptr)
+{
+    Heap *heap;
+
+    HS_BOILERPLATE();
+
+    heap = ptr2heap(gHs, ptr);
+    if (heap != NULL) {
+        return mspace_usable_size(heap->msp, ptr);
+    }
+    return 0;
+}
+
+/*
+ * Returns the number of bytes that the heap source has allocated
+ * from the system using sbrk/mmap, etc.
+ *
+ * Caller must hold the heap lock.
+ */
+size_t
+dvmHeapSourceFootprint()
+{
+    HS_BOILERPLATE();
+
+//TODO: include size of bitmaps?
+    return oldHeapOverhead(gHs, true);
+}
+
+/*
+ * Return the real bytes used by old heaps and external memory
+ * plus the soft usage of the current heap.  When a soft limit
+ * is in effect, this is effectively what it's compared against
+ * (though, in practice, it only looks at the current heap).
+ */
+static size_t
+getSoftFootprint(bool includeActive)
+{
+    HeapSource *hs = gHs;
+    size_t ret;
+
+    HS_BOILERPLATE();
+
+    ret = oldHeapOverhead(hs, false) + hs->externalBytesAllocated;
+    if (includeActive) {
+        ret += hs->heaps[0].bytesAllocated;
+    }
+
+    return ret;
+}
+
+/*
+ * Gets the maximum number of bytes that the heap source is allowed
+ * to allocate from the system.
+ */
+size_t
+dvmHeapSourceGetIdealFootprint()
+{
+    HeapSource *hs = gHs;
+
+    HS_BOILERPLATE();
+
+    return hs->idealSize;
+}
+
+/*
+ * Sets the soft limit, handling any necessary changes to the allowed
+ * footprint of the active heap.
+ */
+static void
+setSoftLimit(HeapSource *hs, size_t softLimit)
+{
+    /* Compare against the actual footprint, rather than the
+     * max_allowed, because the heap may not have grown all the
+     * way to the allowed size yet.
+     */
+    mspace *msp = hs->heaps[0].msp;
+    size_t currentHeapSize = mspace_footprint(msp);
+    if (softLimit < currentHeapSize) {
+        /* Don't let the heap grow any more, and impose a soft limit.
+         */
+        mspace_set_max_allowed_footprint(msp, currentHeapSize);
+        hs->softLimit = softLimit;
+    } else {
+        /* Let the heap grow to the requested max, and remove any
+         * soft limit, if set.
+         */
+        mspace_set_max_allowed_footprint(msp, softLimit);
+        hs->softLimit = INT_MAX;
+    }
+}
+
+/*
+ * Sets the maximum number of bytes that the heap source is allowed
+ * to allocate from the system.  Clamps to the appropriate maximum
+ * value.
+ */
+static void
+setIdealFootprint(size_t max)
+{
+    HeapSource *hs = gHs;
+#if DEBUG_HEAP_SOURCE
+    HeapSource oldHs = *hs;
+    mspace *msp = hs->heaps[0].msp;
+    size_t oldAllowedFootprint =
+            mspace_max_allowed_footprint(msp);
+#endif
+
+    HS_BOILERPLATE();
+
+    if (max > hs->absoluteMaxSize) {
+        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB\n",
+                FRACTIONAL_MB(max),
+                FRACTIONAL_MB(hs->absoluteMaxSize));
+        max = hs->absoluteMaxSize;
+    } else if (max < hs->minimumSize) {
+        max = hs->minimumSize;
+    }
+
+    /* Convert max into a size that applies to the active heap.
+     * Old heaps and external allocations will count against the ideal size.
+     */
+    size_t overhead = getSoftFootprint(false);
+    size_t activeMax;
+    if (overhead < max) {
+        activeMax = max - overhead;
+    } else {
+        activeMax = 0;
+    }
+
+    setSoftLimit(hs, activeMax);
+    hs->idealSize = max;
+
+    HSTRACE("IDEAL %zd->%zd (%d), soft %zd->%zd (%d), allowed %zd->%zd (%d), "
+            "ext %zd\n",
+            oldHs.idealSize, hs->idealSize, hs->idealSize - oldHs.idealSize,
+            oldHs.softLimit, hs->softLimit, hs->softLimit - oldHs.softLimit,
+            oldAllowedFootprint, mspace_max_allowed_footprint(msp),
+            mspace_max_allowed_footprint(msp) - oldAllowedFootprint,
+            hs->externalBytesAllocated);
+
+}
+
+/*
+ * Make the ideal footprint equal to the current footprint.
+ */
+static void
+snapIdealFootprint()
+{
+    HeapSource *hs = gHs;
+
+    HS_BOILERPLATE();
+
+    setIdealFootprint(getSoftFootprint(true));
+}
+
+/*
+ * Gets the current ideal heap utilization, represented as a number
+ * between zero and one.
+ */
+float dvmGetTargetHeapUtilization()
+{
+    HeapSource *hs = gHs;
+
+    HS_BOILERPLATE();
+
+    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
+}
+
+/*
+ * Sets the new ideal heap utilization, represented as a number
+ * between zero and one.
+ */
+void dvmSetTargetHeapUtilization(float newTarget)
+{
+    HeapSource *hs = gHs;
+    size_t newUtilization;
+
+    HS_BOILERPLATE();
+
+    /* Clamp it to a reasonable range.
+     */
+    // TODO: This may need some tuning.
+    if (newTarget < 0.2) {
+        newTarget = 0.2;
+    } else if (newTarget > 0.8) {
+        newTarget = 0.8;
+    }
+
+    hs->targetUtilization =
+            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
+    LOGV("Set heap target utilization to %zd/%d (%f)\n", 
+            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
+}
+
+/*
+ * If set is true, sets the new minimum heap size to size; always
+ * returns the current (or previous) size.  If size is zero,
+ * removes the current minimum constraint (if present).
+ */
+size_t
+dvmMinimumHeapSize(size_t size, bool set)
+{
+    HeapSource *hs = gHs;
+    size_t oldMinimumSize;
+
+    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
+     * heap lock if we're going to look at it.  We also need the
+     * lock for the call to setIdealFootprint().
+     */
+    dvmLockHeap();
+
+    HS_BOILERPLATE();
+
+    oldMinimumSize = hs->minimumSize;
+
+    if (set) {
+        /* Don't worry about external allocations right now.
+         * setIdealFootprint() will take them into account when
+         * minimumSize is used, and it's better to hold onto the
+         * intended minimumSize than to clamp it arbitrarily based
+         * on the current allocations.
+         */
+        if (size > hs->absoluteMaxSize) {
+            size = hs->absoluteMaxSize;
+        }
+        hs->minimumSize = size;
+        if (size > hs->idealSize) {
+            /* Force a snap to the minimum value, which we just set
+             * and which setIdealFootprint() will take into consideration.
+             */
+            setIdealFootprint(hs->idealSize);
+        }
+        /* Otherwise we'll just keep it in mind the next time
+         * setIdealFootprint() is called.
+         */
+    }
+
+    dvmUnlockHeap();
+
+    return oldMinimumSize;
+}
+
+/*
+ * Given the size of a live set, returns the ideal heap size given
+ * the current target utilization and MIN/MAX values.
+ *
+ * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
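+ *
+ * Worked example (illustrative): with targetUtilization == 512 (50%)
+ * and a 4MB live set, (liveSize / 512) * 1024 == 8MB, which the cap
+ * below reduces to liveSize + HEAP_IDEAL_FREE == 6MB.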
+ */
+static size_t
+getUtilizationTarget(const HeapSource *hs,
+        size_t liveSize, size_t targetUtilization)
+{
+    size_t targetSize;
+
+    /* Use the current target utilization ratio to determine the
+     * ideal heap size based on the size of the live set.
+     */
+    targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;
+
+    /* Cap the amount of free space, though, so we don't end up
+     * with, e.g., 8MB of free space when the live set size hits 8MB.
+     */
+    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
+        targetSize = liveSize + HEAP_IDEAL_FREE;
+    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
+        targetSize = liveSize + HEAP_MIN_FREE;
+    }
+    return targetSize;
+}
+
+/*
+ * Given the current contents of the active heap, increase the allowed
+ * heap footprint to match the target utilization ratio.  This
+ * should only be called immediately after a full mark/sweep.
+ */
+void dvmHeapSourceGrowForUtilization()
+{
+    HeapSource *hs = gHs;
+    Heap *heap;
+    size_t targetHeapSize;
+    size_t currentHeapUsed;
+    size_t oldIdealSize;
+    size_t newHeapMax;
+    size_t overhead;
+
+    HS_BOILERPLATE();
+    heap = hs2heap(hs);
+
+    /* Use the current target utilization ratio to determine the
+     * ideal heap size based on the size of the live set.
+     * Note that only the active heap plays any part in this.
+     *
+     * Avoid letting the old heaps influence the target free size,
+     * because they may be full of objects that aren't actually
+     * in the working set.  Just look at the allocated size of
+     * the current heap.
+     */
+    currentHeapUsed = heap->bytesAllocated;
+#define LET_EXTERNAL_INFLUENCE_UTILIZATION 1
+#if LET_EXTERNAL_INFLUENCE_UTILIZATION
+    /* This is a hack to deal with the side-effects of moving
+     * bitmap data out of the Dalvik heap.  Since the amount
+     * of free space after a GC scales with the size of the
+     * live set, many apps expected the large free space that
+     * appeared along with megabytes' worth of bitmaps.  When
+     * the bitmaps were removed, the free size shrank significantly,
+     * and apps started GCing constantly.  This makes it so the
+     * post-GC free space is the same size it would have been
+     * if the bitmaps were still in the Dalvik heap.
+     */
+    currentHeapUsed += hs->externalBytesAllocated;
+#endif
+    targetHeapSize =
+            getUtilizationTarget(hs, currentHeapUsed, hs->targetUtilization);
+#if LET_EXTERNAL_INFLUENCE_UTILIZATION
+    currentHeapUsed -= hs->externalBytesAllocated;
+    targetHeapSize -= hs->externalBytesAllocated;
+#endif
+
+    /* The ideal size includes the old heaps; add overhead so that
+     * it can be immediately subtracted again in setIdealFootprint().
+     * If the target heap size would exceed the max, setIdealFootprint()
+     * will clamp it to a legal value.
+     */
+    overhead = getSoftFootprint(false);
+    oldIdealSize = hs->idealSize;
+    setIdealFootprint(targetHeapSize + overhead);
+
+    newHeapMax = mspace_max_allowed_footprint(heap->msp);
+    if (softLimited(hs)) {
+        LOGD_HEAP("GC old usage %zd.%zd%%; now "
+                "%zd.%03zdMB used / %zd.%03zdMB soft max "
+                "(%zd.%03zdMB over, "
+                "%zd.%03zdMB ext, "
+                "%zd.%03zdMB real max)\n",
+                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
+                FRACTIONAL_MB(currentHeapUsed),
+                FRACTIONAL_MB(hs->softLimit),
+                FRACTIONAL_MB(overhead),
+                FRACTIONAL_MB(hs->externalBytesAllocated),
+                FRACTIONAL_MB(newHeapMax));
+    } else {
+        LOGD_HEAP("GC old usage %zd.%zd%%; now "
+                "%zd.%03zdMB used / %zd.%03zdMB real max "
+                "(%zd.%03zdMB over, "
+                "%zd.%03zdMB ext)\n",
+                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
+                FRACTIONAL_MB(currentHeapUsed),
+                FRACTIONAL_MB(newHeapMax),
+                FRACTIONAL_MB(overhead),
+                FRACTIONAL_MB(hs->externalBytesAllocated));
+    }
+}
+
+/*
+ * Return free pages to the system.
+ * TODO: move this somewhere else, especially the native heap part.
+ */
+
+static void releasePagesInRange(void *start, void *end, void *nbytes)
+{
+    /* Linux requires that the madvise() start address is page-aligned.
+     * We also align the end address.
+     */
+    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
+    end = (void *)((size_t)end & ~(PAGE_SIZE - 1));
+    if (start < end) {
+        size_t length = (char *)end - (char *)start;
+        madvise(start, length, MADV_DONTNEED);
+        *(size_t *)nbytes += length;
+    }
+}
+
+/*
+ * Return unused memory to the system if possible.
+ */
+void
+dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
+{
+    HeapSource *hs = gHs;
+    size_t nativeBytes, heapBytes;
+    size_t i;
+
+    HS_BOILERPLATE();
+
+    assert(arrayLen >= hs->numHeaps);
+
+    heapBytes = 0;
+    for (i = 0; i < hs->numHeaps; i++) {
+        Heap *heap = &hs->heaps[i];
+
+        /* Return the wilderness chunk to the system.
+         */
+        mspace_trim(heap->msp, 0);
+
+        /* Return any whole free pages to the system.
+         */
+        bytesTrimmed[i] = 0;
+        mspace_walk_free_pages(heap->msp, releasePagesInRange,
+                               &bytesTrimmed[i]);
+        heapBytes += bytesTrimmed[i];
+    }
+
+    /* Same for the native heap.
+     */
+    dlmalloc_trim(0);
+    nativeBytes = 0;
+    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);
+
+    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes\n",
+            heapBytes, nativeBytes, heapBytes + nativeBytes);
+}
+
+/*
+ * Walks over the heap source and passes every allocated and
+ * free chunk to the callback.
+ */
+void
+dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
+                                      const void *userptr, size_t userlen,
+                                      void *arg),
+                  void *arg)
+{
+    HeapSource *hs = gHs;
+    size_t i;
+
+    HS_BOILERPLATE();
+
+    /* Walk the heaps from oldest to newest.
+     */
+//TODO: do this in address order
+    for (i = hs->numHeaps; i > 0; --i) {
+        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
+    }
+}
+
+/*
+ * Gets the number of heaps available in the heap source.
+ *
+ * Caller must hold the heap lock, because gHs caches a field
+ * in gDvm.gcHeap.
+ */
+size_t
+dvmHeapSourceGetNumHeaps()
+{
+    HeapSource *hs = gHs;
+
+    HS_BOILERPLATE();
+
+    return hs->numHeaps;
+}
+
+
+/*
+ * External allocation tracking
+ *
+ * In some situations, memory outside of the heap is tied to the
+ * lifetime of objects in the heap.  Since that memory is kept alive
+ * by heap objects, it should provide memory pressure that can influence
+ * GCs.
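+ *
+ * Illustrative usage (a sketch, not a prescribed call site): native
+ * code backing a heap object would call dvmTrackExternalAllocation(n)
+ * when it allocates n bytes outside the GC heap, and balance it with
+ * dvmTrackExternalFree(n) when the owning object goes away.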
+ */
+
+
+static bool
+externalAllocPossible(const HeapSource *hs, size_t n)
+{
+    const Heap *heap;
+    size_t currentHeapSize;
+
+    /* Make sure that this allocation is even possible.
+     * Don't let the external size plus the actual heap size
+     * go over the absolute max.  This essentially treats
+     * external allocations as part of the active heap.
+     *
+     * Note that this will fail "mysteriously" if there's
+     * a small softLimit but a large heap footprint.
+     */
+    heap = hs2heap(hs);
+    currentHeapSize = mspace_max_allowed_footprint(heap->msp);
+    if (currentHeapSize + hs->externalBytesAllocated + n <=
+            heap->absoluteMaxSize)
+    {
+        return true;
+    }
+    HSTRACE("externalAllocPossible(): "
+            "footprint %zu + extAlloc %zu + n %zu >= max %zu (space for %zu)\n",
+            currentHeapSize, hs->externalBytesAllocated, n,
+            heap->absoluteMaxSize,
+            heap->absoluteMaxSize -
+                    (currentHeapSize + hs->externalBytesAllocated));
+    return false;
+}
+
+#define EXTERNAL_TARGET_UTILIZATION 820  // 80%
+
+/*
+ * Tries to update the internal count of externally-allocated memory.
+ * If there's enough room for that memory, returns true.  If not, returns
+ * false and does not update the count.
+ *
+ * The caller must ensure externalAllocPossible(hs, n) == true.
+ */
+static bool
+externalAlloc(HeapSource *hs, size_t n, bool grow)
+{
+    Heap *heap;
+    size_t currentHeapSize;
+    size_t newTotal;
+    size_t max;
+    bool grew;
+
+    assert(hs->externalLimit >= hs->externalBytesAllocated);
+
+    HSTRACE("externalAlloc(%zd%s)\n", n, grow ? ", grow" : "");
+    assert(externalAllocPossible(hs, n));  // The caller must ensure this.
+
+    /* External allocations have their own "free space" that they
+     * can allocate from without causing a GC.
+     */
+    if (hs->externalBytesAllocated + n <= hs->externalLimit) {
+        hs->externalBytesAllocated += n;
+#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
+        if (gDvm.allocProf.enabled) {
+            Thread* self = dvmThreadSelf();
+            gDvm.allocProf.externalAllocCount++;
+            gDvm.allocProf.externalAllocSize += n;
+            if (self != NULL) {
+                self->allocProf.externalAllocCount++;
+                self->allocProf.externalAllocSize += n;
+            }
+        }
+#endif
+        return true;
+    }
+    if (!grow) {
+        return false;
+    }
+
+    /* GROW */
+    hs->externalBytesAllocated += n;
+    hs->externalLimit = getUtilizationTarget(hs,
+            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
+    HSTRACE("EXTERNAL grow limit to %zd\n", hs->externalLimit);
+    return true;
+}
+
+static void
+gcForExternalAlloc(bool collectSoftReferences)
+{
+#ifdef WITH_PROFILER  // even if !PROFILE_EXTERNAL_ALLOCATIONS
+    if (gDvm.allocProf.enabled) {
+        Thread* self = dvmThreadSelf();
+        gDvm.allocProf.gcCount++;
+        if (self != NULL) {
+            self->allocProf.gcCount++;
+        }
+    }
+#endif
+    dvmCollectGarbageInternal(collectSoftReferences);
+}
+
+/*
+ * Updates the internal count of externally-allocated memory.  If there's
+ * enough room for that memory, returns true.  If not, returns false and
+ * does not update the count.
+ *
+ * May cause a GC as a side-effect.
+ */
+bool
+dvmTrackExternalAllocation(size_t n)
+{
+    HeapSource *hs = gHs;
+    size_t overhead;
+    bool ret = false;
+
+    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
+     * heap lock if we're going to look at it.
+     */
+    dvmLockHeap();
+
+    HS_BOILERPLATE();
+    assert(hs->externalLimit >= hs->externalBytesAllocated);
+
+    if (!externalAllocPossible(hs, n)) {
+        LOGE_HEAP("%zd-byte external allocation "
+                "too large for this process.\n", n);
+        goto out;
+    }
+
+    /* Try "allocating" using the existing "free space".
+     */
+    HSTRACE("EXTERNAL alloc %zu (%zu < %zu)\n",
+            n, hs->externalBytesAllocated, hs->externalLimit);
+    if (externalAlloc(hs, n, false)) {
+        ret = true;
+        goto out;
+    }
+
+    /* The "allocation" failed.  Free up some space by doing
+     * a full garbage collection.  This may grow the heap source
+     * if the live set is sufficiently large.
+     */
+    HSTRACE("EXTERNAL alloc %zd: GC 1\n", n);
+    gcForExternalAlloc(false);  // don't collect SoftReferences
+    if (externalAlloc(hs, n, false)) {
+        ret = true;
+        goto out;
+    }
+
+    /* Even that didn't work;  this is an exceptional state.
+     * Try harder, growing the heap source if necessary.
+     */
+    HSTRACE("EXTERNAL alloc %zd: frag\n", n);
+    ret = externalAlloc(hs, n, true);
+    dvmHeapSizeChanged();
+    if (ret) {
+        goto out;
+    }
+
+    /* We couldn't even grow enough to satisfy the request.
+     * Try one last GC, collecting SoftReferences this time.
+     */
+    HSTRACE("EXTERNAL alloc %zd: GC 2\n", n);
+    gcForExternalAlloc(true);  // collect SoftReferences
+    ret = externalAlloc(hs, n, true);
+    dvmHeapSizeChanged();
+    if (!ret) {
+        LOGE_HEAP("Out of external memory on a %zu-byte allocation.\n", n);
+    }
+
+#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
+    if (gDvm.allocProf.enabled) {
+        Thread* self = dvmThreadSelf();
+        gDvm.allocProf.failedExternalAllocCount++;
+        gDvm.allocProf.failedExternalAllocSize += n;
+        if (self != NULL) {
+            self->allocProf.failedExternalAllocCount++;
+            self->allocProf.failedExternalAllocSize += n;
+        }
+    }
+#endif
+
+out:
+    dvmUnlockHeap();
+
+    return ret;
+}
+
+/*
+ * Reduces the internal count of externally-allocated memory.
+ */
+void
+dvmTrackExternalFree(size_t n)
+{
+    HeapSource *hs = gHs;
+    size_t newIdealSize;
+    size_t newExternalLimit;
+    size_t oldExternalBytesAllocated;
+
+    HSTRACE("EXTERNAL free %zu (%zu < %zu)\n",
+            n, hs->externalBytesAllocated, hs->externalLimit);
+
+    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
+     * heap lock if we're going to look at it.
+     */
+    dvmLockHeap();
+
+    HS_BOILERPLATE();
+    assert(hs->externalLimit >= hs->externalBytesAllocated);
+
+    oldExternalBytesAllocated = hs->externalBytesAllocated;
+    if (n <= hs->externalBytesAllocated) {
+        hs->externalBytesAllocated -= n;
+    } else {
+        n = hs->externalBytesAllocated;
+        hs->externalBytesAllocated = 0;
+    }
+
+#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
+    if (gDvm.allocProf.enabled) {
+        Thread* self = dvmThreadSelf();
+        gDvm.allocProf.externalFreeCount++;
+        gDvm.allocProf.externalFreeSize += n;
+        if (self != NULL) {
+            self->allocProf.externalFreeCount++;
+            self->allocProf.externalFreeSize += n;
+        }
+    }
+#endif
+
+    /* Shrink as quickly as we can.
+     */
+    newExternalLimit = getUtilizationTarget(hs,
+            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
+    if (newExternalLimit < oldExternalBytesAllocated) {
+        /* Make sure that the remaining free space is at least
+         * big enough to allocate something of the size that was
+         * just freed.  This makes it more likely that
+         *     externalFree(N); externalAlloc(N);
+         * will work without causing a GC.
+         */
+        HSTRACE("EXTERNAL free preserved %zu extra free bytes\n",
+                oldExternalBytesAllocated - newExternalLimit);
+        newExternalLimit = oldExternalBytesAllocated;
+    }
+    if (newExternalLimit < hs->externalLimit) {
+        hs->externalLimit = newExternalLimit;
+    }
+
+    dvmUnlockHeap();
+}
+
+/*
+ * Returns the number of externally-allocated bytes being tracked by
+ * dvmTrackExternalAllocation/Free().
+ */
+size_t
+dvmGetExternalBytesAllocated()
+{
+    const HeapSource *hs = gHs;
+    size_t ret;
+
+    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
+     * heap lock if we're going to look at it.  We also need the
+     * lock for the call to setIdealFootprint().
+     */
+    dvmLockHeap();
+    HS_BOILERPLATE();
+    ret = hs->externalBytesAllocated;
+    dvmUnlockHeap();
+
+    return ret;
+}
diff --git a/vm/alloc/HeapSource.h b/vm/alloc/HeapSource.h
new file mode 100644
index 0000000..3007d4f
--- /dev/null
+++ b/vm/alloc/HeapSource.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _DALVIK_HEAP_SOURCE
+#define _DALVIK_HEAP_SOURCE
+
+#include "alloc/HeapInternal.h" // for GcHeap
+
+/* dlmalloc uses one size_t per allocated chunk.
+ */
+#define HEAP_SOURCE_CHUNK_OVERHEAD         (1 * sizeof (size_t))
+#define HEAP_SOURCE_WORST_CHUNK_OVERHEAD   (32 * sizeof (size_t))
+
+/* The largest number of separate heaps we can handle.
+ */
+#define HEAP_SOURCE_MAX_HEAP_COUNT 3
+
+/*
+ * Initializes the heap source; must be called before any other
+ * dvmHeapSource*() functions.
+ */
+GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize);
+
+/*
+ * If the HeapSource was created while in zygote mode, this
+ * will create a new heap for post-zygote allocations.
+ * Having a separate heap should maximize the number of pages
+ * that a given app_process shares with the zygote process.
+ */
+bool dvmHeapSourceStartupAfterZygote(void);
+
+/*
+ * If the HeapSource was created while in zygote mode, this
+ * will create an additional zygote heap before the first fork().
+ * Having a separate heap should reduce the number of shared
+ * pages subsequently touched by the zygote process.
+ */
+bool dvmHeapSourceStartupBeforeFork(void);
+
+/*
+ * Tears down the heap source and frees any resources associated with it.
+ */
+void dvmHeapSourceShutdown(GcHeap *gcHeap);
+
+/*
+ * Writes shallow copies of the currently-used bitmaps into outBitmaps,
+ * returning the number of bitmaps written.  Returns <0 if the array
+ * was not long enough.
+ */
+ssize_t dvmHeapSourceGetObjectBitmaps(HeapBitmap outBitmaps[],
+        size_t maxBitmaps);
+
+/*
+ * Replaces the object location HeapBitmaps with the elements of
+ * <objectBitmaps>.  The elements of <objectBitmaps> are overwritten
+ * with shallow copies of the old bitmaps.
+ *
+ * Returns false if the number of bitmaps doesn't match the number
+ * of heaps.
+ */
+bool dvmHeapSourceReplaceObjectBitmaps(HeapBitmap objectBitmaps[],
+        size_t nBitmaps);
+
+/*
+ * Returns the requested value. If the per-heap stats are requested, fill
+ * them as well.
+ */
+enum HeapSourceValueSpec {
+    HS_FOOTPRINT,
+    HS_ALLOWED_FOOTPRINT,
+    HS_BYTES_ALLOCATED,
+    HS_OBJECTS_ALLOCATED,
+    HS_EXTERNAL_BYTES_ALLOCATED,
+    HS_EXTERNAL_LIMIT
+};
+size_t dvmHeapSourceGetValue(enum HeapSourceValueSpec spec,
+                             size_t perHeapStats[], size_t arrayLen);
+
+/*
+ * Allocates <n> bytes of zeroed data.
+ */
+void *dvmHeapSourceAlloc(size_t n);
+
+/*
+ * Allocates <n> bytes of zeroed data, growing up to absoluteMaxSize
+ * if necessary.
+ */
+void *dvmHeapSourceAllocAndGrow(size_t n);
+
+/*
+ * Frees the memory pointed to by <ptr>, which may be NULL.
+ */
+void dvmHeapSourceFree(void *ptr);
+
+/*
+ * Returns true iff <ptr> was allocated from the heap source.
+ */
+bool dvmHeapSourceContains(const void *ptr);
+
+/*
+ * Returns the value of the requested flag.
+ */
+enum HeapSourcePtrFlag {
+    HS_CONTAINS,    // identical to dvmHeapSourceContains()
+    HS_ALLOCATED_IN_ZYGOTE
+};
+bool dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag);
+
+/*
+ * Returns the number of usable bytes in an allocated chunk; the size
+ * may be larger than the size passed to dvmHeapSourceAlloc().
+ */
+size_t dvmHeapSourceChunkSize(const void *ptr);
+
+/*
+ * Returns the number of bytes that the heap source has allocated
+ * from the system using sbrk/mmap, etc.
+ */
+size_t dvmHeapSourceFootprint(void);
+
+/*
+ * Gets the maximum number of bytes that the heap source is allowed
+ * to allocate from the system.
+ */
+size_t dvmHeapSourceGetIdealFootprint(void);
+
+/*
+ * Given the current contents of the heap, increase the allowed
+ * heap footprint to match the target utilization ratio.  This
+ * should only be called immediately after a full mark/sweep.
+ */
+void dvmHeapSourceGrowForUtilization(void);
+
+/*
+ * Return unused memory to the system if possible.  If <bytesTrimmed>
+ * is non-NULL, the number of bytes returned to the system is written to it.
+ */
+void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen);
+
+/*
+ * Walks over the heap source and passes every allocated and
+ * free chunk to the callback.
+ */
+void dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
+                                      const void *userptr, size_t userlen,
+                                      void *arg),
+                       void *arg);
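+
+/* Example walk callback (a sketch; "countChunks" is illustrative):
+ *
+ *     static void countChunks(const void *chunkptr, size_t chunklen,
+ *             const void *userptr, size_t userlen, void *arg)
+ *     {
+ *         size_t *count = (size_t *)arg;
+ *         if (userptr != NULL) {   // free chunks are assumed to
+ *             (*count)++;          // arrive with a NULL userptr
+ *         }
+ *     }
+ *
+ *     size_t count = 0;
+ *     dvmHeapSourceWalk(countChunks, &count);
+ */
+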
+/*
+ * Gets the number of heaps available in the heap source.
+ */
+size_t dvmHeapSourceGetNumHeaps(void);
+
+#endif  // _DALVIK_HEAP_SOURCE
diff --git a/vm/alloc/HeapTable.c b/vm/alloc/HeapTable.c
new file mode 100644
index 0000000..b56de64
--- /dev/null
+++ b/vm/alloc/HeapTable.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "alloc/HeapTable.h"
+#include "alloc/HeapInternal.h"
+
+#include <limits.h> // for INT_MAX
+
+static void *heapTableRealloc(void *oldPtr, size_t newSize)
+{
+    /* Don't just call realloc(), in case the native system
+     * doesn't malloc() on realloc(NULL).
+     */
+    if (oldPtr != NULL) {
+        return realloc(oldPtr, newSize);
+    } else {
+        return malloc(newSize);
+    }
+}
+
+void dvmHeapHeapTableFree(void *ptr)
+{
+    free(ptr);
+}
+
+#define heapRefTableIsFull(refs) \
+    ({ \
+        const HeapRefTable *HRTIF_refs = (refs); \
+        dvmIsReferenceTableFull(HRTIF_refs); \
+    })
+
+bool dvmHeapInitHeapRefTable(HeapRefTable *refs, size_t nelems)
+{
+    memset(refs, 0, sizeof(*refs));
+    return dvmInitReferenceTable(refs, nelems, INT_MAX);
+}
+
+/* Frees the array inside the HeapRefTable, not the HeapRefTable itself.
+ */
+void dvmHeapFreeHeapRefTable(HeapRefTable *refs)
+{
+    dvmClearReferenceTable(refs);
+}
+
+/*
+ * Large, non-contiguous reference tables
+ */
+
+#define kLargeHeapRefTableNElems 1024
+bool dvmHeapAddRefToLargeTable(LargeHeapRefTable **tableP, Object *ref)
+{
+    LargeHeapRefTable *table;
+
+    assert(tableP != NULL);
+    assert(ref != NULL);
+
+    /* Make sure that a table with a free slot is
+     * at the head of the list.
+     */
+    if (*tableP != NULL) {
+        table = *tableP;
+        LargeHeapRefTable *prevTable;
+
+        /* Find an empty slot for this reference.
+         */
+        prevTable = NULL;
+        while (table != NULL && heapRefTableIsFull(&table->refs)) {
+            prevTable = table;
+            table = table->next;
+        }
+        if (table != NULL) {
+            if (prevTable != NULL) {
+                /* Move the table to the head of the list.
+                 */
+                prevTable->next = table->next;
+                table->next = *tableP;
+                *tableP = table;
+            }
+            /* else it's already at the head. */
+
+            goto insert;
+        }
+        /* else all tables are already full;
+         * fall through to the alloc case.
+         */
+    }
+
+    /* Allocate a new table.
+     */
+    table = (LargeHeapRefTable *)heapTableRealloc(NULL,
+            sizeof(LargeHeapRefTable));
+    if (table == NULL) {
+        LOGE_HEAP("Can't allocate a new large ref table\n");
+        return false;
+    }
+    if (!dvmHeapInitHeapRefTable(&table->refs, kLargeHeapRefTableNElems)) {
+        LOGE_HEAP("Can't initialize a new large ref table\n");
+        dvmHeapHeapTableFree(table);
+        return false;
+    }
+
+    /* Stick it at the head.
+     */
+    table->next = *tableP;
+    *tableP = table;
+
+insert:
+    /* Insert the reference.
+     */
+    assert(table == *tableP);
+    assert(table != NULL);
+    assert(!heapRefTableIsFull(&table->refs));
+    *table->refs.nextEntry++ = ref;
+
+    return true;
+}
+
+bool dvmHeapAddTableToLargeTable(LargeHeapRefTable **tableP, HeapRefTable *refs)
+{
+    LargeHeapRefTable *table;
+
+    /* Allocate a node.
+     */
+    table = (LargeHeapRefTable *)heapTableRealloc(NULL,
+            sizeof(LargeHeapRefTable));
+    if (table == NULL) {
+        LOGE_HEAP("Can't allocate a new large ref table\n");
+        return false;
+    }
+    table->refs = *refs;
+
+    /* Insert the table into the list.
+     */
+    table->next = *tableP;
+    *tableP = table;
+
+    return true;
+}
+
+/* Frees everything associated with the LargeHeapRefTable.
+ */
+void dvmHeapFreeLargeTable(LargeHeapRefTable *table)
+{
+    while (table != NULL) {
+        LargeHeapRefTable *next = table->next;
+        dvmHeapFreeHeapRefTable(&table->refs);
+        dvmHeapHeapTableFree(table);
+        table = next;
+    }
+}
+
+Object *dvmHeapGetNextObjectFromLargeTable(LargeHeapRefTable **pTable)
+{
+    LargeHeapRefTable *table;
+    Object *obj;
+
+    assert(pTable != NULL);
+
+    obj = NULL;
+    table = *pTable;
+    if (table != NULL) {
+        GcHeap *gcHeap = gDvm.gcHeap;
+        HeapRefTable *refs = &table->refs;
+
+        /* We should never have an empty table node in the list.
+         */
+        assert(dvmReferenceTableEntries(refs) != 0);
+
+        /* Remove and return the last entry in the list.
+         */
+        obj = *--refs->nextEntry;
+
+        /* If this was the last entry in the table node,
+         * free it and patch up the list.
+         */
+        if (refs->nextEntry == refs->table) {
+            *pTable = table->next;
+            dvmClearReferenceTable(refs);
+            dvmHeapHeapTableFree(table);
+        }
+    }
+
+    return obj;
+}
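+
+/* A typical consumer drains a large table with a loop like the
+ * following sketch ("processObject" is hypothetical):
+ *
+ *     Object *obj;
+ *     while ((obj = dvmHeapGetNextObjectFromLargeTable(&table)) != NULL) {
+ *         processObject(obj);
+ *     }
+ *     // table is now NULL; every node has been freed along the way
+ */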
+
+void dvmHeapMarkLargeTableRefs(LargeHeapRefTable *table, bool stripLowBits)
+{
+    while (table != NULL) {
+        Object **ref, **lastRef;
+
+        ref = table->refs.table;
+        lastRef = table->refs.nextEntry;
+        if (stripLowBits) {
+            /* This case is used for marking reference objects that
+             * are still waiting for the heap worker thread to get to
+             * them.  The referents pointed to by the references are
+             * marked when a SCHEDULED_REFERENCE_MAGIC is encountered
+             * during scanning.
+             */
+            while (ref < lastRef) {
+                dvmMarkObjectNonNull((Object *)((uintptr_t)*ref++ & ~3));
+            }
+        } else {
+            while (ref < lastRef) {
+                dvmMarkObjectNonNull(*ref++);
+            }
+        }
+        table = table->next;
+    }
+}
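+
+/* Note: the ~3 mask above strips the low-order tag bits defined in
+ * HeapWorker.h (WORKER_CLEAR | WORKER_ENQUEUE), which get stuffed
+ * into the low bits of each scheduled reference pointer.
+ */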
+
diff --git a/vm/alloc/HeapTable.h b/vm/alloc/HeapTable.h
new file mode 100644
index 0000000..784e8fd
--- /dev/null
+++ b/vm/alloc/HeapTable.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _DALVIK_ALLOC_HEAP_TABLE
+#define _DALVIK_ALLOC_HEAP_TABLE
+
+#include "ReferenceTable.h"
+
+typedef ReferenceTable HeapRefTable;
+typedef struct LargeHeapRefTable LargeHeapRefTable;
+typedef struct HeapSource HeapSource;
+
+struct LargeHeapRefTable {
+    LargeHeapRefTable *next;
+    HeapRefTable refs;
+};
+
+bool dvmHeapInitHeapRefTable(HeapRefTable *refs, size_t nelems);
+void dvmHeapFreeHeapRefTable(HeapRefTable *refs);
+void dvmHeapFreeLargeTable(LargeHeapRefTable *table);
+void dvmHeapHeapTableFree(void *ptr);
+bool dvmHeapAddRefToLargeTable(LargeHeapRefTable **tableP, Object *ref);
+void dvmHeapMarkLargeTableRefs(LargeHeapRefTable *table, bool stripLowBits);
+bool dvmHeapAddTableToLargeTable(LargeHeapRefTable **tableP,
+        HeapRefTable *refs);
+Object *dvmHeapGetNextObjectFromLargeTable(LargeHeapRefTable **pTable);
+
+#define dvmHeapAddToHeapRefTable(refs, ptr) \
+            dvmAddToReferenceTable((refs), (ptr))
+
+#define dvmHeapNumHeapRefTableEntries(refs) \
+    ({ \
+        const HeapRefTable *NHRTE_refs = (refs); \
+        dvmReferenceTableEntries(NHRTE_refs); \
+    })
+
+#define dvmHeapRemoveFromHeapRefTable(refs, ptr) \
+            dvmRemoveFromReferenceTable((refs), (refs)->table, (ptr))
+
+#endif  // _DALVIK_ALLOC_HEAP_TABLE
diff --git a/vm/alloc/HeapWorker.c b/vm/alloc/HeapWorker.c
new file mode 100644
index 0000000..0244cca
--- /dev/null
+++ b/vm/alloc/HeapWorker.c
@@ -0,0 +1,499 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * An async worker thread to handle certain heap operations that
+ * need to be done in a separate thread to avoid synchronization
+ * problems.  HeapWorkers and reference clearing/enqueuing are
+ * handled by this thread.
+ */
+#include "Dalvik.h"
+#include "HeapInternal.h"
+
+#include <sys/time.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <signal.h>
+#include <errno.h>  // for ETIMEDOUT, etc.
+
+static void* heapWorkerThreadStart(void* arg);
+
+/*
+ * Initialize any HeapWorker state that Heap.c
+ * cares about.  This lets the GC start before the
+ * HeapWorker thread is initialized.
+ */
+void dvmInitializeHeapWorkerState(void)
+{
+    assert(!gDvm.heapWorkerInitialized);
+
+    dvmInitMutex(&gDvm.heapWorkerLock);
+    pthread_cond_init(&gDvm.heapWorkerCond, NULL);
+    pthread_cond_init(&gDvm.heapWorkerIdleCond, NULL);
+
+    gDvm.heapWorkerInitialized = true;
+}
+
+/*
+ * Crank up the heap worker thread.
+ *
+ * Does not return until the thread is ready for business.
+ */
+bool dvmHeapWorkerStartup(void)
+{
+    assert(!gDvm.haltHeapWorker);
+    assert(!gDvm.heapWorkerReady);
+    assert(gDvm.heapWorkerHandle == 0);
+    assert(gDvm.heapWorkerInitialized);
+
+    /* use heapWorkerLock/heapWorkerCond to communicate readiness */
+    dvmLockMutex(&gDvm.heapWorkerLock);
+
+//BUG: If a GC happens in here or in the new thread while we hold the lock,
+//     the GC will deadlock when trying to acquire heapWorkerLock.
+    if (!dvmCreateInternalThread(&gDvm.heapWorkerHandle,
+                "HeapWorker", heapWorkerThreadStart, NULL))
+    {
+        dvmUnlockMutex(&gDvm.heapWorkerLock);
+        return false;
+    }
+
+    /*
+     * Wait for the heap worker to come up.  We know the thread was created,
+     * so this should not get stuck.
+     */
+    while (!gDvm.heapWorkerReady) {
+        int cc = pthread_cond_wait(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
+        assert(cc == 0);
+    }
+
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+    return true;
+}
+
+/*
+ * Shut down the heap worker thread if it was started.
+ */
+void dvmHeapWorkerShutdown(void)
+{
+    void* threadReturn;
+
+    /* note: assuming that (pthread_t)0 is not a valid thread handle */
+    if (gDvm.heapWorkerHandle != 0) {
+        gDvm.haltHeapWorker = true;
+        dvmSignalHeapWorker(true);
+
+        /*
+         * We may not want to wait for the heapWorkers to complete.  It's
+         * a good idea to do so, in case they're holding some sort of OS
+         * resource that doesn't get reclaimed when the process exits
+         * (e.g. an open temp file).
+         */
+        if (pthread_join(gDvm.heapWorkerHandle, &threadReturn) != 0)
+            LOGW("HeapWorker thread join failed\n");
+        else
+            LOGD("HeapWorker thread has shut down\n");
+
+        gDvm.heapWorkerReady = false;
+    }
+}
+
+/* Make sure that the HeapWorker thread hasn't spent an inordinate
+ * amount of time inside interpreted code (e.g. a finalizer).
+ *
+ * Aborts the VM if the thread appears to be wedged.
+ *
+ * The caller must hold the heapWorkerLock to guarantee an atomic
+ * read of the watchdog values.
+ */
+void dvmAssertHeapWorkerThreadRunning(void)
+{
+    if (gDvm.gcHeap->heapWorkerCurrentObject != NULL) {
+        static const u8 HEAP_WORKER_WATCHDOG_TIMEOUT = 10*1000*1000LL; // 10sec
+
+        u8 heapWorkerInterpStartTime = gDvm.gcHeap->heapWorkerInterpStartTime;
+        u8 now = dvmGetRelativeTimeUsec();
+        u8 delta = now - heapWorkerInterpStartTime;
+
+        u8 heapWorkerInterpCpuStartTime =
+            gDvm.gcHeap->heapWorkerInterpCpuStartTime;
+        u8 nowCpu = dvmGetOtherThreadCpuTimeUsec(gDvm.heapWorkerHandle);
+        u8 deltaCpu = nowCpu - heapWorkerInterpCpuStartTime;
+        (void)deltaCpu;    /* measured for debugging; not used below yet */
+
+        if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT && gDvm.debuggerActive) {
+            /*
+             * Debugger suspension can block the thread indefinitely.  For
+             * best results we should reset this explicitly whenever the
+             * HeapWorker thread is resumed.  Ignoring the yelp isn't
+             * quite right but will do for a quick fix.
+             */
+            LOGI("Debugger is attached -- suppressing HeapWorker watchdog\n");
+            gDvm.gcHeap->heapWorkerInterpStartTime = now;   /* reset timer */
+        } else if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT) {
+            char* desc = dexProtoCopyMethodDescriptor(
+                    &gDvm.gcHeap->heapWorkerCurrentMethod->prototype);
+            LOGE("HeapWorker is wedged: %lldms spent inside %s.%s%s\n",
+                    delta / 1000,
+                    gDvm.gcHeap->heapWorkerCurrentObject->clazz->descriptor,
+                    gDvm.gcHeap->heapWorkerCurrentMethod->name, desc);
+            free(desc);
+            dvmDumpAllThreads(true);
+
+            /* abort the VM */
+            dvmAbort();
+        } else if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT / 2) {
+            char* desc = dexProtoCopyMethodDescriptor(
+                    &gDvm.gcHeap->heapWorkerCurrentMethod->prototype);
+            LOGW("HeapWorker may be wedged: %lldms spent inside %s.%s%s\n",
+                    delta / 1000,
+                    gDvm.gcHeap->heapWorkerCurrentObject->clazz->descriptor,
+                    gDvm.gcHeap->heapWorkerCurrentMethod->name, desc);
+            free(desc);
+        }
+    }
+}
+
+static void callMethod(Thread *self, Object *obj, Method *method)
+{
+    JValue unused;
+
+    /* Keep track of the method we're about to call and
+     * the current time so that other threads can detect
+     * when this thread wedges and provide useful information.
+     */
+    gDvm.gcHeap->heapWorkerInterpStartTime = dvmGetRelativeTimeUsec();
+    gDvm.gcHeap->heapWorkerInterpCpuStartTime = dvmGetThreadCpuTimeUsec();
+    gDvm.gcHeap->heapWorkerCurrentMethod = method;
+    gDvm.gcHeap->heapWorkerCurrentObject = obj;
+
+    /* Call the method.
+     *
+     * Don't hold the lock when executing interpreted
+     * code.  It may suspend, and the GC needs to grab
+     * heapWorkerLock.
+     */
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+    if (false) {
+        /* Log entry/exit; this will likely flood the log enough to
+         * cause "logcat" to drop entries.
+         */
+        char tmpTag[16];
+        snprintf(tmpTag, sizeof(tmpTag), "HW%d", self->systemTid);
+        LOG(LOG_DEBUG, tmpTag, "Call %s\n", method->clazz->descriptor);
+        dvmCallMethod(self, method, obj, &unused);
+        LOG(LOG_DEBUG, tmpTag, " done\n");
+    } else {
+        dvmCallMethod(self, method, obj, &unused);
+    }
+    dvmLockMutex(&gDvm.heapWorkerLock);
+
+    gDvm.gcHeap->heapWorkerCurrentObject = NULL;
+    gDvm.gcHeap->heapWorkerCurrentMethod = NULL;
+    gDvm.gcHeap->heapWorkerInterpStartTime = 0LL;
+
+    /* Exceptions thrown during these calls interrupt
+     * the method, but are otherwise ignored.
+     */
+    if (dvmCheckException(self)) {
+#if DVM_SHOW_EXCEPTION >= 1
+        LOGI("Uncaught exception thrown by finalizer (will be discarded):\n");
+        dvmLogExceptionStackTrace();
+#endif
+        dvmClearException(self);
+    }
+}
+
+/* Process all enqueued heap work, including finalizers and reference
+ * clearing/enqueueing.
+ *
+ * Caller must hold gDvm.heapWorkerLock.
+ */
+static void doHeapWork(Thread *self)
+{
+    Object *obj;
+    HeapWorkerOperation op;
+    int numFinalizersCalled, numReferencesEnqueued;
+#if FANCY_REFERENCE_SUBCLASS
+    int numReferencesCleared = 0;
+#endif
+
+    assert(gDvm.voffJavaLangObject_finalize >= 0);
+#if FANCY_REFERENCE_SUBCLASS
+    assert(gDvm.voffJavaLangRefReference_clear >= 0);
+    assert(gDvm.voffJavaLangRefReference_enqueue >= 0);
+#else
+    assert(gDvm.methJavaLangRefReference_enqueueInternal != NULL);
+#endif
+
+    numFinalizersCalled = 0;
+    numReferencesEnqueued = 0;
+    while ((obj = dvmGetNextHeapWorkerObject(&op)) != NULL) {
+        Method *method = NULL;
+
+        /* Make sure the object hasn't been collected since
+         * being scheduled.
+         */
+        assert(dvmIsValidObject(obj));
+
+        /* Call the appropriate method(s).
+         */
+        if (op == WORKER_FINALIZE) {
+            numFinalizersCalled++;
+            method = obj->clazz->vtable[gDvm.voffJavaLangObject_finalize];
+            assert(dvmCompareNameDescriptorAndMethod("finalize", "()V",
+                            method) == 0);
+            assert(method->clazz != gDvm.classJavaLangObject);
+            callMethod(self, obj, method);
+        } else {
+#if FANCY_REFERENCE_SUBCLASS
+            /* clear() *must* happen before enqueue(), otherwise
+             * a non-clear reference could appear on a reference
+             * queue.
+             */
+            if (op & WORKER_CLEAR) {
+                numReferencesCleared++;
+                method = obj->clazz->vtable[
+                        gDvm.voffJavaLangRefReference_clear];
+                assert(dvmCompareNameDescriptorAndMethod("clear", "()V",
+                                method) == 0);
+                assert(method->clazz != gDvm.classJavaLangRefReference);
+                callMethod(self, obj, method);
+            }
+            if (op & WORKER_ENQUEUE) {
+                numReferencesEnqueued++;
+                method = obj->clazz->vtable[
+                        gDvm.voffJavaLangRefReference_enqueue];
+                assert(dvmCompareNameDescriptorAndMethod("enqueue", "()Z",
+                                method) == 0);
+                /* We call enqueue() even when it isn't overridden,
+                 * so don't assert(!classJavaLangRefReference) here.
+                 */
+                callMethod(self, obj, method);
+            }
+#else
+            assert((op & WORKER_CLEAR) == 0);
+            if (op & WORKER_ENQUEUE) {
+                numReferencesEnqueued++;
+                callMethod(self, obj,
+                        gDvm.methJavaLangRefReference_enqueueInternal);
+            }
+#endif
+        }
+
+        /* Let the GC collect the object.
+         */
+        dvmReleaseTrackedAlloc(obj, self);
+    }
+    LOGV("Called %d finalizers\n", numFinalizersCalled);
+    LOGV("Enqueued %d references\n", numReferencesEnqueued);
+#if FANCY_REFERENCE_SUBCLASS
+    LOGV("Cleared %d overridden references\n", numReferencesCleared);
+#endif
+}
+
+/*
+ * The heap worker thread sits quietly until the GC tells it there's work
+ * to do.
+ */
+static void* heapWorkerThreadStart(void* arg)
+{
+    Thread *self = dvmThreadSelf();
+    int cc;
+
+    UNUSED_PARAMETER(arg);
+
+    LOGV("HeapWorker thread started (threadid=%d)\n", self->threadId);
+
+    /* tell the main thread that we're ready */
+    dvmLockMutex(&gDvm.heapWorkerLock);
+    gDvm.heapWorkerReady = true;
+    cc = pthread_cond_signal(&gDvm.heapWorkerCond);
+    assert(cc == 0);
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+
+    dvmLockMutex(&gDvm.heapWorkerLock);
+    while (!gDvm.haltHeapWorker) {
+        struct timespec trimtime;
+        bool timedwait = false;
+
+        /* We're done running interpreted code for now. */
+        dvmChangeStatus(NULL, THREAD_VMWAIT);
+
+        /* Signal anyone who wants to know when we're done. */
+        cc = pthread_cond_broadcast(&gDvm.heapWorkerIdleCond);
+        assert(cc == 0);
+
+        /* Trim the heap if we were asked to. */
+        trimtime = gDvm.gcHeap->heapWorkerNextTrim;
+        /* A pending trim is encoded as a nonzero timespec.  Test the
+         * fields with ||, since tv_nsec alone may legitimately be zero.
+         */
+        if (trimtime.tv_sec != 0 || trimtime.tv_nsec != 0) {
+            struct timeval now;
+
+            gettimeofday(&now, NULL);
+            if (trimtime.tv_sec < now.tv_sec ||
+                (trimtime.tv_sec == now.tv_sec &&
+                 trimtime.tv_nsec <= now.tv_usec * 1000))
+            {
+                size_t madvisedSizes[HEAP_SOURCE_MAX_HEAP_COUNT];
+
+                /* The heap must be locked before the HeapWorker;
+                 * unroll and re-order the locks.  dvmLockHeap()
+                 * will put us in VMWAIT if necessary.  Once it
+                 * returns, there shouldn't be any contention on
+                 * heapWorkerLock.
+                 */
+                dvmUnlockMutex(&gDvm.heapWorkerLock);
+                dvmLockHeap();
+                dvmLockMutex(&gDvm.heapWorkerLock);
+
+                memset(madvisedSizes, 0, sizeof(madvisedSizes));
+                dvmHeapSourceTrim(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);
+                dvmLogMadviseStats(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);
+
+                dvmUnlockHeap();
+
+                trimtime.tv_sec = 0;
+                trimtime.tv_nsec = 0;
+                gDvm.gcHeap->heapWorkerNextTrim = trimtime;
+            } else {
+                timedwait = true;
+            }
+        }
+
+        /* sleep until signaled */
+        if (timedwait) {
+            cc = pthread_cond_timedwait(&gDvm.heapWorkerCond,
+                    &gDvm.heapWorkerLock, &trimtime);
+            assert(cc == 0 || cc == ETIMEDOUT || cc == EINTR);
+        } else {
+            cc = pthread_cond_wait(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
+            assert(cc == 0);
+        }
+
+        /* dvmChangeStatus() may block;  don't hold heapWorkerLock.
+         */
+        dvmUnlockMutex(&gDvm.heapWorkerLock);
+        dvmChangeStatus(NULL, THREAD_RUNNING);
+        dvmLockMutex(&gDvm.heapWorkerLock);
+        LOGV("HeapWorker is awake\n");
+
+        /* Process any events in the queue.
+         */
+        doHeapWork(self);
+    }
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+
+    LOGD("HeapWorker thread shutting down\n");
+    return NULL;
+}
+
+/*
+ * Wake up the heap worker to let it know that there's work to be done.
+ */
+void dvmSignalHeapWorker(bool shouldLock)
+{
+    int cc;
+
+    if (shouldLock) {
+        dvmLockMutex(&gDvm.heapWorkerLock);
+    }
+
+    cc = pthread_cond_signal(&gDvm.heapWorkerCond);
+    assert(cc == 0);
+
+    if (shouldLock) {
+        dvmUnlockMutex(&gDvm.heapWorkerLock);
+    }
+}
+
+/*
+ * Block until all pending heap worker work has finished.
+ */
+void dvmWaitForHeapWorkerIdle(void)
+{
+    int cc;
+
+    assert(gDvm.heapWorkerReady);
+
+    dvmChangeStatus(NULL, THREAD_VMWAIT);
+
+    dvmLockMutex(&gDvm.heapWorkerLock);
+
+    /* Wake up the heap worker and wait for it to finish. */
+    //TODO(http://b/issue?id=699704): This will deadlock if
+    //     called from finalize(), enqueue(), or clear().  We
+    //     need to detect when this is called from the HeapWorker
+    //     context and just give up.
+    dvmSignalHeapWorker(false);
+    cc = pthread_cond_wait(&gDvm.heapWorkerIdleCond, &gDvm.heapWorkerLock);
+    assert(cc == 0);
+
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+
+    dvmChangeStatus(NULL, THREAD_RUNNING);
+}
+
+/*
+ * Do not return until any pending heap work has finished.  This may
+ * or may not happen in the context of the calling thread.
+ * No exceptions will escape.
+ */
+void dvmRunFinalizationSync(void)
+{
+    if (gDvm.zygote) {
+        assert(!gDvm.heapWorkerReady);
+
+        /* When in zygote mode, there is no heap worker.
+         * Do the work in the current thread.
+         */
+        dvmLockMutex(&gDvm.heapWorkerLock);
+        doHeapWork(dvmThreadSelf());
+        dvmUnlockMutex(&gDvm.heapWorkerLock);
+    } else {
+        /* Outside of zygote mode, we can just ask the
+         * heap worker thread to do the work.
+         */
+        dvmWaitForHeapWorkerIdle();
+    }
+}
+
+/*
+ * Requests that dvmHeapSourceTrim() be called no sooner
+ * than timeoutSec seconds from now.  If timeoutSec
+ * is zero, any pending trim is cancelled.
+ *
+ * Caller must hold heapWorkerLock.
+ */
+void dvmScheduleHeapSourceTrim(size_t timeoutSec)
+{
+    GcHeap *gcHeap = gDvm.gcHeap;
+    struct timespec timeout;
+
+    if (timeoutSec == 0) {
+        timeout.tv_sec = 0;
+        timeout.tv_nsec = 0;
+        /* Don't wake up the thread just to tell it to cancel.
+         * If it wakes up naturally, we can avoid the extra
+         * context switch.
+         */
+    } else {
+        struct timeval now;
+
+        gettimeofday(&now, NULL);
+        timeout.tv_sec = now.tv_sec + timeoutSec;
+        timeout.tv_nsec = now.tv_usec * 1000;
+        dvmSignalHeapWorker(false);
+    }
+    gcHeap->heapWorkerNextTrim = timeout;
+}
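+
+/* Example caller (a sketch; the lock discipline is the caller's
+ * responsibility per the comment above):
+ *
+ *     dvmLockMutex(&gDvm.heapWorkerLock);
+ *     dvmScheduleHeapSourceTrim(5);   // trim no sooner than 5 seconds out
+ *     dvmUnlockMutex(&gDvm.heapWorkerLock);
+ */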
diff --git a/vm/alloc/HeapWorker.h b/vm/alloc/HeapWorker.h
new file mode 100644
index 0000000..c079570
--- /dev/null
+++ b/vm/alloc/HeapWorker.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Manage async heap tasks.
+ */
+#ifndef _DALVIK_ALLOC_HEAP_WORKER
+#define _DALVIK_ALLOC_HEAP_WORKER
+
+/*
+ * Initialize any HeapWorker state that Heap.c
+ * cares about.  This lets the GC start before the
+ * HeapWorker thread is initialized.
+ */
+void dvmInitializeHeapWorkerState(void);
+
+/*
+ * Initialization.  Starts/stops the worker thread.
+ */
+bool dvmHeapWorkerStartup(void);
+void dvmHeapWorkerShutdown(void);
+
+/*
+ * Tell the worker thread to wake up and do work.
+ * If shouldLock is false, the caller must have already
+ * acquired gDvm.heapWorkerLock.
+ */
+void dvmSignalHeapWorker(bool shouldLock);
+
+/*
+ * Block until all pending heap worker work has finished.
+ */
+void dvmWaitForHeapWorkerIdle(void);
+
+/*
+ * Does not return until any pending finalizers have been called.
+ * This may or may not happen in the context of the calling thread.
+ * No exceptions will escape.
+ *
+ * Used by zygote, which doesn't have a HeapWorker thread.
+ */
+void dvmRunFinalizationSync(void);
+
+/*
+ * Requests that dvmHeapSourceTrim() be called no sooner
+ * than timeoutSec seconds from now.  If timeoutSec
+ * is zero, any pending trim is cancelled.
+ *
+ * Caller must hold heapWorkerLock.
+ */
+void dvmScheduleHeapSourceTrim(size_t timeoutSec);
+
+/* Make sure that the HeapWorker thread hasn't spent an inordinate
+ * amount of time inside interpreted code.
+ *
+ * Aborts the VM if the thread appears to be wedged.
+ *
+ * The caller must hold the heapWorkerLock.
+ */
+void dvmAssertHeapWorkerThreadRunning(void);
+
+/*
+ * The type of operation for HeapWorker to perform on an object.
+ */
+typedef enum HeapWorkerOperation {
+    WORKER_FINALIZE = 0,
+
+    /* Required: (WORKER_CLEAR | WORKER_ENQUEUE) <= (4-1)
+     * These values will be stuffed in the low bits of a pointer.
+     */
+    WORKER_CLEAR = (1<<0),
+    WORKER_ENQUEUE = (1<<1),
+} HeapWorkerOperation;
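+
+/* Illustrative encoding/decoding of that low-bit tagging, given some
+ * reference object obj (a sketch, not a public API):
+ *
+ *     Object *tagged = (Object *)((uintptr_t)obj | WORKER_ENQUEUE);
+ *     HeapWorkerOperation op =
+ *             (HeapWorkerOperation)((uintptr_t)tagged & 3);
+ *     Object *plain = (Object *)((uintptr_t)tagged & ~3);
+ */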
+
+/*
+ * Called by the worker thread to get the next object
+ * to finalize/enqueue/clear.  Implemented in Heap.c.
+ *
+ * @param op The operation to perform on the returned object.
+ *           Must be non-NULL.
+ * @return The object to operate on, or NULL.
+ */
+Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op);
+
+#endif /*_DALVIK_ALLOC_HEAP_WORKER*/
diff --git a/vm/alloc/MarkSweep.c b/vm/alloc/MarkSweep.c
new file mode 100644
index 0000000..a0601d7
--- /dev/null
+++ b/vm/alloc/MarkSweep.c
@@ -0,0 +1,1332 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "alloc/HeapBitmap.h"
+#include "alloc/HeapInternal.h"
+#include "alloc/HeapSource.h"
+#include "alloc/MarkSweep.h"
+#include <limits.h>     // for ULONG_MAX
+#include <sys/mman.h>   // for madvise(), mmap()
+#include <cutils/ashmem.h>
+
+#define GC_DEBUG_PARANOID   2
+#define GC_DEBUG_BASIC      1
+#define GC_DEBUG_OFF        0
+#define GC_DEBUG(l)         (GC_DEBUG_LEVEL >= (l))
+
+#if 1
+#define GC_DEBUG_LEVEL      GC_DEBUG_PARANOID
+#else
+#define GC_DEBUG_LEVEL      GC_DEBUG_OFF
+#endif
+
+#define VERBOSE_GC          0
+
+#define GC_LOG_TAG      LOG_TAG "-gc"
+
+#if LOG_NDEBUG
+#define LOGV_GC(...)    ((void)0)
+#define LOGD_GC(...)    ((void)0)
+#else
+#define LOGV_GC(...)    LOG(LOG_VERBOSE, GC_LOG_TAG, __VA_ARGS__)
+#define LOGD_GC(...)    LOG(LOG_DEBUG, GC_LOG_TAG, __VA_ARGS__)
+#endif
+
+#if VERBOSE_GC
+#define LOGVV_GC(...)   LOGV_GC(__VA_ARGS__)
+#else
+#define LOGVV_GC(...)   ((void)0)
+#endif
+
+#define LOGI_GC(...)    LOG(LOG_INFO, GC_LOG_TAG, __VA_ARGS__)
+#define LOGW_GC(...)    LOG(LOG_WARN, GC_LOG_TAG, __VA_ARGS__)
+#define LOGE_GC(...)    LOG(LOG_ERROR, GC_LOG_TAG, __VA_ARGS__)
+
+#define LOG_SCAN(...)   LOGV_GC("SCAN: " __VA_ARGS__)
+#define LOG_MARK(...)   LOGV_GC("MARK: " __VA_ARGS__)
+#define LOG_SWEEP(...)  LOGV_GC("SWEEP: " __VA_ARGS__)
+#define LOG_REF(...)    LOGV_GC("REF: " __VA_ARGS__)
+
+#define LOGV_SCAN(...)  LOGVV_GC("SCAN: " __VA_ARGS__)
+#define LOGV_MARK(...)  LOGVV_GC("MARK: " __VA_ARGS__)
+#define LOGV_SWEEP(...) LOGVV_GC("SWEEP: " __VA_ARGS__)
+#define LOGV_REF(...)   LOGVV_GC("REF: " __VA_ARGS__)
+
+#if WITH_OBJECT_HEADERS
+u2 gGeneration = 0;
+static const Object *gMarkParent = NULL;
+#endif
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+#define ALIGN_UP_TO_PAGE_SIZE(p) \
+    (((size_t)(p) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))
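+/* e.g. with PAGE_SIZE 4096: ALIGN_UP_TO_PAGE_SIZE(1) == 4096,
+ * ALIGN_UP_TO_PAGE_SIZE(4096) == 4096, and
+ * ALIGN_UP_TO_PAGE_SIZE(4097) == 8192.
+ */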
+
+/* Do not cast the result of this to a boolean; the only set bit
+ * may be > 1<<8.
+ */
+static inline long isMarked(const DvmHeapChunk *hc, const GcMarkContext *ctx)
+        __attribute__((always_inline));
+static inline long isMarked(const DvmHeapChunk *hc, const GcMarkContext *ctx)
+{
+    return dvmHeapBitmapIsObjectBitSetInList(ctx->bitmaps, ctx->numBitmaps, hc);
+}
+
+static bool
+createMarkStack(GcMarkStack *stack)
+{
+    const Object **limit;
+    size_t size;
+    int fd;
+
+    /* Create a stack big enough for the worst possible case,
+     * where the heap is perfectly full of the smallest object.
+     * TODO: be better about memory usage; use a smaller stack with
+     *       overflow detection and recovery.
+     */
+    size = dvmHeapSourceGetIdealFootprint() * sizeof(Object*) /
+            (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
+    size = ALIGN_UP_TO_PAGE_SIZE(size);
+    fd = ashmem_create_region("dalvik-heap-markstack", size);
+    if (fd < 0) {
+        LOGE_GC("Could not create %d-byte ashmem mark stack\n", size);
+        return false;
+    }
+    limit = (const Object **)mmap(NULL, size, PROT_READ | PROT_WRITE,
+            MAP_PRIVATE, fd, 0);
+    close(fd);
+    if (limit == MAP_FAILED) {
+        LOGE_GC("Could not mmap %d-byte ashmem mark stack\n", size);
+        return false;
+    }
+
+    memset(stack, 0, sizeof(*stack));
+    stack->limit = limit;
+    stack->base = (const Object **)((uintptr_t)limit + size);
+    stack->top = stack->base;
+
+    return true;
+}
+
+static void
+destroyMarkStack(GcMarkStack *stack)
+{
+    munmap((char *)stack->limit,
+            (uintptr_t)stack->base - (uintptr_t)stack->limit);
+    memset(stack, 0, sizeof(*stack));
+}
+
+#define MARK_STACK_PUSH(stack, obj) \
+    do { \
+        *--(stack).top = (obj); \
+    } while (false)
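+
+/* The stack grows downward: "base" is the high end, "top" starts at
+ * "base" and is pre-decremented toward "limit" on each push, so an
+ * overflow would manifest as top == limit.  The matching pop (see
+ * processMarkStack()) is simply *stack.top++.
+ */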
+
+bool
+dvmHeapBeginMarkStep(void)
+{
+    GcMarkContext *mc = &gDvm.gcHeap->markContext;
+    HeapBitmap objectBitmaps[HEAP_SOURCE_MAX_HEAP_COUNT];
+    ssize_t numBitmaps;  /* signed: dvmHeapSourceGetObjectBitmaps returns <0 on error */
+
+    if (!createMarkStack(&mc->stack)) {
+        return false;
+    }
+
+    numBitmaps = dvmHeapSourceGetObjectBitmaps(objectBitmaps,
+            HEAP_SOURCE_MAX_HEAP_COUNT);
+    if (numBitmaps <= 0) {
+        return false;
+    }
+
+    /* Create mark bitmaps that cover the same ranges as the
+     * current object bitmaps.
+     */
+    if (!dvmHeapBitmapInitListFromTemplates(mc->bitmaps, objectBitmaps,
+            numBitmaps, "mark"))
+    {
+        return false;
+    }
+
+    mc->numBitmaps = numBitmaps;
+    mc->finger = NULL;
+
+#if WITH_OBJECT_HEADERS
+    gGeneration++;
+#endif
+
+    return true;
+}
+
+static long setAndReturnMarkBit(GcMarkContext *ctx, const DvmHeapChunk *hc)
+        __attribute__((always_inline));
+static long
+setAndReturnMarkBit(GcMarkContext *ctx, const DvmHeapChunk *hc)
+{
+    return dvmHeapBitmapSetAndReturnObjectBitInList(ctx->bitmaps,
+        ctx->numBitmaps, hc);
+}
+
+static void _markObjectNonNullCommon(const Object *obj, GcMarkContext *ctx,
+        bool checkFinger, bool forceStack)
+        __attribute__((always_inline));
+static void
+_markObjectNonNullCommon(const Object *obj, GcMarkContext *ctx,
+        bool checkFinger, bool forceStack)
+{
+    DvmHeapChunk *hc;
+
+    assert(obj != NULL);
+
+#if GC_DEBUG(GC_DEBUG_PARANOID)
+//TODO: make sure we're locked
+    assert(obj != (Object *)gDvm.unlinkedJavaLangClass);
+    assert(dvmIsValidObject(obj));
+#endif
+
+    hc = ptr2chunk(obj);
+    if (!setAndReturnMarkBit(ctx, hc)) {
+        /* This object was not previously marked.
+         */
+        if (forceStack || (checkFinger && (void *)hc < ctx->finger)) {
+            /* This object will need to go on the mark stack.
+             */
+            MARK_STACK_PUSH(ctx->stack, obj);
+        }
+
+#if WITH_OBJECT_HEADERS
+        if (hc->scanGeneration != hc->markGeneration) {
+            LOGE("markObject(0x%08x): wasn't scanned last time\n", (uint)obj);
+            dvmAbort();
+        }
+        if (hc->markGeneration == gGeneration) {
+            LOGE("markObject(0x%08x): already marked this generation\n",
+                    (uint)obj);
+            dvmAbort();
+        }
+        hc->oldMarkGeneration = hc->markGeneration;
+        hc->markGeneration = gGeneration;
+        hc->markFingerOld = hc->markFinger;
+        hc->markFinger = ctx->finger;
+        if (gMarkParent != NULL) {
+            hc->parentOld = hc->parent;
+            hc->parent = gMarkParent;
+        } else {
+            hc->parent = (const Object *)((uintptr_t)hc->parent | 1);
+        }
+        hc->markCount++;
+#endif
+#if WITH_HPROF
+        if (gDvm.gcHeap->hprofContext != NULL) {
+            hprofMarkRootObject(gDvm.gcHeap->hprofContext, obj, 0);
+        }
+#endif
+#if DVM_TRACK_HEAP_MARKING
+        gDvm.gcHeap->markCount++;
+        gDvm.gcHeap->markSize += dvmHeapSourceChunkSize((void *)hc) +
+                HEAP_SOURCE_CHUNK_OVERHEAD;
+#endif
+
+        /* obj->clazz can be NULL if we catch an object between
+         * dvmMalloc() and DVM_OBJECT_INIT().  This is ok.
+         */
+        LOGV_MARK("0x%08x %s\n", (uint)obj,
+                obj->clazz == NULL ? "<null class>" : obj->clazz->name);
+    }
+}
+
+/* Used to mark objects when recursing.  Recursion is done by moving
+ * the finger across the bitmaps in address order and marking child
+ * objects.  Any newly-marked objects whose addresses are lower than
+ * the finger won't be visited by the bitmap scan, so those objects
+ * need to be added to the mark stack.
+ */
+static void
+markObjectNonNull(const Object *obj, GcMarkContext *ctx)
+{
+    _markObjectNonNullCommon(obj, ctx, true, false);
+}
+
+#define markObject(obj, ctx) \
+    do { \
+        Object *MO_obj_ = (Object *)(obj); \
+        if (MO_obj_ != NULL) { \
+            markObjectNonNull(MO_obj_, (ctx)); \
+        } \
+    } while (false)
+
+/* If the object hasn't already been marked, mark it and
+ * schedule it to be scanned for references.
+ *
+ * obj may not be NULL.  The macro dvmMarkObject() should
+ * be used in situations where a reference may be NULL.
+ *
+ * This function may only be called when marking the root
+ * set.  When recursing, use the internal markObject[NonNull]().
+ */
+void
+dvmMarkObjectNonNull(const Object *obj)
+{
+    _markObjectNonNullCommon(obj, &gDvm.gcHeap->markContext, false, false);
+}
+
+/* Mark the set of root objects.
+ *
+ * Things we need to scan:
+ * - System classes defined by root classloader
+ * - For each thread:
+ *   - Interpreted stack, from top to "curFrame"
+ *     - Dalvik registers (args + local vars)
+ *   - JNI local references
+ *   - Automatic VM local references (TrackedAlloc)
+ *   - Associated Thread/VMThread object
+ *   - ThreadGroups (could track & start with these instead of working
+ *     upward from Threads)
+ *   - Exception currently being thrown, if present
+ * - JNI global references
+ * - Interned string table
+ * - Primitive classes
+ * - Special objects
+ *   - gDvm.outOfMemoryObj
+ * - Objects allocated with ALLOC_NO_GC
+ * - Objects pending finalization (but not yet finalized)
+ * - Objects in debugger object registry
+ *
+ * Don't need:
+ * - Native stack (for in-progress stuff in the VM)
+ *   - The TrackedAlloc stuff watches all native VM references.
+ */
+void dvmHeapMarkRootSet(void)
+{
+    HeapRefTable *refs;
+    GcHeap *gcHeap;
+    Object **op;
+
+    gcHeap = gDvm.gcHeap;
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_STICKY_CLASS, 0);
+
+    LOG_SCAN("root class loader\n");
+    dvmGcScanRootClassLoader();
+    LOG_SCAN("primitive classes\n");
+    dvmGcScanPrimitiveClasses();
+
+    /* dvmGcScanRootThreadGroups() sets a bunch of
+     * different scan states internally.
+     */
+    HPROF_CLEAR_GC_SCAN_STATE();
+
+    LOG_SCAN("root thread groups\n");
+    dvmGcScanRootThreadGroups();
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_INTERNED_STRING, 0);
+
+    LOG_SCAN("interned strings\n");
+    dvmGcScanInternedStrings();
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_JNI_GLOBAL, 0);
+
+    LOG_SCAN("JNI global refs\n");
+    dvmGcMarkJniGlobalRefs();
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_REFERENCE_CLEANUP, 0);
+
+    LOG_SCAN("pending reference operations\n");
+    dvmHeapMarkLargeTableRefs(gcHeap->referenceOperations, true);
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
+
+    LOG_SCAN("pending finalizations\n");
+    dvmHeapMarkLargeTableRefs(gcHeap->pendingFinalizationRefs, false);
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_DEBUGGER, 0);
+
+    LOG_SCAN("debugger refs\n");
+    dvmGcMarkDebuggerRefs();
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_VM_INTERNAL, 0);
+
+    /* Mark all ALLOC_NO_GC objects.
+     */
+    LOG_SCAN("ALLOC_NO_GC objects\n");
+    refs = &gcHeap->nonCollectableRefs;
+    op = refs->table;
+    while ((uintptr_t)op < (uintptr_t)refs->nextEntry) {
+        dvmMarkObjectNonNull(*(op++));
+    }
+
+    /* Mark any special objects we have sitting around.
+     */
+    LOG_SCAN("special objects\n");
+    dvmMarkObjectNonNull(gDvm.outOfMemoryObj);
+    dvmMarkObjectNonNull(gDvm.internalErrorObj);
+//TODO: scan object references sitting in gDvm;  use pointer begin & end
+
+    HPROF_CLEAR_GC_SCAN_STATE();
+}
+
+/*
+ * Nothing past this point is allowed to use dvmMarkObject*().
+ * Scanning/recursion must use markObject*(), which takes the
+ * finger into account.
+ */
+#define dvmMarkObjectNonNull __dont_use_dvmMarkObjectNonNull__
+
+
+/* Mark all of a ClassObject's interfaces.
+ */
+static void markInterfaces(const ClassObject *clazz, GcMarkContext *ctx)
+{
+    ClassObject **interfaces;
+    int interfaceCount;
+    int i;
+
+    /* Mark all interfaces.
+     */
+    interfaces = clazz->interfaces;
+    interfaceCount = clazz->interfaceCount;
+    for (i = 0; i < interfaceCount; i++) {
+        markObjectNonNull((Object *)*interfaces, ctx);
+        interfaces++;
+    }
+}
+
+/* Mark all objects referred to by a ClassObject's static fields.
+ */
+static void scanStaticFields(const ClassObject *clazz, GcMarkContext *ctx)
+{
+    StaticField *f;
+    int i;
+
+    //TODO: Optimize this with a bit vector or something
+    f = clazz->sfields;
+    for (i = 0; i < clazz->sfieldCount; i++) {
+        char c = f->field.signature[0];
+        if (c == '[' || c == 'L') {
+            /* It's an array or class reference.
+             */
+            markObject((Object *)f->value.l, ctx);
+        }
+        f++;
+    }
+}
+
+/* Mark all objects referred to by a DataObject's instance fields.
+ */
+static void scanInstanceFields(const DataObject *obj, ClassObject *clazz,
+        GcMarkContext *ctx)
+{
+//TODO: Optimize this by avoiding walking the superclass chain
+    while (clazz != NULL) {
+        InstField *f;
+        int i;
+
+        /* All of the fields that contain object references
+         * are guaranteed to be at the beginning of the ifields list.
+         */
+        f = clazz->ifields;
+        for (i = 0; i < clazz->ifieldRefCount; i++) {
+            /* Mark the array or object reference.
+             * May be NULL.
+             *
+             * Note that, per the comment on struct InstField,
+             * f->byteOffset is the offset from the beginning of
+             * obj, not the offset into obj->instanceData.
+             */
+            markObject(dvmGetFieldObject((Object*)obj, f->byteOffset), ctx);
+            f++;
+        }
+
+        /* This will be NULL when we hit java.lang.Object
+         */
+        clazz = clazz->super;
+    }
+}
+
+/* Mark all objects referred to by the array's contents.
+ */
+static void scanObjectArray(const ArrayObject *array, GcMarkContext *ctx)
+{
+    Object **contents;
+    u4 length;
+    u4 i;
+
+    contents = (Object **)array->contents;
+    length = array->length;
+
+    for (i = 0; i < length; i++) {
+        markObject(*contents, ctx); // may be NULL
+        contents++;
+    }
+}
+
+/* Mark all objects referred to by the ClassObject.
+ */
+static void scanClassObject(const ClassObject *clazz, GcMarkContext *ctx)
+{
+    LOGV_SCAN("---------> %s\n", clazz->name);
+
+    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
+        /* We're an array; mark the class object of the contents
+         * of the array.
+         *
+         * Note that we won't necessarily reach the array's element
+         * class by scanning the array contents;  the array may be
+         * zero-length, or may only contain null objects.
+         */
+        markObjectNonNull((Object *)clazz->elementClass, ctx);
+    }
+
+    /* We scan these explicitly in case the only remaining
+     * reference to a particular class object is via a data
+     * object;  we may not be guaranteed to reach all
+     * live class objects via a classloader.
+     */
+    markObject((Object *)clazz->super, ctx);  // may be NULL (java.lang.Object)
+    markObject(clazz->classLoader, ctx);      // may be NULL
+
+    scanStaticFields(clazz, ctx);
+    markInterfaces(clazz, ctx);
+}
+
+/* Mark all objects that obj refers to.
+ *
+ * Called on every object in markList.
+ */
+static void scanObject(const Object *obj, GcMarkContext *ctx)
+{
+    ClassObject *clazz;
+
+    assert(dvmIsValidObject(obj));
+    LOGV_SCAN("0x%08x %s\n", (uint)obj, obj->clazz->name);
+
+#if WITH_HPROF
+    if (gDvm.gcHeap->hprofContext != NULL) {
+        hprofDumpHeapObject(gDvm.gcHeap->hprofContext, obj);
+    }
+#endif
+
+    /* Get and mark the class object for this particular instance.
+     */
+    clazz = obj->clazz;
+    if (clazz == NULL) {
+        /* This can happen if we catch an object between
+         * dvmMalloc() and DVM_OBJECT_INIT().  The object
+         * won't contain any references yet, so we can
+         * just skip it.
+         */
+        return;
+    } else if (clazz == gDvm.unlinkedJavaLangClass) {
+        /* This class hasn't been linked yet.  We're guaranteed
+         * that the object doesn't contain any references that
+         * aren't already tracked, so we can skip scanning it.
+         *
+         * NOTE: unlinkedJavaLangClass is not on the heap, so
+         * it's very important that we don't try marking it.
+         */
+        return;
+    }
+#if WITH_OBJECT_HEADERS
+    gMarkParent = obj;
+    if (ptr2chunk(obj)->scanGeneration == gGeneration) {
+        LOGE("object 0x%08x was already scanned this generation\n",
+                (uintptr_t)obj);
+        dvmAbort();
+    }
+    ptr2chunk(obj)->oldScanGeneration = ptr2chunk(obj)->scanGeneration;
+    ptr2chunk(obj)->scanGeneration = gGeneration;
+    ptr2chunk(obj)->scanCount++;
+#endif
+
+    assert(dvmIsValidObject((Object *)clazz));
+    markObjectNonNull((Object *)clazz, ctx);
+
+    /* Mark any references in this object.
+     */
+    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
+        /* It's an array object.
+         */
+        if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
+            /* It's an array of object references.
+             */
+            scanObjectArray((ArrayObject *)obj, ctx);
+        }
+        // else there's nothing else to scan
+    } else {
+        /* It's a DataObject-compatible object.
+         */
+        scanInstanceFields((DataObject *)obj, clazz, ctx);
+
+        if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
+            GcHeap *gcHeap = gDvm.gcHeap;
+            Object *referent;
+
+            /* It's a subclass of java/lang/ref/Reference.
+             * The fields in this class have been arranged
+             * such that scanInstanceFields() did not actually
+             * mark the "referent" field;  we need to handle
+             * it specially.
+             *
+             * If the referent already has a strong mark (isMarked(referent)),
+             * we don't care about its reference status.
+             */
+            referent = dvmGetFieldObject(obj,
+                    gDvm.offJavaLangRefReference_referent);
+            if (referent != NULL &&
+                    !isMarked(ptr2chunk(referent), &gcHeap->markContext))
+            {
+                u4 refFlags;
+
+                if (gcHeap->markAllReferents) {
+                    LOG_REF("Hard-marking a reference\n");
+
+                    /* Don't bother with normal reference-following
+                     * behavior, just mark the referent.  This should
+                     * only be used when following objects that just
+                     * became scheduled for finalization.
+                     */
+                    markObjectNonNull(referent, ctx);
+                    goto skip_reference;
+                }
+
+                /* See if this reference was handled by a previous GC.
+                 */
+                if (dvmGetFieldObject(obj,
+                            gDvm.offJavaLangRefReference_vmData) ==
+                        SCHEDULED_REFERENCE_MAGIC)
+                {
+                    LOG_REF("Skipping scheduled reference\n");
+
+                    /* Don't reschedule it, but make sure that its
+                     * referent doesn't get collected (in case it's
+                     * a PhantomReference and wasn't cleared automatically).
+                     */
+                    //TODO: Mark these after handling all new refs of
+                    //      this strength, in case the new refs refer
+                    //      to the same referent.  Not a very common
+                    //      case, though.
+                    markObjectNonNull(referent, ctx);
+                    goto skip_reference;
+                }
+
+                /* Find out what kind of reference is pointing
+                 * to referent.
+                 */
+                refFlags = GET_CLASS_FLAG_GROUP(clazz,
+                    CLASS_ISREFERENCE |
+                    CLASS_ISWEAKREFERENCE |
+                    CLASS_ISPHANTOMREFERENCE);
+
+            /* We use the vmData field of Reference objects
+             * as a next pointer in a singly-linked list.
+             * That way, we don't need to allocate any memory
+             * while we're doing a GC.
+             */
+#define ADD_REF_TO_LIST(list, ref) \
+            do { \
+                Object *ARTL_ref_ = (/*de-const*/Object *)(ref); \
+                dvmSetFieldObject(ARTL_ref_, \
+                        gDvm.offJavaLangRefReference_vmData, list); \
+                list = ARTL_ref_; \
+            } while (false)
+
+                /* At this stage, we just keep track of all of
+                 * the live references that we've seen.  Later,
+                 * we'll walk through each of these lists and
+                 * deal with the referents.
+                 */
+                if (refFlags == CLASS_ISREFERENCE) {
+                    /* It's a soft reference.  Depending on the state,
+                     * we'll attempt to collect all of them, some of
+                     * them, or none of them.
+                     */
+                    if (gcHeap->softReferenceCollectionState ==
+                            SR_COLLECT_NONE)
+                    {
+                sr_collect_none:
+                        markObjectNonNull(referent, ctx);
+                    } else if (gcHeap->softReferenceCollectionState ==
+                            SR_COLLECT_ALL)
+                    {
+                sr_collect_all:
+                        ADD_REF_TO_LIST(gcHeap->softReferences, obj);
+                    } else {
+                        /* We'll only try to collect half of the
+                         * referents.
+                         */
+                        if (gcHeap->softReferenceColor++ & 1) {
+                            goto sr_collect_none;
+                        }
+                        goto sr_collect_all;
+                    }
+                } else {
+                    /* It's a weak or phantom reference.
+                     * Clearing CLASS_ISREFERENCE will reveal which.
+                     */
+                    refFlags &= ~CLASS_ISREFERENCE;
+                    if (refFlags == CLASS_ISWEAKREFERENCE) {
+                        ADD_REF_TO_LIST(gcHeap->weakReferences, obj);
+                    } else if (refFlags == CLASS_ISPHANTOMREFERENCE) {
+                        ADD_REF_TO_LIST(gcHeap->phantomReferences, obj);
+                    } else {
+                        assert(!"Unknown reference type");
+                    }
+                }
+#undef ADD_REF_TO_LIST
+            }
+        }
+
+    skip_reference:
+        /* If this is a class object, mark various other things that
+         * its internals point to.
+         *
+         * All class objects are instances of java.lang.Class,
+         * including the java.lang.Class class object.
+         */
+        if (clazz == gDvm.classJavaLangClass) {
+            scanClassObject((ClassObject *)obj, ctx);
+        }
+    }
+
+#if WITH_OBJECT_HEADERS
+    gMarkParent = NULL;
+#endif
+}
+
+static void
+processMarkStack(GcMarkContext *ctx)
+{
+    const Object **const base = ctx->stack.base;
+
+    /* Scan anything that's on the mark stack.
+     * We can't use the bitmaps anymore, so use
+     * a finger that points past the end of them.
+     */
+    ctx->finger = (void *)ULONG_MAX;
+    while (ctx->stack.top != base) {
+        scanObject(*ctx->stack.top++, ctx);
+    }
+}
+
+#ifndef NDEBUG
+static uintptr_t gLastFinger = 0;
+#endif
+
+static bool
+scanBitmapCallback(size_t numPtrs, void **ptrs, const void *finger, void *arg)
+{
+    GcMarkContext *ctx = (GcMarkContext *)arg;
+    size_t i;
+
+#ifndef NDEBUG
+    assert((uintptr_t)finger >= gLastFinger);
+    gLastFinger = (uintptr_t)finger;
+#endif
+
+    ctx->finger = finger;
+    for (i = 0; i < numPtrs; i++) {
+        /* The pointers we're getting back are DvmHeapChunks,
+         * not Objects.
+         */
+        scanObject(chunk2ptr(*ptrs++), ctx);
+    }
+
+    return true;
+}
+
+/* Given bitmaps with the root set marked, find and mark all
+ * reachable objects.  When this returns, the entire set of
+ * live objects will be marked and the mark stack will be empty.
+ */
+void dvmHeapScanMarkedObjects(void)
+{
+    GcMarkContext *ctx = &gDvm.gcHeap->markContext;
+
+    assert(ctx->finger == NULL);
+
+    /* The bitmaps currently have bits set for the root set.
+     * Walk across the bitmaps and scan each object.
+     */
+#ifndef NDEBUG
+    gLastFinger = 0;
+#endif
+    dvmHeapBitmapWalkList(ctx->bitmaps, ctx->numBitmaps,
+            scanBitmapCallback, ctx);
+
+    /* We've walked the mark bitmaps.  Scan anything that's
+     * left on the mark stack.
+     */
+    processMarkStack(ctx);
+
+    LOG_SCAN("done with marked objects\n");
+}
+
+/** @return true if we need to schedule a call to clear().
+ */
+static bool clearReference(Object *reference)
+{
+    /* This is what the default implementation of Reference.clear()
+     * does.  We're required to clear all references to a given
+     * referent atomically, so we can't pop in and out of interp
+     * code each time.
+     *
+     * Also, someone may have subclassed one of the basic Reference
+     * types, overriding clear().  We can't trust the clear()
+     * implementation to call super.clear();  we cannot let clear()
+     * resurrect the referent.  If we clear it here, we can safely
+     * call any overriding implementations.
+     */
+    dvmSetFieldObject(reference,
+            gDvm.offJavaLangRefReference_referent, NULL);
+
+#if FANCY_REFERENCE_SUBCLASS
+    /* See if clear() has actually been overridden.  If so,
+     * we need to schedule a call to it before calling enqueue().
+     */
+    if (reference->clazz->vtable[gDvm.voffJavaLangRefReference_clear]->clazz !=
+            gDvm.classJavaLangRefReference)
+    {
+        /* clear() has been overridden;  return true to indicate
+         * that we need to schedule a call to the real clear()
+         * implementation.
+         */
+        return true;
+    }
+#endif
+
+    return false;
+}
+
+/** @return true if we need to schedule a call to enqueue().
+ */
+static bool enqueueReference(Object *reference)
+{
+#if FANCY_REFERENCE_SUBCLASS
+    /* See if this reference class has overridden enqueue();
+     * if not, we can take a shortcut.
+     */
+    if (reference->clazz->vtable[gDvm.voffJavaLangRefReference_enqueue]->clazz
+            == gDvm.classJavaLangRefReference)
+#endif
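+    /* Note: when FANCY_REFERENCE_SUBCLASS is disabled, the if above
+     * compiles away entirely and the block below always executes.
+     */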
+    {
+        Object *queue = dvmGetFieldObject(reference,
+                gDvm.offJavaLangRefReference_queue);
+        Object *queueNext = dvmGetFieldObject(reference,
+                gDvm.offJavaLangRefReference_queueNext);
+        if (queue == NULL || queueNext != NULL) {
+            /* There is no queue, or the reference has already
+             * been enqueued.  The Reference.enqueue() method
+             * will do nothing even if we call it.
+             */
+            return false;
+        }
+    }
+
+    /* We need to call enqueue(), but if we called it from
+     * here we'd probably deadlock.  Schedule a call.
+     */
+    return true;
+}
+
+/* All objects for stronger reference levels have been
+ * marked before this is called.
+ */
+void dvmHeapHandleReferences(Object *refListHead, enum RefType refType)
+{
+    Object *reference;
+    GcMarkContext *markContext = &gDvm.gcHeap->markContext;
+    const int offVmData = gDvm.offJavaLangRefReference_vmData;
+    const int offReferent = gDvm.offJavaLangRefReference_referent;
+    bool workRequired = false;
+
+    size_t numCleared = 0;
+    size_t numEnqueued = 0;
+    reference = refListHead;
+    while (reference != NULL) {
+        Object *next;
+        Object *referent;
+
+        /* Pull the interesting fields out of the Reference object.
+         */
+        next = dvmGetFieldObject(reference, offVmData);
+        referent = dvmGetFieldObject(reference, offReferent);
+
+        //TODO: when handling REF_PHANTOM, unlink any references
+        //      that fail this initial if().  We need to re-walk
+        //      the list, and it would be nice to avoid the extra
+        //      work.
+        if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
+            bool schedClear, schedEnqueue;
+
+            /* This is the strongest reference that refers to referent.
+             * Do the right thing.
+             */
+            switch (refType) {
+            case REF_SOFT:
+            case REF_WEAK:
+                schedClear = clearReference(reference);
+                schedEnqueue = enqueueReference(reference);
+                break;
+            case REF_PHANTOM:
+                /* PhantomReferences are not cleared automatically.
+                 * Until someone clears it (or the reference itself
+                 * is collected), the referent must remain alive.
+                 *
+                 * It's necessary to fully mark the referent because
+                 * it will still be present during the next GC, and
+                 * all objects that it points to must be valid.
+                 * (The referent will be marked outside of this loop,
+             * after handling all references of this strength, in
+                 * case multiple references point to the same object.)
+                 */
+                schedClear = false;
+
+                /* A PhantomReference is only useful with a
+                 * queue, but since it's possible to create one
+                 * without a queue, we need to check.
+                 */
+                schedEnqueue = enqueueReference(reference);
+                break;
+            default:
+                assert(!"Bad reference type");
+                schedClear = false;
+                schedEnqueue = false;
+                break;
+            }
+            numCleared += schedClear ? 1 : 0;
+            numEnqueued += schedEnqueue ? 1 : 0;
+
+            if (schedClear || schedEnqueue) {
+                uintptr_t workBits;
+
+                /* Stuff the clear/enqueue bits in the bottom of
+                 * the pointer.  Assumes that objects are at least
+                 * 4-byte aligned, leaving the low two bits free.
+                 *
+                 * Note that we are adding the *Reference* (which
+                 * is by definition already marked at this point) to
+                 * this list; we're not adding the referent (which
+                 * has already been cleared).
+                 */
+                assert(((intptr_t)reference & 3) == 0);
+                assert(((WORKER_CLEAR | WORKER_ENQUEUE) & ~3) == 0);
+                workBits = (schedClear ? WORKER_CLEAR : 0) |
+                           (schedEnqueue ? WORKER_ENQUEUE : 0);
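+                /* (The consumer of referenceOperations is expected to
+                 * strip these bits, e.g. (Object *)((uintptr_t)op & ~3),
+                 * before touching the Reference.)
+                 */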
+                if (!dvmHeapAddRefToLargeTable(
+                        &gDvm.gcHeap->referenceOperations,
+                        (Object *)((uintptr_t)reference | workBits)))
+                {
+                    LOGE_HEAP("dvmMalloc(): no room for any more "
+                            "reference operations\n");
+                    dvmAbort();
+                }
+                workRequired = true;
+            }
+
+            if (refType != REF_PHANTOM) {
+                /* Let later GCs know not to reschedule this reference.
+                 */
+                dvmSetFieldObject(reference, offVmData,
+                        SCHEDULED_REFERENCE_MAGIC);
+            } // else this is handled later for REF_PHANTOM
+
+        } // else there was a stronger reference to the referent.
+
+        reference = next;
+    }
+#define refType2str(r) \
+    ((r) == REF_SOFT ? "soft" : ( \
+     (r) == REF_WEAK ? "weak" : ( \
+     (r) == REF_PHANTOM ? "phantom" : "UNKNOWN" )))
+    LOGD_HEAP("dvmHeapHandleReferences(): cleared %zd, enqueued %zd "
+            "%s references\n", numCleared, numEnqueued, refType2str(refType));
+
+    /* Walk though the reference list again, and mark any non-clear/marked
+     * referents.  Only PhantomReferences can have non-clear referents
+     * at this point.
+     */
+    if (refType == REF_PHANTOM) {
+        bool scanRequired = false;
+
+        HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_REFERENCE_CLEANUP, 0);
+        reference = refListHead;
+        while (reference != NULL) {
+            Object *next;
+            Object *referent;
+
+            /* Pull the interesting fields out of the Reference object.
+             */
+            next = dvmGetFieldObject(reference, offVmData);
+            referent = dvmGetFieldObject(reference, offReferent);
+
+            if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
+                markObjectNonNull(referent, markContext);
+                scanRequired = true;
+
+                /* Let later GCs know not to reschedule this reference.
+                 */
+                dvmSetFieldObject(reference, offVmData,
+                        SCHEDULED_REFERENCE_MAGIC);
+            }
+
+            reference = next;
+        }
+        HPROF_CLEAR_GC_SCAN_STATE();
+
+        if (scanRequired) {
+            processMarkStack(markContext);
+        }
+    }
+
+    if (workRequired) {
+        dvmSignalHeapWorker(false);
+    }
+}
+
+
+/* Find unreachable objects that need to be finalized,
+ * and schedule them for finalization.
+ */
+void dvmHeapScheduleFinalizations()
+{
+    HeapRefTable newPendingRefs;
+    LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
+    Object **ref;
+    Object **lastRef;
+    size_t totalPendCount;
+    GcMarkContext *markContext = &gDvm.gcHeap->markContext;
+
+    /*
+     * All reachable objects have been marked.
+     * Any unmarked finalizable objects need to be finalized.
+     */
+
+    /* Create a table that the new pending refs will
+     * be added to.
+     */
+    if (!dvmHeapInitHeapRefTable(&newPendingRefs, 128)) {
+        //TODO: mark all finalizable refs and hope that
+        //      we can schedule them next time.  Watch out,
+        //      because we may be expecting to free up space
+        //      by calling finalizers.
+        LOGE_GC("dvmHeapScheduleFinalizations(): no room for "
+                "pending finalizations\n");
+        dvmAbort();
+    }
+
+    /* Walk through finalizableRefs and move any unmarked references
+     * to the list of new pending refs.
+     */
+    totalPendCount = 0;
+    while (finRefs != NULL) {
+        Object **gapRef;
+        size_t newPendCount = 0;
+
+        gapRef = ref = finRefs->refs.table;
+        lastRef = finRefs->refs.nextEntry;
+        while (ref < lastRef) {
+            DvmHeapChunk *hc;
+
+            hc = ptr2chunk(*ref);
+            if (!isMarked(hc, markContext)) {
+                if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
+                    //TODO: add the current table and allocate
+                    //      a new, smaller one.
+                    LOGE_GC("dvmHeapScheduleFinalizations(): "
+                            "no room for any more pending finalizations: %zd\n",
+                            dvmHeapNumHeapRefTableEntries(&newPendingRefs));
+                    dvmAbort();
+                }
+                newPendCount++;
+            } else {
+                /* This ref is marked, so will remain on finalizableRefs.
+                 */
+                if (newPendCount > 0) {
+                    /* Copy it up to fill the holes.
+                     */
+                    *gapRef++ = *ref;
+                } else {
+                    /* No holes yet; don't bother copying.
+                     */
+                    gapRef++;
+                }
+            }
+            ref++;
+        }
+        finRefs->refs.nextEntry = gapRef;
+        //TODO: if the table is empty when we're done, free it.
+        totalPendCount += newPendCount;
+        finRefs = finRefs->next;
+    }
+    LOGD_GC("dvmHeapScheduleFinalizations(): %zd finalizers triggered.\n",
+            totalPendCount);
+    if (totalPendCount == 0) {
+        /* No objects required finalization.
+         * Free the empty temporary table.
+         */
+        dvmClearReferenceTable(&newPendingRefs);
+        return;
+    }
+
+    /* Add the new pending refs to the main list.
+     */
+    if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
+                &newPendingRefs))
+    {
+        LOGE_GC("dvmHeapScheduleFinalizations(): can't insert new "
+                "pending finalizations\n");
+        dvmAbort();
+    }
+
+    //TODO: try compacting the main list with a memcpy loop
+
+    /* Mark the refs we just moved;  we don't want them or their
+     * children to get swept yet.
+     */
+    ref = newPendingRefs.table;
+    lastRef = newPendingRefs.nextEntry;
+    assert(ref < lastRef);
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
+    while (ref < lastRef) {
+        markObjectNonNull(*ref, markContext);
+        ref++;
+    }
+    HPROF_CLEAR_GC_SCAN_STATE();
+
+    /* Set markAllReferents so that we don't collect referents whose
+     * only references are in final-reachable objects.
+     * TODO: eventually provide normal reference behavior by properly
+     *       marking these references.
+     */
+    gDvm.gcHeap->markAllReferents = true;
+    processMarkStack(markContext);
+    gDvm.gcHeap->markAllReferents = false;
+
+    dvmSignalHeapWorker(false);
+}
+
+void dvmHeapFinishMarkStep()
+{
+    GcMarkContext *markContext;
+
+    markContext = &gDvm.gcHeap->markContext;
+
+    /* The sweep step freed every object that appeared in the
+     * HeapSource bitmaps that didn't appear in the mark bitmaps.
+     * The new state of the HeapSource is exactly the final
+     * mark bitmaps, so swap them in.
+     *
+     * The old bitmaps will be swapped into the context so that
+     * we can clean them up.
+     */
+    dvmHeapSourceReplaceObjectBitmaps(markContext->bitmaps,
+            markContext->numBitmaps);
+
+    /* Clean up the old HeapSource bitmaps and anything else associated
+     * with the marking process.
+     */
+    dvmHeapBitmapDeleteList(markContext->bitmaps, markContext->numBitmaps);
+    destroyMarkStack(&markContext->stack);
+
+    memset(markContext, 0, sizeof(*markContext));
+}
+
+#if WITH_HPROF && WITH_HPROF_UNREACHABLE
+static bool
+hprofUnreachableBitmapCallback(size_t numPtrs, void **ptrs,
+        const void *finger, void *arg)
+{
+    hprof_context_t *hctx = (hprof_context_t *)arg;
+    size_t i;
+
+    for (i = 0; i < numPtrs; i++) {
+        Object *obj;
+
+        /* The pointers we're getting back are DvmHeapChunks, not
+         * Objects.
+         */
+        obj = (Object *)chunk2ptr(*ptrs++);
+
+        hprofMarkRootObject(hctx, obj, 0);
+        hprofDumpHeapObject(hctx, obj);
+    }
+
+    return true;
+}
+
+static void
+hprofDumpUnmarkedObjects(const HeapBitmap markBitmaps[],
+        const HeapBitmap objectBitmaps[], size_t numBitmaps)
+{
+    hprof_context_t *hctx = gDvm.gcHeap->hprofContext;
+    if (hctx == NULL) {
+        return;
+    }
+
+    LOGI("hprof: dumping unreachable objects\n");
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_UNREACHABLE, 0);
+
+    dvmHeapBitmapXorWalkLists(markBitmaps, objectBitmaps, numBitmaps,
+            hprofUnreachableBitmapCallback, hctx);
+
+    HPROF_CLEAR_GC_SCAN_STATE();
+}
+#endif
+
+static bool
+sweepBitmapCallback(size_t numPtrs, void **ptrs, const void *finger, void *arg)
+{
+    const ClassObject *const classJavaLangClass = gDvm.classJavaLangClass;
+    size_t i;
+
+    for (i = 0; i < numPtrs; i++) {
+        DvmHeapChunk *hc;
+        Object *obj;
+
+        /* The pointers we're getting back are DvmHeapChunks, not
+         * Objects.
+         */
+        hc = (DvmHeapChunk *)*ptrs++;
+        obj = (Object *)chunk2ptr(hc);
+
+#if WITH_OBJECT_HEADERS
+        if (hc->markGeneration == gGeneration) {
+            LOGE("sweeping marked object: 0x%08x\n", (uint)obj);
+            dvmAbort();
+        }
+#endif
+
+        /* Free the monitor associated with the object.
+         */
+        dvmFreeObjectMonitor(obj);
+
+        /* NOTE: Dereferencing clazz is dangerous.  If obj was the last
+         * one to reference its class object, the class object could be
+         * on the sweep list, and could already have been swept, leaving
+         * us with a stale pointer.
+         */
+        LOGV_SWEEP("FREE: 0x%08x %s\n", (uint)obj, obj->clazz->name);
+
+        /* This assumes that java.lang.Class will never go away.
+         * If it can, and we were the last reference to it, it
+         * could have already been swept.  However, even in that case,
+         * gDvm.classJavaLangClass should still have a useful
+         * value.
+         */
+        if (obj->clazz == classJavaLangClass) {
+            LOGV_SWEEP("---------------> %s\n", ((ClassObject *)obj)->name);
+            /* dvmFreeClassInnards() may have already been called,
+             * but it's safe to call on the same ClassObject twice.
+             */
+            dvmFreeClassInnards((ClassObject *)obj);
+        }
+
+#if 0
+        /* Overwrite the to-be-freed object to make stale references
+         * more obvious.
+         */
+        {
+            int chunklen;
+            ClassObject *clazz = obj->clazz;
+#if WITH_OBJECT_HEADERS
+            DvmHeapChunk chunk = *hc;
+            chunk.header = ~OBJECT_HEADER | 1;
+#endif
+            chunklen = dvmHeapSourceChunkSize(hc);
+            memset(hc, 0xa5, chunklen);
+            obj->clazz = (ClassObject *)((uintptr_t)clazz ^ 0xffffffff);
+#if WITH_OBJECT_HEADERS
+            *hc = chunk;
+#endif
+        }
+#endif
+
+//TODO: provide a heapsource function that takes a list of pointers to free
+//      and call it outside of this loop.
+        dvmHeapSourceFree(hc);
+    }
+
+    return true;
+}
+
+/* A function suitable for passing to dvmHashForeachRemove()
+ * to clear out any unmarked objects.  Clears the low bits
+ * of the pointer because the intern table may set them.
+ */
+static int isUnmarkedObject(void *object)
+{
+    return !isMarked(ptr2chunk((uintptr_t)object & ~(HB_OBJECT_ALIGNMENT-1)),
+            &gDvm.gcHeap->markContext);
+}
+
+/* Walk through the list of objects that haven't been
+ * marked and free them.
+ */
+void
+dvmHeapSweepUnmarkedObjects(int *numFreed, size_t *sizeFreed)
+{
+    const HeapBitmap *markBitmaps;
+    const GcMarkContext *markContext;
+    HeapBitmap objectBitmaps[HEAP_SOURCE_MAX_HEAP_COUNT];
+    size_t origObjectsAllocated;
+    size_t origBytesAllocated;
+    size_t numBitmaps;
+
+    /* All reachable objects have been marked.
+     * Detach any unreachable interned strings before
+     * we sweep.
+     */
+    dvmGcDetachDeadInternedStrings(isUnmarkedObject);
+
+    /* Free any known objects that are not marked.
+     */
+    origObjectsAllocated = dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0);
+    origBytesAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
+
+    markContext = &gDvm.gcHeap->markContext;
+    markBitmaps = markContext->bitmaps;
+    numBitmaps = dvmHeapSourceGetObjectBitmaps(objectBitmaps,
+            HEAP_SOURCE_MAX_HEAP_COUNT);
+#ifndef NDEBUG
+    if (numBitmaps != markContext->numBitmaps) {
+        LOGE("heap bitmap count mismatch: %zd != %zd\n",
+                numBitmaps, markContext->numBitmaps);
+        dvmAbort();
+    }
+#endif
+
+#if WITH_HPROF && WITH_HPROF_UNREACHABLE
+    hprofDumpUnmarkedObjects(markBitmaps, objectBitmaps, numBitmaps);
+#endif
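+    /* Every marked object is also present in the object bitmaps, so
+     * XOR-walking the two lists visits exactly the allocated-but-unmarked
+     * objects, i.e. the garbage to sweep.
+     */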
+
+    dvmHeapBitmapXorWalkLists(markBitmaps, objectBitmaps, numBitmaps,
+            sweepBitmapCallback, NULL);
+
+    *numFreed = origObjectsAllocated -
+            dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0);
+    *sizeFreed = origBytesAllocated -
+            dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
+
+#ifdef WITH_PROFILER
+    if (gDvm.allocProf.enabled) {
+        gDvm.allocProf.freeCount += *numFreed;
+        gDvm.allocProf.freeSize += *sizeFreed;
+    }
+#endif
+}
diff --git a/vm/alloc/MarkSweep.h b/vm/alloc/MarkSweep.h
new file mode 100644
index 0000000..b087b40
--- /dev/null
+++ b/vm/alloc/MarkSweep.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _DALVIK_ALLOC_MARK_SWEEP
+#define _DALVIK_ALLOC_MARK_SWEEP
+
+#include "alloc/HeapBitmap.h"
+#include "alloc/HeapSource.h"
+
+/* Downward-growing stack for better cache read behavior.
+ */
+typedef struct {
+    /* Lowest address (inclusive)
+     */
+    const Object **limit;
+
+    /* Current top of the stack (inclusive)
+     */
+    const Object **top;
+
+    /* Highest address (exclusive)
+     */
+    const Object **base;
+} GcMarkStack;
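+
+/* (processMarkStack() pops entries with "*top++" until top == base,
+ * so a push presumably predecrements:  *--top = obj.)
+ */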
+
+/* This is declared publicly so that it can be included in gDvm.gcHeap.
+ */
+typedef struct {
+    HeapBitmap bitmaps[HEAP_SOURCE_MAX_HEAP_COUNT];
+    size_t numBitmaps;
+    GcMarkStack stack;
+    const void *finger;   // only used while scanning/recursing.
+} GcMarkContext;
+
+enum RefType {
+    REF_SOFT,
+    REF_WEAK,
+    REF_PHANTOM,
+    REF_WEAKGLOBAL
+};
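+
+/* Note: dvmHeapHandleReferences() only handles REF_SOFT, REF_WEAK, and
+ * REF_PHANTOM; REF_WEAKGLOBAL entries are presumably processed elsewhere,
+ * and would hit its "Bad reference type" assert.
+ */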
+
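+/* Mark/sweep entry points, listed roughly in the order a collection
+ * invokes them.
+ */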
+bool dvmHeapBeginMarkStep(void);
+void dvmHeapMarkRootSet(void);
+void dvmHeapScanMarkedObjects(void);
+void dvmHeapHandleReferences(Object *refListHead, enum RefType refType);
+void dvmHeapScheduleFinalizations(void);
+void dvmHeapFinishMarkStep(void);
+
+void dvmHeapSweepUnmarkedObjects(int *numFreed, size_t *sizeFreed);
+
+#endif  // _DALVIK_ALLOC_MARK_SWEEP
diff --git a/vm/alloc/TEST/HeapBitmapTest/Makefile b/vm/alloc/TEST/HeapBitmapTest/Makefile
new file mode 100644
index 0000000..fe31b24
--- /dev/null
+++ b/vm/alloc/TEST/HeapBitmapTest/Makefile
@@ -0,0 +1,28 @@
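+# Standalone host-side test for HeapBitmap: builds HeapBitmap.c and clz.c
+# against the stub headers in include/ and runs the resulting binary.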
+.PHONY: all
+all: runtest
+
+$(shell mkdir -p out)
+
+CC := gcc
+CFLAGS := -g -Wall -Werror
+#CFLAGS += -O2
+
+out/main.o: main.c ../../HeapBitmap.h
+	$(CC) $(CFLAGS) -c $< -o $@ -I ../..
+
+out/HeapBitmap.o: ../../HeapBitmap.c ../../HeapBitmap.h ../../clz.h include/cutils/ashmem.h include/Dalvik.h
+	$(CC) $(CFLAGS) -c $< -o $@ -I ../.. -I include
+
+out/clz.o: ../../clz.c ../../clz.h
+	$(CC) $(CFLAGS) -c $< -o $@ -I ../..
+
+out/hbtest: out/main.o out/HeapBitmap.o out/clz.o
+	$(CC) $^ -o $@
+
+.PHONY: runtest
+runtest: out/hbtest
+	out/hbtest
+
+.PHONY: clean
+clean:
+	rm -rf out
diff --git a/vm/alloc/TEST/HeapBitmapTest/include/Dalvik.h b/vm/alloc/TEST/HeapBitmapTest/include/Dalvik.h
new file mode 100644
index 0000000..4c9f608
--- /dev/null
+++ b/vm/alloc/TEST/HeapBitmapTest/include/Dalvik.h
@@ -0,0 +1,18 @@
+#ifndef DALVIK_H_
+#define DALVIK_H_
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <limits.h>
+
+#define LOGW(...) printf("W/" __VA_ARGS__)
+#define LOGE(...) printf("E/" __VA_ARGS__)
+
+static inline void dvmAbort(void) {
+    exit(1);
+}
+
+#endif  // DALVIK_H_
diff --git a/vm/alloc/TEST/HeapBitmapTest/include/cutils/ashmem.h b/vm/alloc/TEST/HeapBitmapTest/include/cutils/ashmem.h
new file mode 100644
index 0000000..8680c77
--- /dev/null
+++ b/vm/alloc/TEST/HeapBitmapTest/include/cutils/ashmem.h
@@ -0,0 +1,14 @@
+#ifndef ASHMEM_H_
+#define ASHMEM_H_
+
+#include <fcntl.h>
+
+#define ASHMEM_NAME_LEN 128
+
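+/* Test stub: real ashmem is Android-specific.  Opening /dev/zero gives
+ * the caller a mappable, zero-filled fd, which is sufficient for the
+ * host-side bitmap test.
+ */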
+static inline int
+ashmem_create_region(const char *name, size_t len)
+{
+    return open("/dev/zero", O_RDWR);
+}
+
+#endif
diff --git a/vm/alloc/TEST/HeapBitmapTest/main.c b/vm/alloc/TEST/HeapBitmapTest/main.c
new file mode 100644
index 0000000..10fa7f8
--- /dev/null
+++ b/vm/alloc/TEST/HeapBitmapTest/main.c
@@ -0,0 +1,496 @@
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <assert.h>
+#include <string.h>
+#include <unistd.h>
+#define __attribute(x) /* disable inlining */
+#include "HeapBitmap.h"
+#undef __attribute
+
+#define PAGE_SIZE 4096
+#define HEAP_BASE ((void *)0x10000)
+#define HEAP_SIZE (5 * PAGE_SIZE + 888)
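+/* (HEAP_SIZE is deliberately not page-aligned -- though 888 is still
+ * 8-byte aligned -- presumably to exercise the partial final word of
+ * the bitmap.)
+ */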
+
+#define VERBOSE 1
+#if VERBOSE
+#define TRACE(...) printf(__VA_ARGS__)
+#else
+#define TRACE(...) /**/
+#endif
+
+void
+test_init()
+{
+    HeapBitmap hb;
+    bool ok;
+
+    memset(&hb, 0x55, sizeof(hb));
+
+    ok = dvmHeapBitmapInit(&hb, HEAP_BASE, HEAP_SIZE, "test");
+    assert(ok);
+
+    assert(hb.bits != NULL);
+    assert(hb.bitsLen >= HB_OFFSET_TO_INDEX(HEAP_SIZE));
+    assert(hb.base == (uintptr_t)HEAP_BASE);
+    assert(hb.max < hb.base);
+
+    /* Make sure hb.bits is mapped.
+     */
+    *hb.bits = 0x55;
+    assert(*hb.bits == 0x55);
+    *hb.bits = 0;
+
+#define TEST_UNMAP 0
+#if TEST_UNMAP
+    /* Hold onto this to make sure it's unmapped later.
+     */
+    unsigned long int *bits = hb.bits;
+#endif
+
+    dvmHeapBitmapDelete(&hb);
+
+    assert(hb.bits == NULL);
+    assert(hb.bitsLen == 0);
+    assert(hb.base == 0);
+    assert(hb.max == 0);
+
+#if TEST_UNMAP
+    /* This pointer shouldn't be mapped anymore.
+     */
+    *bits = 0x55;
+    assert(!"Should have segfaulted");
+#endif
+}
+
+bool is_zeroed(const HeapBitmap *hb)
+{
+    size_t i;
+
+    for (i = 0; i < hb->bitsLen / sizeof (*hb->bits); i++) {
+        if (hb->bits[i] != 0L) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void assert_empty(const HeapBitmap *hb)
+{
+    assert(hb->bits != NULL);
+    assert(hb->bitsLen >= HB_OFFSET_TO_INDEX(HEAP_SIZE));
+    assert(hb->base == (uintptr_t)HEAP_BASE);
+    assert(hb->max < hb->base);
+
+    assert(is_zeroed(hb));
+
+    assert(!dvmHeapBitmapMayContainObject(hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapMayContainObject(hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(!dvmHeapBitmapIsObjectBitSet(hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapIsObjectBitSet(hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+}
+
+void
+test_bits()
+{
+    HeapBitmap hb;
+    bool ok;
+
+    ok = dvmHeapBitmapInit(&hb, HEAP_BASE, HEAP_SIZE, "test");
+    assert(ok);
+
+    assert_empty(&hb);
+
+    /* Set the lowest address.
+     */
+    dvmHeapBitmapSetObjectBit(&hb, HEAP_BASE);
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+
+    /* Set the highest address.
+     */
+    dvmHeapBitmapSetObjectBit(&hb, HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT);
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE));
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+
+    /* Clear the lowest address.
+     */
+    dvmHeapBitmapClearObjectBit(&hb, HEAP_BASE);
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!is_zeroed(&hb));
+
+    /* Clear the highest address.
+     */
+    dvmHeapBitmapClearObjectBit(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT);
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(is_zeroed(&hb));
+
+    /* Clean up.
+     */
+    dvmHeapBitmapDelete(&hb);
+}
+
+void
+test_clear()
+{
+    HeapBitmap hb;
+    bool ok;
+
+    ok = dvmHeapBitmapInit(&hb, HEAP_BASE, HEAP_SIZE, "test");
+    assert(ok);
+    assert_empty(&hb);
+
+    /* Set the highest address.
+     */
+    dvmHeapBitmapSetObjectBit(&hb, HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT);
+    assert(!is_zeroed(&hb));
+
+    /* Clear the bitmap.
+     */
+    dvmHeapBitmapZero(&hb);
+    assert_empty(&hb);
+
+    /* Clean up.
+     */
+    dvmHeapBitmapDelete(&hb);
+}
+
+void
+test_modify()
+{
+    HeapBitmap hb;
+    bool ok;
+    unsigned long bit;
+
+    ok = dvmHeapBitmapInit(&hb, HEAP_BASE, HEAP_SIZE, "test");
+    assert(ok);
+    assert_empty(&hb);
+
+    /* Set the lowest address.
+     */
+    bit = dvmHeapBitmapSetAndReturnObjectBit(&hb, HEAP_BASE);
+    assert(bit == 0);
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+
+    /* Set the lowest address again.
+     */
+    bit = dvmHeapBitmapSetAndReturnObjectBit(&hb, HEAP_BASE);
+    assert(bit != 0);
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+
+    /* Set the highest address.
+     */
+    bit = dvmHeapBitmapSetAndReturnObjectBit(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT);
+    assert(bit == 0);
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE));
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+
+    /* Set the highest address again.
+     */
+    bit = dvmHeapBitmapSetAndReturnObjectBit(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT);
+    assert(bit != 0);
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE));
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+
+    /* Clean up.
+     */
+    dvmHeapBitmapDelete(&hb);
+}
+
+/*
+ * xor test support functions
+ */
+
+static void *gCallbackArg = NULL;
+
+#define NUM_XOR_PTRS  128
+static size_t gNumPtrs;
+static void *gXorPtrs[NUM_XOR_PTRS];
+static bool gClearedPtrs[NUM_XOR_PTRS];
+static bool gSeenPtrs[NUM_XOR_PTRS];
+
+bool
+xorCallback(size_t numPtrs, void **ptrs, const void *finger, void *arg)
+{
+    assert(numPtrs > 0);
+    assert(ptrs != NULL);
+    assert(arg == gCallbackArg);
+
+    size_t i;
+    for (i = 0; i < numPtrs; i++) {
+        assert(ptrs[i] < finger);
+        printf("callback: 0x%08x ( < 0x%08x )\n",
+                (uintptr_t)ptrs[i], (uintptr_t)finger);
+    }
+
+    return true;
+}
+
+bool
+seenAndClearedMatch()
+{
+    size_t i;
+    for (i = 0; i < gNumPtrs; i++) {
+        if (gClearedPtrs[i] != gSeenPtrs[i]) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void
+run_xor(ssize_t offset, size_t step)
+{
+    assert(step != 0);
+    assert(step < HEAP_SIZE);
+
+    /* Figure out the range.
+     */
+    uintptr_t base;
+    uintptr_t top;
+    if (offset >= 0) {
+        base = (uintptr_t)HEAP_BASE + offset;
+    } else {
+        base = (uintptr_t)HEAP_BASE + (uintptr_t)HEAP_SIZE + offset;
+    }
+    if (base < (uintptr_t)HEAP_BASE) {
+        base = (uintptr_t)HEAP_BASE;
+    } else if (base > (uintptr_t)(HEAP_BASE + HEAP_SIZE)) {
+        base = (uintptr_t)(HEAP_BASE + HEAP_SIZE);
+    } else {
+        base = (base + HB_OBJECT_ALIGNMENT - 1) & ~(HB_OBJECT_ALIGNMENT - 1);
+    }
+    step *= HB_OBJECT_ALIGNMENT;
+    top = base + step * NUM_XOR_PTRS;
+    if (top > (uintptr_t)(HEAP_BASE + HEAP_SIZE)) {
+        top = (uintptr_t)(HEAP_BASE + HEAP_SIZE);
+    }
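+    /* (e.g. run_xor(100, 34): base rounds up to HEAP_BASE + 104, the
+     * step becomes 34 * HB_OBJECT_ALIGNMENT bytes, and top is clamped
+     * to the end of the heap, since 128 pointers at that stride would
+     * overshoot it.)
+     */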
+
+    /* Create the pointers.
+     */
+    gNumPtrs = 0;
+    memset(gXorPtrs, 0, sizeof(gXorPtrs));
+    memset(gClearedPtrs, 0, sizeof(gClearedPtrs));
+    memset(gSeenPtrs, 0, sizeof(gSeenPtrs));
+
+    uintptr_t addr;
+    void **p = gXorPtrs;
+    for (addr = base; addr < top; addr += step) {
+        *p++ = (void *)addr;
+        gNumPtrs++;
+    }
+    assert(seenAndClearedMatch());
+
+    /* Set up the bitmaps.
+     */
+    HeapBitmap hb1, hb2;
+    bool ok;
+
+    ok = dvmHeapBitmapInit(&hb1, HEAP_BASE, HEAP_SIZE, "test1");
+    assert(ok);
+    ok = dvmHeapBitmapInitFromTemplate(&hb2, &hb1, "test2");
+    assert(ok);
+
+    /* Walk two empty bitmaps.
+     */
+TRACE("walk 0\n");
+    ok = dvmHeapBitmapXorWalk(&hb1, &hb2, xorCallback, gCallbackArg);
+    assert(ok);
+    assert(seenAndClearedMatch());
+
+    /* Walk one empty bitmap.
+     */
+TRACE("walk 1\n");
+    dvmHeapBitmapSetObjectBit(&hb1, (void *)base);
+    ok = dvmHeapBitmapXorWalk(&hb1, &hb2, xorCallback, gCallbackArg);
+    assert(ok);
+
+    /* Make the bitmaps match.
+     */
+TRACE("walk 2\n");
+    dvmHeapBitmapSetObjectBit(&hb2, (void *)base);
+    ok = dvmHeapBitmapXorWalk(&hb1, &hb2, xorCallback, gCallbackArg);
+    assert(ok);
+
+    /* Clear the bitmaps.
+     */
+    dvmHeapBitmapZero(&hb1);
+    assert_empty(&hb1);
+    dvmHeapBitmapZero(&hb2);
+    assert_empty(&hb2);
+
+    /* Set the pointers we created in one of the bitmaps,
+     * then visit them.
+     */
+    size_t i;
+    for (i = 0; i < gNumPtrs; i++) {
+        dvmHeapBitmapSetObjectBit(&hb1, gXorPtrs[i]);
+    }
+TRACE("walk 3\n");
+    ok = dvmHeapBitmapXorWalk(&hb1, &hb2, xorCallback, gCallbackArg);
+    assert(ok);
+
+    /* Set every third pointer in the other bitmap, and visit again.
+     */
+    for (i = 0; i < gNumPtrs; i += 3) {
+        dvmHeapBitmapSetObjectBit(&hb2, gXorPtrs[i]);
+    }
+TRACE("walk 4\n");
+    ok = dvmHeapBitmapXorWalk(&hb1, &hb2, xorCallback, gCallbackArg);
+    assert(ok);
+
+    /* Set every other pointer in the other bitmap, and visit again.
+     */
+    for (i = 0; i < gNumPtrs; i += 2) {
+        dvmHeapBitmapSetObjectBit(&hb2, gXorPtrs[i]);
+    }
+TRACE("walk 5\n");
+    ok = dvmHeapBitmapXorWalk(&hb1, &hb2, xorCallback, gCallbackArg);
+    assert(ok);
+
+    /* Walk just one bitmap.
+     */
+TRACE("walk 6\n");
+    ok = dvmHeapBitmapWalk(&hb2, xorCallback, gCallbackArg);
+    assert(ok);
+
+    //xxx build an expect list for the callback
+    //xxx test where max points to beginning, middle, and end of a word
+
+    /* Clean up.
+     */
+    dvmHeapBitmapDelete(&hb1);
+    dvmHeapBitmapDelete(&hb2);
+}
+
+void
+test_xor()
+{
+    run_xor(0, 1);
+    run_xor(100, 34);
+}
+
+int main(int argc, char *argv[])
+{
+    printf("test_init...\n");
+    test_init();
+
+    printf("test_bits...\n");
+    test_bits();
+
+    printf("test_clear...\n");
+    test_clear();
+
+    printf("test_modify...\n");
+    test_modify();
+
+    printf("test_xor...\n");
+    test_xor();
+
+    printf("done.\n");
+    return 0;
+}
diff --git a/vm/alloc/clz.c b/vm/alloc/clz.c
new file mode 100644
index 0000000..2ec873e
--- /dev/null
+++ b/vm/alloc/clz.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "clz.h"
+
+/*
+ * Implementation of CLZ; intended to mimic gcc's __builtin_clz.
+ *
+ * Returns the number of leading zero bits, starting at the most
+ * significant bit position.  If the argument is zero, the result is
+ * undefined.
+ *
+ * (For best results, this file should always be compiled for ARM, not THUMB.)
+ */
+int dvmClzImpl(unsigned int x)
+{
+#ifdef HAVE_BUILTIN_CLZ
+    /*
+     * This file was compiled with flags that allow it to use the built-in
+     * CLZ (e.g. ARM mode for ARMv5 or later).
+     */
+    return __builtin_clz(x);
+#else
+    /*
+     * Built-in version not available.
+     */
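+    /* Binary search for the highest set bit.  Worked example:
+     * x == 0x00010000 takes only the first branch (e = 15, x = 1)
+     * and falls through, returning 15 == __builtin_clz(0x00010000).
+     */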
+    if (!x) return 32;
+    int e = 31;
+    if (x&0xFFFF0000)   { e -=16; x >>=16; }
+    if (x&0x0000FF00)   { e -= 8; x >>= 8; }
+    if (x&0x000000F0)   { e -= 4; x >>= 4; }
+    if (x&0x0000000C)   { e -= 2; x >>= 2; }
+    if (x&0x00000002)   { e -= 1; }
+    return e;
+#endif
+}
+
diff --git a/vm/alloc/clz.h b/vm/alloc/clz.h
new file mode 100644
index 0000000..77fa6d4
--- /dev/null
+++ b/vm/alloc/clz.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Implementation of clz(), which returns the number of leading zero bits,
+ * starting at the most significant bit position.  If the argument is zero,
+ * the result is undefined.
+ *
+ * On some platforms, gcc provides a __builtin_clz() function that uses
+ * an optimized implementation (e.g. the CLZ instruction on ARM).
+ *
+ * This gets a little tricky for ARM, because it's only available in ARMv5
+ * and above, and even on ARMv5 it's not available for THUMB code.  So we
+ * need to tailor this for every source file.
+ */
+#ifndef _DALVIK_CLZ
+
+#if defined(__arm__) && !defined(__thumb__)
+# include <machine/cpu-features.h>
+# if defined(__ARM_HAVE_CLZ)
+#  define CLZ(x) __builtin_clz(x)
+#  define HAVE_BUILTIN_CLZ
+# endif
+#endif
+
+#ifndef HAVE_BUILTIN_CLZ
+# define CLZ(x) dvmClzImpl(x)
+int dvmClzImpl(unsigned int x);
+#endif
+
+#endif // _DALVIK_CLZ