Initial Contribution
diff --git a/vm/AllocTracker.c b/vm/AllocTracker.c
new file mode 100644
index 0000000..9649e68
--- /dev/null
+++ b/vm/AllocTracker.c
@@ -0,0 +1,650 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Allocation tracking and reporting.  We maintain a circular buffer with
+ * the most recent allocations.  The data can be viewed through DDMS.
+ *
+ * There are two basic approaches: manage the buffer with atomic updates
+ * and do a system-wide suspend when DDMS requests it, or protect all
+ * accesses with a mutex.  The former is potentially more efficient, but
+ * the latter is much simpler and more reliable.
+ *
+ * Ideally we'd just use the object heap allocation mutex to guard this
+ * structure, but at the point we grab that (under dvmMalloc()) we're just
+ * allocating a collection of bytes and no longer have the class reference.
+ * Because this is an optional feature it's best to leave the existing
+ * code undisturbed and just use an additional lock.
+ *
+ * We don't currently track allocations of class objects.  We could, but
+ * with the possible exception of Proxy objects they're not that interesting.
+ *
+ * TODO: if we add support for class unloading, we need to add the class
+ * references here to the root set (or just disable class unloading while
+ * this is active).
+ *
+ * TODO: consider making the parameters configurable, so DDMS can decide
+ * how many allocations it wants to see and what the stack depth should be.
+ */
+#include "Dalvik.h"
+
+#define kMaxAllocRecordStackDepth   8       /* max 255 */
+#define kNumAllocRecords            512     /* MUST be power of 2 */
+
+/*
+ * Record the details of an allocation.
+ */
+struct AllocRecord {
+    /* NOTE(review): this reference is not registered with the GC root set;
+     * see the class-unloading TODO at the top of this file */
+    ClassObject*    clazz;      /* class allocated in this block */
+    u4              size;       /* total size requested */
+    u2              threadId;   /* simple thread ID; could be recycled */
+
+    /* stack trace elements; unused entries have method==NULL */
+    struct {
+        const Method* method;   /* which method we're executing in */
+        int         pc;         /* current execution offset, in 16-bit units */
+    } stackElem[kMaxAllocRecordStackDepth];
+
+    /*
+     * This was going to be either wall-clock time in seconds or monotonic
+     * time in milliseconds since the VM started, to give a rough sense for
+     * how long ago an allocation happened.  This adds a system call per
+     * allocation, which is too much overhead.
+     */
+    //u4      timestamp;
+};
+
+/*
+ * Initialize a few things.  This gets called early, so keep activity to
+ * a minimum.
+ */
+bool dvmAllocTrackerStartup(void)
+{
+    /* set up the lock that guards the record buffer */
+    dvmInitMutex(&gDvm.allocTrackerLock);
+
+    /* the buffer itself is created lazily, when DDMS enables tracking */
+    assert(gDvm.allocRecords == NULL);
+    return true;
+}
+
+/*
+ * Release anything we're holding on to.
+ */
+void dvmAllocTrackerShutdown(void)
+{
+    /* free(NULL) is a no-op, so this is safe even if tracking was never on */
+    free(gDvm.allocRecords);
+    dvmDestroyMutex(&gDvm.allocTrackerLock);
+}
+
+
+/*
+ * ===========================================================================
+ *      Collection
+ * ===========================================================================
+ */
+
+/*
+ * Enable allocation tracking.  Does nothing if tracking is already enabled.
+ *
+ * Returns "true" on success.
+ */
+bool dvmEnableAllocTracker(void)
+{
+    bool result = true;
+    dvmLockMutex(&gDvm.allocTrackerLock);
+
+    if (gDvm.allocRecords == NULL) {
+        /* sizeof() yields a size_t; the old "%d" mismatched the varargs
+         * type (undefined behavior on LP64), so print it through %zu */
+        LOGI("Enabling alloc tracker (%d entries / %zu bytes)\n",
+            kNumAllocRecords, sizeof(AllocRecord) * kNumAllocRecords);
+        gDvm.allocRecordHead = gDvm.allocRecordCount = 0;
+        gDvm.allocRecords =
+            (AllocRecord*) malloc(sizeof(AllocRecord) * kNumAllocRecords);
+
+        if (gDvm.allocRecords == NULL)
+            result = false;
+    }
+
+    dvmUnlockMutex(&gDvm.allocTrackerLock);
+    return result;
+}
+
+/*
+ * Disable allocation tracking.  Does nothing if tracking is not enabled.
+ */
+void dvmDisableAllocTracker(void)
+{
+    dvmLockMutex(&gDvm.allocTrackerLock);
+
+    /* releasing a NULL pointer is a no-op, so no guard is needed */
+    free(gDvm.allocRecords);
+    gDvm.allocRecords = NULL;
+
+    dvmUnlockMutex(&gDvm.allocTrackerLock);
+}
+
+/*
+ * Get the last few stack frames.
+ */
+static void getStackFrames(Thread* self, AllocRecord* pRec)
+{
+    int stackDepth = 0;
+    void* fp;
+
+    fp = self->curFrame;
+
+    /* walk from the current frame toward the stack bottom, recording up
+     * to kMaxAllocRecordStackDepth method/pc pairs */
+    while ((fp != NULL) && (stackDepth < kMaxAllocRecordStackDepth)) {
+        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+        const Method* method = saveArea->method;
+
+        /* break frames are skipped; only real method frames are recorded */
+        if (!dvmIsBreakFrame(fp)) {
+            pRec->stackElem[stackDepth].method = method;
+            if (dvmIsNativeMethod(method)) {
+                /* no bytecode pc exists for native code; record 0 */
+                pRec->stackElem[stackDepth].pc = 0;
+            } else {
+                assert(saveArea->xtra.currentPc >= method->insns &&
+                        saveArea->xtra.currentPc <
+                        method->insns + dvmGetMethodInsnsSize(method));
+                /* store the pc as an offset into the method, in 16-bit units */
+                pRec->stackElem[stackDepth].pc =
+                    (int) (saveArea->xtra.currentPc - method->insns);
+            }
+            stackDepth++;
+        }
+
+        /* a frame must never point at itself, or we'd loop forever */
+        assert(fp != saveArea->prevFrame);
+        fp = saveArea->prevFrame;
+    }
+
+    /* clear out the rest (normally there won't be any) */
+    while (stackDepth < kMaxAllocRecordStackDepth) {
+        pRec->stackElem[stackDepth].method = NULL;
+        pRec->stackElem[stackDepth].pc = 0;
+        stackDepth++;
+    }
+}
+
+/*
+ * Add a new allocation to the set.
+ */
+void dvmDoTrackAllocation(ClassObject* clazz, int size)
+{
+    dvmLockMutex(&gDvm.allocTrackerLock);
+    /* tracking may have been disabled between the caller's unlocked
+     * check (see dvmTrackAllocation macro) and acquiring the lock */
+    if (gDvm.allocRecords == NULL)
+        goto bail;
+
+    Thread* self = dvmThreadSelf();
+    if (self == NULL) {
+        LOGW("alloc tracker: no thread\n");
+        goto bail;
+    }
+
+    /* advance and clip */
+    if (++gDvm.allocRecordHead == kNumAllocRecords)
+        gDvm.allocRecordHead = 0;
+
+    /* the slot we just advanced onto is the oldest entry; overwrite it */
+    AllocRecord* pRec = &gDvm.allocRecords[gDvm.allocRecordHead];
+
+    pRec->clazz = clazz;
+    pRec->size = size;
+    pRec->threadId = self->threadId;
+    getStackFrames(self, pRec);
+
+    /* count saturates at the buffer capacity once the buffer wraps */
+    if (gDvm.allocRecordCount < kNumAllocRecords)
+        gDvm.allocRecordCount++;
+
+bail:
+    dvmUnlockMutex(&gDvm.allocTrackerLock);
+}
+
+
+/*
+ * ===========================================================================
+ *      Reporting
+ * ===========================================================================
+ */
+
+/*
+The data we send to DDMS contains everything we have recorded.
+
+Message header (all values big-endian):
+  (1b) message header len (to allow future expansion); includes itself
+  (1b) entry header len
+  (1b) stack frame len
+  (2b) number of entries
+  (4b) offset to string table from start of message
+  (2b) number of class name strings
+  (2b) number of method name strings
+  (2b) number of source file name strings
+  For each entry:
+    (4b) total allocation size
+    (2b) threadId
+    (2b) allocated object's class name index
+    (1b) stack depth
+    For each stack frame:
+      (2b) method's class name
+      (2b) method name
+      (2b) method source file
+      (2b) line number, clipped to 32767; -2 if native; -1 if no source
+  (xb) class name strings
+  (xb) method name strings
+  (xb) source file strings
+
+  As with other DDM traffic, strings are sent as a 4-byte length
+  followed by UTF-16 data.
+
+We send up 16-bit unsigned indexes into string tables.  In theory there
+can be (kMaxAllocRecordStackDepth * kNumAllocRecords) unique strings in
+each table, but in practice there should be far fewer.
+
+The chief reason for using a string table here is to keep the size of
+the DDMS message to a minimum.  This is partly to make the protocol
+efficient, but also because we have to form the whole thing up all at
+once in a memory buffer.
+
+We use separate string tables for class names, method names, and source
+files to keep the indexes small.  There will generally be no overlap
+between the contents of these tables.
+*/
+const int kMessageHeaderLen = 15;
+const int kEntryHeaderLen = 9;
+const int kStackFrameLen = 8;
+
+/*
+ * Return the index of the head element.
+ *
+ * We point at the most-recently-written record, so if allocRecordCount is 1
+ * we want to use the current element.  Take "head+1" and subtract count
+ * from it.
+ *
+ * We need to handle underflow in our circular buffer, so we add
+ * kNumAllocRecords and then mask it back down.
+ */
+inline static int headIndex(void)
+{
+    /* step back "count" slots from just past the newest record, then
+     * wrap into range (add kNumAllocRecords to avoid going negative) */
+    int idx = gDvm.allocRecordHead + 1 - gDvm.allocRecordCount;
+    return (idx + kNumAllocRecords) & (kNumAllocRecords-1);
+}
+
+/*
+ * Dump the contents of a PointerSet full of character pointers.
+ */
+static void dumpStringTable(PointerSet* strings)
+{
+    int numStrings = dvmPointerSetGetCount(strings);
+    int i;
+
+    /* print each entry on its own indented line */
+    for (i = 0; i < numStrings; i++) {
+        const char* str = (const char*) dvmPointerSetGetEntry(strings, i);
+        printf("  %s\n", str);
+    }
+}
+
+/*
+ * Get the method's source file.  If we don't know it, return "" instead
+ * of a NULL pointer.
+ */
+static const char* getMethodSourceFile(const Method* method)
+{
+    /* never hand back NULL; substitute an empty string */
+    const char* fileName = dvmGetMethodSourceFile(method);
+    return (fileName != NULL) ? fileName : "";
+}
+
+/*
+ * Generate string tables.
+ *
+ * Our source material is UTF-8 string constants from DEX files.  If we
+ * want to be thorough we can generate a hash value for each string and
+ * use the VM hash table implementation, or we can do a quick & dirty job
+ * by just maintaining a list of unique pointers.  If the same string
+ * constant appears in multiple DEX files we'll end up with duplicates,
+ * but in practice this shouldn't matter (and if it does, we can uniq-sort
+ * the result in a second pass).
+ */
+static bool populateStringTables(PointerSet* classNames,
+    PointerSet* methodNames, PointerSet* fileNames)
+{
+    int count = gDvm.allocRecordCount;
+    int idx = headIndex();
+    int classCount, methodCount, fileCount;         /* debug stats */
+
+    classCount = methodCount = fileCount = 0;
+
+    /* walk every live record, oldest first, interning each class
+     * descriptor, method name, and source file into its pointer set */
+    while (count--) {
+        AllocRecord* pRec = &gDvm.allocRecords[idx];
+
+        dvmPointerSetAddEntry(classNames, pRec->clazz->descriptor);
+        classCount++;
+
+        int i;
+        for (i = 0; i < kMaxAllocRecordStackDepth; i++) {
+            /* entries past the recorded depth are NULL (see getStackFrames) */
+            if (pRec->stackElem[i].method == NULL)
+                break;
+
+            const Method* method = pRec->stackElem[i].method;
+            dvmPointerSetAddEntry(classNames, method->clazz->descriptor);
+            classCount++;
+            dvmPointerSetAddEntry(methodNames, method->name);
+            methodCount++;
+            dvmPointerSetAddEntry(fileNames, getMethodSourceFile(method));
+            fileCount++;
+        }
+
+        idx = (idx + 1) & (kNumAllocRecords-1);
+    }
+
+    /* unique vs. total counts, to gauge how well the tables de-duplicate */
+    LOGI("class %d/%d, method %d/%d, file %d/%d\n",
+        dvmPointerSetGetCount(classNames), classCount,
+        dvmPointerSetGetCount(methodNames), methodCount,
+        dvmPointerSetGetCount(fileNames), fileCount);
+
+    return true;
+}
+
+/*
+ * Generate the base info (i.e. everything but the string tables).
+ *
+ * This should be called twice.  On the first call, "ptr" is NULL and
+ * "baseLen" is zero.  The return value is used to allocate a buffer.
+ * On the second call, "ptr" points to a data buffer, and "baseLen"
+ * holds the value from the result of the first call.
+ *
+ * The size of the output data is returned.
+ */
+static size_t generateBaseOutput(u1* ptr, size_t baseLen,
+    const PointerSet* classNames, const PointerSet* methodNames,
+    const PointerSet* fileNames)
+{
+    u1* origPtr = ptr;
+    int count = gDvm.allocRecordCount;
+    int idx = headIndex();
+
+    /* origPtr==NULL means this is the sizing pass: skip every write but
+     * still advance "ptr" so the returned size comes out the same */
+    if (origPtr != NULL) {
+        /* message header; layout is documented in the comment block above */
+        set1(&ptr[0], kMessageHeaderLen);
+        set1(&ptr[1], kEntryHeaderLen);
+        set1(&ptr[2], kStackFrameLen);
+        set2BE(&ptr[3], count);
+        set4BE(&ptr[5], baseLen);       /* offset of string tables */
+        set2BE(&ptr[9], dvmPointerSetGetCount(classNames));
+        set2BE(&ptr[11], dvmPointerSetGetCount(methodNames));
+        set2BE(&ptr[13], dvmPointerSetGetCount(fileNames));
+    }
+    ptr += kMessageHeaderLen;
+
+    while (count--) {
+        AllocRecord* pRec = &gDvm.allocRecords[idx];
+
+        /* compute depth (entries past the trace end have method==NULL) */
+        int  depth;
+        for (depth = 0; depth < kMaxAllocRecordStackDepth; depth++) {
+            if (pRec->stackElem[depth].method == NULL)
+                break;
+        }
+
+        /* output header */
+        if (origPtr != NULL) {
+            set4BE(&ptr[0], pRec->size);
+            set2BE(&ptr[4], pRec->threadId);
+            set2BE(&ptr[6],
+                dvmPointerSetFind(classNames, pRec->clazz->descriptor));
+            set1(&ptr[8], depth);
+        }
+        ptr += kEntryHeaderLen;
+
+        /* convert stack frames to string-table indices */
+        int i;
+        for (i = 0; i < depth; i++) {
+            if (origPtr != NULL) {
+                const Method* method = pRec->stackElem[i].method;
+                int lineNum;
+
+                /* clip to the signed-16-bit range the protocol allows */
+                lineNum = dvmLineNumFromPC(method, pRec->stackElem[i].pc);
+                if (lineNum > 32767)
+                    lineNum = 32767;
+
+                set2BE(&ptr[0], dvmPointerSetFind(classNames,
+                        method->clazz->descriptor));
+                set2BE(&ptr[2], dvmPointerSetFind(methodNames,
+                        method->name));
+                set2BE(&ptr[4], dvmPointerSetFind(fileNames,
+                        getMethodSourceFile(method)));
+                set2BE(&ptr[6], (u2)lineNum);
+            }
+            ptr += kStackFrameLen;
+        }
+
+        idx = (idx + 1) & (kNumAllocRecords-1);
+    }
+
+    /* number of bytes written (or, on the sizing pass, that would be) */
+    return ptr - origPtr;
+}
+
+/*
+ * Compute the size required to store a string table.  Includes the length
+ * word and conversion to UTF-16.
+ */
+static size_t computeStringTableSize(PointerSet* strings)
+{
+    int numStrings = dvmPointerSetGetCount(strings);
+    size_t total = 0;
+    int i;
+
+    /* each string is serialized as a 4-byte length plus UTF-16 payload */
+    for (i = 0; i < numStrings; i++) {
+        const char* str = (const char*) dvmPointerSetGetEntry(strings, i);
+        total += 4 + dvmUtf8Len(str) * 2;
+    }
+
+    return total;
+}
+
+/*
+ * Convert a UTF-8 string to UTF-16.  We also need to byte-swap the values
+ * to big-endian, and we can't assume even alignment on the target.
+ *
+ * Returns the string's length, in characters.
+ */
+/* NOTE(review): only referenced within this file and not declared in
+ * AllocTracker.h; could likely be "static" -- confirm no external users */
+int convertUtf8ToUtf16BEUA(u1* utf16Str, const char* utf8Str)
+{
+    u1* origUtf16Str = utf16Str;
+
+    while (*utf8Str != '\0') {
+        u2 utf16 = dexGetUtf16FromUtf8(&utf8Str);       /* advances utf8Str */
+        /* write big-endian, one byte at a time (target may be unaligned) */
+        set2BE(utf16Str, utf16);
+        utf16Str += 2;
+    }
+
+    /* two output bytes per UTF-16 code unit */
+    return (utf16Str - origUtf16Str) / 2;
+}
+
+/*
+ * Output a string table serially.
+ */
+static size_t outputStringTable(PointerSet* strings, u1* ptr)
+{
+    int count = dvmPointerSetGetCount(strings);
+    u1* origPtr = ptr;
+    int i;
+
+    for (i = 0; i < count; i++) {
+        const char* str = (const char*) dvmPointerSetGetEntry(strings, i);
+        int charLen;
+
+        /* copy UTF-8 string to big-endian unaligned UTF-16 */
+        charLen = convertUtf8ToUtf16BEUA(&ptr[4], str);
+        /* length prefix counts UTF-16 code units, not bytes */
+        set4BE(&ptr[0], charLen);
+
+        ptr += 4 + charLen * 2;
+    }
+
+    /* number of bytes written */
+    return ptr - origPtr;
+}
+
+/*
+ * Generate a DDM packet with all of the tracked allocation data.
+ *
+ * On success, returns "true" with "*pData" and "*pDataLen" set.
+ */
+bool dvmGenerateTrackedAllocationReport(u1** pData, size_t* pDataLen)
+{
+    bool result = false;
+    u1* buffer = NULL;
+
+    dvmLockMutex(&gDvm.allocTrackerLock);
+
+    /*
+     * Part 1: generate string tables.
+     */
+    PointerSet* classNames = NULL;
+    PointerSet* methodNames = NULL;
+    PointerSet* fileNames = NULL;
+
+    /*
+     * Allocate storage.  Usually there's 60-120 of each thing (sampled
+     * when max=512), but it varies widely and isn't closely bound to
+     * the number of allocations we've captured.  The sets expand quickly
+     * if needed.
+     */
+    classNames = dvmPointerSetAlloc(128);
+    methodNames = dvmPointerSetAlloc(128);
+    fileNames = dvmPointerSetAlloc(128);
+    if (classNames == NULL || methodNames == NULL || fileNames == NULL) {
+        LOGE("Failed allocating pointer sets\n");
+        goto bail;
+    }
+
+    if (!populateStringTables(classNames, methodNames, fileNames))
+        goto bail;
+
+    if (false) {
+        printf("Classes:\n");
+        dumpStringTable(classNames);
+        printf("Methods:\n");
+        dumpStringTable(methodNames);
+        printf("Files:\n");
+        dumpStringTable(fileNames);
+    }
+
+    /*
+     * Part 2: compute the size of the output.
+     *
+     * (Could also just write to an expanding buffer.)
+     */
+    size_t baseSize, totalSize;
+    baseSize = generateBaseOutput(NULL, 0, classNames, methodNames, fileNames);
+    assert(baseSize > 0);
+    totalSize = baseSize;
+    totalSize += computeStringTableSize(classNames);
+    totalSize += computeStringTableSize(methodNames);
+    totalSize += computeStringTableSize(fileNames);
+    LOGI("Generated AT, size is %zd/%zd\n", baseSize, totalSize);
+
+    /*
+     * Part 3: allocate a buffer and generate the output.
+     */
+    u1* strPtr;
+
+    buffer = (u1*) malloc(totalSize);
+    if (buffer == NULL) {
+        /* previously unchecked; a failed malloc would have crashed below */
+        LOGE("Failed allocating %zd bytes for alloc report\n", totalSize);
+        goto bail;
+    }
+    strPtr = buffer + baseSize;
+    generateBaseOutput(buffer, baseSize, classNames, methodNames, fileNames);
+    strPtr += outputStringTable(classNames, strPtr);
+    strPtr += outputStringTable(methodNames, strPtr);
+    strPtr += outputStringTable(fileNames, strPtr);
+    if (strPtr - buffer != (int)totalSize) {
+        /* cast the ptrdiff_t so it matches the %d specifier */
+        LOGE("size mismatch (%d vs %zd)\n", (int)(strPtr - buffer), totalSize);
+        dvmAbort();
+    }
+    //dvmPrintHexDump(buffer, totalSize);
+
+    *pData = buffer;
+    *pDataLen = totalSize;
+    buffer = NULL;          // don't free -- caller will own
+    result = true;
+
+bail:
+    dvmPointerSetFree(classNames);
+    dvmPointerSetFree(methodNames);
+    dvmPointerSetFree(fileNames);
+    free(buffer);
+    dvmUnlockMutex(&gDvm.allocTrackerLock);
+    //dvmDumpTrackedAllocations(false);
+    return result;
+}
+
+/*
+ * Dump the tracked allocations to the log file.
+ *
+ * If "enable" is set, we try to enable the feature if it's not already
+ * active.
+ */
+void dvmDumpTrackedAllocations(bool enable)
+{
+    if (enable)
+        dvmEnableAllocTracker();
+
+    dvmLockMutex(&gDvm.allocTrackerLock);
+    if (gDvm.allocRecords == NULL)
+        goto bail;
+
+    /*
+     * "idx" is the head of the list.  We want to start at the end of the
+     * list and move forward to the tail.
+     */
+    int idx = headIndex();
+    int count = gDvm.allocRecordCount;
+
+    LOGI("Tracked allocations, (head=%d count=%d)\n",
+        gDvm.allocRecordHead, count);
+    while (count--) {
+        AllocRecord* pRec = &gDvm.allocRecords[idx];
+        LOGI(" T=%-2d %6d %s\n",
+            pRec->threadId, pRec->size, pRec->clazz->descriptor);
+
+        if (true) {     /* stack traces; flip to false to suppress them */
+            int i;
+            for (i = 0; i < kMaxAllocRecordStackDepth; i++) {
+                if (pRec->stackElem[i].method == NULL)
+                    break;
+
+                const Method* method = pRec->stackElem[i].method;
+                if (dvmIsNativeMethod(method)) {
+                    LOGI("    %s.%s (Native)\n",
+                        method->clazz->descriptor, method->name);
+                } else {
+                    /* "+N" is the bytecode offset, in 16-bit units */
+                    LOGI("    %s.%s +%d\n",
+                        method->clazz->descriptor, method->name,
+                        pRec->stackElem[i].pc);
+                }
+            }
+        }
+
+        /* pause periodically to help logcat catch up */
+        if ((count % 5) == 0)
+            usleep(40000);
+
+        idx = (idx + 1) & (kNumAllocRecords-1);
+    }
+
+bail:
+    dvmUnlockMutex(&gDvm.allocTrackerLock);
+    if (false) {    /* self-test of the DDMS report path, normally disabled */
+        u1* data;
+        size_t dataLen;
+        if (dvmGenerateTrackedAllocationReport(&data, &dataLen))
+            free(data);
+    }
+}
+
diff --git a/vm/AllocTracker.h b/vm/AllocTracker.h
new file mode 100644
index 0000000..84ac9b8
--- /dev/null
+++ b/vm/AllocTracker.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Allocation tracking and reporting.
+ */
+#ifndef _DALVIK_ALLOCTRACKER
+#define _DALVIK_ALLOCTRACKER
+
+/* initialization */
+bool dvmAllocTrackerStartup(void);
+void dvmAllocTrackerShutdown(void);
+
+struct AllocRecord;
+typedef struct AllocRecord AllocRecord;
+
+/*
+ * Enable allocation tracking.  Does nothing if tracking is already enabled.
+ */
+bool dvmEnableAllocTracker(void);
+
+/*
+ * Disable allocation tracking.  Does nothing if tracking is not enabled.
+ */
+void dvmDisableAllocTracker(void);
+
+/*
+ * If allocation tracking is enabled, add a new entry to the set.
+ */
+#define dvmTrackAllocation(_clazz, _size)                                   \
+    {                                                                       \
+        if (gDvm.allocRecords != NULL)                                      \
+            dvmDoTrackAllocation(_clazz, _size);                            \
+    }
+void dvmDoTrackAllocation(ClassObject* clazz, int size);
+
+/*
+ * Generate a DDM packet with all of the tracked allocation data.
+ *
+ * On success, returns "true" with "*pData" and "*pDataLen" set.  "*pData"
+ * refers to newly-allocated storage that must be freed by the caller.
+ */
+bool dvmGenerateTrackedAllocationReport(u1** pData, size_t* pDataLen);
+
+/*
+ * Dump the tracked allocations to the log file.  If "enable" is set, this
+ * will enable tracking if it's not already on.
+ */
+void dvmDumpTrackedAllocations(bool enable);
+
+#endif /*_DALVIK_ALLOCTRACKER*/
diff --git a/vm/Android.mk b/vm/Android.mk
new file mode 100644
index 0000000..dfed78d
--- /dev/null
+++ b/vm/Android.mk
@@ -0,0 +1,221 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Android.mk for Dalvik VM.  If you enable or disable optional features here,
+# rebuild the VM with "make clean-libdvm && make libdvm".
+#
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+
+#
+# Compiler defines.
+#
+LOCAL_CFLAGS += -fstrict-aliasing -Wstrict-aliasing=2 -fno-align-jumps
+
+#
+# Optional features.  These may impact the size or performance of the VM.
+#
+LOCAL_CFLAGS += -DWITH_PROFILER -DWITH_DEBUGGER
+
+ifeq ($(WITH_DEADLOCK_PREDICTION),true)
+  LOCAL_CFLAGS += -DWITH_DEADLOCK_PREDICTION
+  WITH_MONITOR_TRACKING := true
+endif
+ifeq ($(WITH_MONITOR_TRACKING),true)
+  LOCAL_CFLAGS += -DWITH_MONITOR_TRACKING
+endif
+
+#
+# "Debug" profile:
+# - debugger enabled
+# - profiling enabled
+# - tracked-reference verification enabled
+# - allocation limits enabled
+# - GDB helpers enabled
+# - LOGV
+# - assert()  (NDEBUG is handled in the build system)
+#
+ifeq ($(TARGET_BUILD_TYPE),debug)
+LOCAL_CFLAGS += -DWITH_INSTR_CHECKS -DWITH_EXTRA_OBJECT_VALIDATION
+LOCAL_CFLAGS += -DWITH_TRACKREF_CHECKS
+LOCAL_CFLAGS += -DWITH_ALLOC_LIMITS
+#LOCAL_CFLAGS += -DCHECK_MUTEX
+#LOCAL_CFLAGS += -DPROFILE_FIELD_ACCESS
+LOCAL_CFLAGS += -DDVM_SHOW_EXCEPTION=3
+# add some extra stuff to make it easier to examine with GDB
+LOCAL_CFLAGS += -DEASY_GDB
+endif
+
+
+#
+# "Performance" profile:
+# - all development features disabled
+# - compiler optimizations enabled (redundant for "release" builds)
+# - (debugging and profiling still enabled)
+#
+ifeq ($(TARGET_BUILD_TYPE),release)
+#LOCAL_CFLAGS += -DNDEBUG -DLOG_NDEBUG=1
+# "-O2" is redundant for device (release) but useful for sim (debug)
+#LOCAL_CFLAGS += -O2 -Winline
+LOCAL_CFLAGS += -DDVM_SHOW_EXCEPTION=1
+# if you want to try with assertions on the device, add:
+#LOCAL_CFLAGS += -UNDEBUG -DDEBUG=1 -DLOG_NDEBUG=1 -DWITH_DALVIK_ASSERT
+endif
+
+# bug hunting: checksum and verify interpreted stack when making JNI calls
+#LOCAL_CFLAGS += -DWITH_JNI_STACK_CHECK
+
+LOCAL_SRC_FILES := \
+	AllocTracker.c \
+	AtomicCache.c \
+	CheckJni.c \
+	Ddm.c \
+	Debugger.c \
+	DvmDex.c \
+	Exception.c \
+	Hash.c \
+	Init.c \
+	InlineNative.c.arm \
+	Inlines.c \
+	Intern.c \
+	InternalNative.c \
+	Jni.c \
+	JarFile.c \
+	LinearAlloc.c \
+	Misc.c.arm \
+	Native.c \
+	PointerSet.c \
+	Profile.c \
+	Properties.c \
+	RawDexFile.c \
+	ReferenceTable.c \
+	SignalCatcher.c \
+	StdioConverter.c \
+	Sync.c \
+	Thread.c \
+	UtfString.c \
+	alloc/clz.c.arm \
+	alloc/Alloc.c \
+	alloc/HeapBitmap.c.arm \
+	alloc/HeapDebug.c \
+	alloc/HeapSource.c \
+	alloc/HeapTable.c \
+	alloc/HeapWorker.c \
+	alloc/Heap.c.arm \
+	alloc/MarkSweep.c.arm \
+	alloc/DdmHeap.c \
+	analysis/CodeVerify.c \
+	analysis/DexOptimize.c \
+	analysis/DexVerify.c \
+	interp/Interp.c.arm \
+	interp/InterpDbg.c.arm \
+	interp/InterpStd.c.arm \
+	interp/Stack.c \
+	jdwp/ExpandBuf.c \
+	jdwp/JdwpAdb.c \
+	jdwp/JdwpConstants.c \
+	jdwp/JdwpEvent.c \
+	jdwp/JdwpHandler.c \
+	jdwp/JdwpMain.c \
+	jdwp/JdwpSocket.c \
+	mterp/Mterp.c.arm \
+	oo/AccessCheck.c \
+	oo/Array.c \
+	oo/Class.c \
+	oo/Object.c \
+	oo/Resolve.c \
+	oo/TypeCheck.c \
+	reflect/Annotation.c \
+	reflect/Proxy.c \
+	reflect/Reflect.c \
+	test/TestHash.c
+
+WITH_HPROF := $(strip $(WITH_HPROF))
+ifeq ($(WITH_HPROF),)
+  WITH_HPROF := true
+endif
+ifeq ($(WITH_HPROF),true)
+  LOCAL_SRC_FILES += \
+	hprof/Hprof.c \
+	hprof/HprofClass.c \
+	hprof/HprofHeap.c \
+	hprof/HprofOutput.c \
+	hprof/HprofString.c
+  LOCAL_CFLAGS += -DWITH_HPROF=1
+
+  ifeq ($(strip $(WITH_HPROF_UNREACHABLE)),true)
+    LOCAL_CFLAGS += -DWITH_HPROF_UNREACHABLE=1
+  endif
+
+  ifeq ($(strip $(WITH_HPROF_STACK)),true)
+    LOCAL_SRC_FILES += \
+	hprof/HprofStack.c \
+	hprof/HprofStackFrame.c
+    LOCAL_CFLAGS += -DWITH_HPROF_STACK=1
+  endif # WITH_HPROF_STACK
+endif   # WITH_HPROF
+
+ifeq ($(strip $(DVM_TRACK_HEAP_MARKING)),true)
+  LOCAL_CFLAGS += -DDVM_TRACK_HEAP_MARKING=1
+endif
+
+LOCAL_C_INCLUDES += \
+	$(JNI_H_INCLUDE) \
+	dalvik \
+	dalvik/vm \
+	external/zlib \
+	$(KERNEL_HEADERS)
+
+LOCAL_LDLIBS += -lpthread -ldl
+
+ifeq ($(TARGET_SIMULATOR),true)
+  ifeq ($(HOST_OS),linux)
+    # need this for clock_gettime() in profiling
+    LOCAL_LDLIBS += -lrt
+  endif
+endif
+
+ifeq ($(TARGET_ARCH),arm)
+	# use custom version rather than FFI
+	#LOCAL_SRC_FILES += arch/arm/CallC.c
+	LOCAL_SRC_FILES += arch/arm/CallOldABI.S arch/arm/CallEABI.S
+	LOCAL_SRC_FILES += \
+		mterp/out/InterpC-armv5.c.arm \
+		mterp/out/InterpAsm-armv5.S
+	LOCAL_SHARED_LIBRARIES += libdl
+else
+	# use FFI
+	LOCAL_C_INCLUDES += external/libffi/$(TARGET_OS)-$(TARGET_ARCH)
+	LOCAL_SRC_FILES += arch/generic/Call.c
+	LOCAL_SRC_FILES += \
+		mterp/out/InterpC-desktop.c \
+		mterp/out/InterpAsm-desktop.S
+	LOCAL_SHARED_LIBRARIES += libffi
+endif
+
+LOCAL_MODULE := libdvm
+
+LOCAL_SHARED_LIBRARIES += \
+	liblog \
+	libcutils \
+	libnativehelper \
+	libz
+
+LOCAL_STATIC_LIBRARIES += \
+	libdex
+
+include $(BUILD_SHARED_LIBRARY)
+
diff --git a/vm/Atomic.h b/vm/Atomic.h
new file mode 100644
index 0000000..bc0203c
--- /dev/null
+++ b/vm/Atomic.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Atomic operations
+ */
+#ifndef _DALVIK_ATOMIC
+#define _DALVIK_ATOMIC
+
+#include <utils/Atomic.h>       /* use common Android atomic ops */
+
+/*
+ * Memory barrier.  Guarantee that register-resident variables
+ * are flushed to memory, and guarantee that instructions before
+ * the barrier do not get reordered to appear past it.
+ *
+ * 'asm volatile ("":::"memory")' is probably overkill, but it's correct.
+ * There may be a way to do it that doesn't flush every single register.
+ *
+ * NOTE(review): the empty asm with a "memory" clobber is a *compiler*
+ * barrier only -- it emits no instruction, so it does not order memory
+ * accesses performed by the CPU itself (no dmb/mfence equivalent).
+ * Confirm this is sufficient for the SMP targets in use.
+ *
+ * TODO: look into the wmb() family on Linux and equivalents on other systems.
+ */
+#define MEM_BARRIER()   do { asm volatile ("":::"memory"); } while (0)
+
+/*
+ * Atomic compare-and-swap macro.
+ *
+ * If *_addr equals "_old", replace it with "_new" and return 1.  Otherwise
+ * return 0.  (e.g. x86 "cmpxchgl" instruction.)
+ *
+ * Underlying function is currently declared:
+ * int android_atomic_cmpxchg(int32_t old, int32_t new, volatile int32_t* addr)
+ * (it returns 0 on success, hence the "== 0" mapping below)
+ */
+#define ATOMIC_CMP_SWAP(_addr, _old, _new) \
+            (android_atomic_cmpxchg((_old), (_new), (_addr)) == 0)
+
+#endif /*_DALVIK_ATOMIC*/
diff --git a/vm/AtomicCache.c b/vm/AtomicCache.c
new file mode 100644
index 0000000..cee84a8
--- /dev/null
+++ b/vm/AtomicCache.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Mutex-free cache.  Each entry has two 32-bit keys, one 32-bit value,
+ * and a 32-bit version.
+ */
+#include "Dalvik.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+/*
+ * I think modern C mandates that the results of a boolean expression are
+ * 0 or 1.  If not, or we suddenly turn into C++ and bool != int, use this.
+ */
+#define BOOL_TO_INT(x)  (x)
+//#define BOOL_TO_INT(x)  ((x) ? 1 : 0)
+
+#define CPU_CACHE_WIDTH         32
+#define CPU_CACHE_WIDTH_1       (CPU_CACHE_WIDTH-1)
+
+/*
+ * High bit of "version" doubles as the write-lock flag.  The shift must be
+ * done on an unsigned value: (1 << 31) shifts into the sign bit of a
+ * 32-bit int, which is undefined behavior in C.
+ */
+#define ATOMIC_LOCK_FLAG        (1U << 31)
+
+/*
+ * Allocate cache.
+ */
+/*
+ * Allocate and initialize a cache with "numEntries" entries.
+ *
+ * "numEntries" must be a power of 2 (the lookup macro masks the hash with
+ * numEntries-1).  Returns NULL on allocation failure; otherwise the caller
+ * owns the result and releases it with dvmFreeAtomicCache().
+ */
+AtomicCache* dvmAllocAtomicCache(int numEntries)
+{
+    AtomicCache* newCache;
+
+    newCache = (AtomicCache*) calloc(1, sizeof(AtomicCache));
+    if (newCache == NULL)
+        return NULL;
+
+    newCache->numEntries = numEntries;
+
+    /* over-allocate so we can round up to a cache-line boundary below */
+    newCache->entryAlloc = calloc(1,
+        sizeof(AtomicCacheEntry) * numEntries + CPU_CACHE_WIDTH);
+    if (newCache->entryAlloc == NULL) {
+        free(newCache);     /* don't leak the cache struct itself */
+        return NULL;
+    }
+
+    /*
+     * Adjust storage to align on a 32-byte boundary.  Each entry is 16 bytes
+     * wide.  This ensures that each cache entry sits on a single CPU cache
+     * line.
+     *
+     * Do the alignment arithmetic in uintptr_t -- casting the pointer to
+     * "int" truncates on LP64 systems.
+     */
+    assert(sizeof(AtomicCacheEntry) == 16);
+    newCache->entries = (AtomicCacheEntry*)
+        (((uintptr_t) newCache->entryAlloc + CPU_CACHE_WIDTH_1)
+            & ~(uintptr_t) CPU_CACHE_WIDTH_1);
+
+    return newCache;
+}
+
+/*
+ * Free cache.
+ */
+void dvmFreeAtomicCache(AtomicCache* cache)
+{
+    /* tolerate NULL so callers can free unconditionally */
+    if (cache != NULL) {
+        /* release the raw (unaligned) allocation, not "entries" */
+        free(cache->entryAlloc);
+        free(cache);
+    }
+}
+
+
+
+/*
+ * Update a cache entry.
+ *
+ * In the event of a collision with another thread, the update may be skipped.
+ *
+ * We only need "pCache" for stats.
+ */
+void dvmUpdateAtomicCache(u4 key1, u4 key2, u4 value, AtomicCacheEntry* pEntry,
+    u4 firstVersion
+#if CALC_CACHE_STATS > 0
+    , AtomicCache* pCache
+#endif
+    )
+{
+    /*
+     * The fields don't match, so we need to update them.  There is a
+     * risk that another thread is also trying to update them, so we
+     * grab an ownership flag to lock out other threads.
+     *
+     * If the lock flag was already set in "firstVersion", somebody else
+     * was in mid-update.  (This means that using "firstVersion" as the
+     * "before" argument to the CAS would succeed when it shouldn't and
+     * vice-versa -- we could also just pass in
+     * (firstVersion & ~ATOMIC_LOCK_FLAG) as the first argument.)
+     *
+     * NOTE: we don't really deal with the situation where we overflow
+     * the version counter (at 2^31).  Probably not a real concern.
+     */
+    if ((firstVersion & ATOMIC_LOCK_FLAG) != 0 ||
+        !ATOMIC_CMP_SWAP((volatile s4*) &pEntry->version,
+            firstVersion, firstVersion | ATOMIC_LOCK_FLAG))
+    {
+        /*
+         * We couldn't get the write lock.  Return without updating the table.
+         */
+#if CALC_CACHE_STATS > 0
+        pCache->fail++;
+#endif
+        return;
+    }
+
+    /* must be even-valued on entry; readers treat an odd version as
+     * "update in progress" */
+    assert((firstVersion & 0x01) == 0);
+
+#if CALC_CACHE_STATS > 0
+    /* for stats, assume a key value of zero indicates an empty entry */
+    if (pEntry->key1 == 0)
+        pCache->fills++;
+    else
+        pCache->misses++;
+#endif
+
+    /* volatile incr: version goes odd, warning readers that the entry
+     * fields are about to change */
+    pEntry->version++;
+    MEM_BARRIER();
+
+    pEntry->key1 = key1;
+    pEntry->key2 = key2;
+    pEntry->value = value;
+
+    /* volatile incr: version goes even again; entry contents are stable */
+    pEntry->version++;
+    MEM_BARRIER();
+
+    /*
+     * Clear the lock flag.  Nobody else should have been able to modify
+     * pEntry->version, so if this fails the world is broken.
+     */
+    firstVersion += 2;
+    if (!ATOMIC_CMP_SWAP((volatile s4*) &pEntry->version,
+            firstVersion | ATOMIC_LOCK_FLAG, firstVersion))
+    {
+        //LOGE("unable to reset the instanceof cache ownership\n");
+        dvmAbort();
+    }
+}
+
+
+/*
+ * Dump the "instanceof" cache stats.
+ */
+void dvmDumpAtomicCacheStats(const AtomicCache* pCache)
+{
+    if (pCache == NULL)
+        return;
+    /* counters are updated without atomics, so the numbers are advisory.
+     * The hits==0 check avoids printing a bogus percentage; the
+     * denominator can only be zero when hits is also zero. */
+    dvmFprintf(stdout,
+        "Cache stats: trv=%d fai=%d hit=%d mis=%d fil=%d %d%% (size=%d)\n",
+        pCache->trivial, pCache->fail, pCache->hits,
+        pCache->misses, pCache->fills,
+        (pCache->hits == 0) ? 0 :
+            pCache->hits * 100 /
+                (pCache->fail + pCache->hits + pCache->misses + pCache->fills),
+        pCache->numEntries);
+}
+
diff --git a/vm/AtomicCache.h b/vm/AtomicCache.h
new file mode 100644
index 0000000..1d59a47
--- /dev/null
+++ b/vm/AtomicCache.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Mutex-free cache for key1+key2=value.
+ */
+#ifndef _DALVIK_ATOMICCACHE
+#define _DALVIK_ATOMICCACHE
+
+/*
+ * If set to "1", gather some stats on our caching success rate.
+ */
+#define CALC_CACHE_STATS 0
+
+
+/*
+ * One entry in the cache.  We store two keys (e.g. the classes that are
+ * arguments to "instanceof") and one result (e.g. a boolean value).
+ *
+ * Must be exactly 16 bytes.
+ */
+typedef struct AtomicCacheEntry {
+    u4          key1;
+    u4          key2;
+    u4          value;
+    volatile u4 version;    /* version and lock flag */
+} AtomicCacheEntry;
+
+/*
+ * One cache.
+ *
+ * Thought: we might be able to save a few cycles by storing the cache
+ * struct and "entries" separately, avoiding an indirection.  (We already
+ * handle "numEntries" separately in ATOMIC_CACHE_LOOKUP.)
+ */
+typedef struct AtomicCache {
+    AtomicCacheEntry*   entries;        /* array of entries */
+    int         numEntries;             /* #of entries, must be power of 2 */
+
+    void*       entryAlloc;             /* memory allocated for entries */
+
+    /* cache stats; note we don't guarantee atomic increments for these */
+    int         trivial;                /* cache access not required */
+    int         fail;                   /* contention failure */
+    int         hits;                   /* found entry in cache */
+    int         misses;                 /* entry was for other keys */
+    int         fills;                  /* entry was empty */
+} AtomicCache;
+
+/*
+ * Do a cache lookup.  We need to be able to read and write entries
+ * atomically.  There are a couple of ways to do this:
+ *  (1) Have a global lock.  A mutex is too heavy, so instead we would use
+ *      an atomic flag.  If the flag is set, we could sit and spin, but
+ *      if we're a high-priority thread that may cause a lockup.  Better
+ *      to just ignore the cache and do the full computation.
+ *  (2) Have a "version" that gets incremented atomically when a write
+ *      begins and again when it completes.  Compare the version before
+ *      and after doing reads.  So long as "version" is volatile the
+ *      compiler will do the right thing, allowing us to skip atomic
+ *      ops in the common read case.  The table updates are expensive,
+ *      requiring two volatile writes and (for correctness on
+ *      multiprocessor systems) memory barriers.  We also need some
+ *      sort of lock to ensure that nobody else tries to start an
+ *      update while we're in the middle of one.
+ *
+ * We expect a 95+% hit rate for the things we use this for, so #2 is
+ * much better than #1.
+ *
+ * _cache is an AtomicCache*
+ * _cacheSize is _cache->cacheSize (can save a cycle avoiding the lookup)
+ * _key1, _key2 are the keys
+ *
+ * Define a function ATOMIC_CACHE_CALC that returns a 32-bit value.  This
+ * will be invoked when we need to compute the value.
+ *
+ * Returns the value.
+ */
+#if CALC_CACHE_STATS > 0
+# define CACHE_XARG(_value) ,_value
+#else
+# define CACHE_XARG(_value)
+#endif
+#define ATOMIC_CACHE_LOOKUP(_cache, _cacheSize, _key1, _key2) ({            \
+    AtomicCacheEntry* pEntry;                                               \
+    int hash;                                                               \
+    u4 firstVersion;                                                        \
+    u4 value;                                                               \
+                                                                            \
+    /* simple hash function */                                              \
+    hash = (((u4)(_key1) >> 2) ^ (u4)(_key2)) & ((_cacheSize)-1);           \
+    pEntry = (_cache)->entries + hash;                                      \
+                                                                            \
+    /* volatile read */                                                     \
+    firstVersion = pEntry->version;                                         \
+                                                                            \
+    if (pEntry->key1 == (u4)(_key1) && pEntry->key2 == (u4)(_key2)) {       \
+        /*                                                                  \
+         * The fields match.  Get the value, then read the version a        \
+         * second time to verify that we didn't catch a partial update.     \
+         * We're also hosed if "firstVersion" was odd, indicating that      \
+         * an update was in progress before we got here.                    \
+         */                                                                 \
+        value = pEntry->value;    /* must grab before next check */         \
+                                                                            \
+        if ((firstVersion & 0x01) != 0 || firstVersion != pEntry->version)  \
+        {                                                                   \
+            /*                                                              \
+             * We clashed with another thread.  Instead of sitting and      \
+             * spinning, which might not complete if we're a high priority  \
+             * thread, just do the regular computation.                     \
+             */                                                             \
+            if (CALC_CACHE_STATS)                                           \
+                (_cache)->fail++;                                           \
+            value = (u4) ATOMIC_CACHE_CALC;                                 \
+        } else {                                                            \
+            /* all good */                                                  \
+            if (CALC_CACHE_STATS)                                           \
+                (_cache)->hits++;                                           \
+        }                                                                   \
+    } else {                                                                \
+        /*                                                                  \
+         * Compute the result and update the cache.  We really want this    \
+         * to happen in a different method -- it makes the ARM frame        \
+         * setup for this method simpler, which gives us a ~10% speed       \
+         * boost.                                                           \
+         */                                                                 \
+        value = (u4) ATOMIC_CACHE_CALC;                                     \
+        dvmUpdateAtomicCache((u4) (_key1), (u4) (_key2), value, pEntry,     \
+                    firstVersion CACHE_XARG(_cache) );                      \
+    }                                                                       \
+    value;                                                                  \
+})
+
+/*
+ * Allocate a cache.
+ */
+AtomicCache* dvmAllocAtomicCache(int numEntries);
+
+/*
+ * Free a cache.
+ */
+void dvmFreeAtomicCache(AtomicCache* cache);
+
+/*
+ * Update a cache entry.
+ *
+ * Making the last argument optional, instead of merely unused, saves us
+ * a few percent in the ATOMIC_CACHE_LOOKUP time.
+ */
+void dvmUpdateAtomicCache(u4 key1, u4 key2, u4 value, AtomicCacheEntry* pEntry,
+    u4 firstVersion
+#if CALC_CACHE_STATS > 0
+    , AtomicCache* pCache
+#endif
+    );
+
+/*
+ * Debugging.
+ */
+void dvmDumpAtomicCacheStats(const AtomicCache* pCache);
+
+#endif /*_DALVIK_ATOMICCACHE*/
diff --git a/vm/Bits.h b/vm/Bits.h
new file mode 100644
index 0000000..38b016d
--- /dev/null
+++ b/vm/Bits.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Some handy functions for manipulating bits and bytes.
+ *
+ * These get inlined, so prefer small size over maximum speed.
+ */
+#ifndef _DALVIK_BITS
+#define _DALVIK_BITS
+
+#include "Common.h"
+#include "Inlines.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ * Get 1 byte.  (Included to make the code more legible.)
+ */
+INLINE u1 get1(unsigned const char* pSrc)
+{
+    return *pSrc;
+}
+
+/*
+ * Get 2 big-endian bytes.
+ */
+INLINE u2 get2BE(unsigned char const* pSrc)
+{
+    return (pSrc[0] << 8) | pSrc[1];
+}
+
+/*
+ * Get 4 big-endian bytes.
+ */
+INLINE u4 get4BE(unsigned char const* pSrc)
+{
+    return (pSrc[0] << 24) | (pSrc[1] << 16) | (pSrc[2] << 8) | pSrc[3];
+}
+
+/*
+ * Get 8 big-endian bytes.
+ */
+INLINE u8 get8BE(unsigned char const* pSrc)
+{
+    u4 low, high;
+
+    high = pSrc[0];
+    high = (high << 8) | pSrc[1];
+    high = (high << 8) | pSrc[2];
+    high = (high << 8) | pSrc[3];
+    low = pSrc[4];
+    low = (low << 8) | pSrc[5];
+    low = (low << 8) | pSrc[6];
+    low = (low << 8) | pSrc[7];
+
+    return ((u8) high << 32) | (u8) low;
+}
+
+/*
+ * Get 2 little-endian bytes.
+ */
+INLINE u2 get2LE(unsigned char const* pSrc)
+{
+    return pSrc[0] | (pSrc[1] << 8);
+}
+
+/*
+ * Get 4 little-endian bytes.
+ */
+INLINE u4 get4LE(unsigned char const* pSrc)
+{
+    u4 result;
+
+    result = pSrc[0];
+    result |= pSrc[1] << 8;
+    result |= pSrc[2] << 16;
+    result |= pSrc[3] << 24;
+
+    return result;
+}
+
+/*
+ * Get 8 little-endian bytes.
+ */
+INLINE u8 get8LE(unsigned char const* pSrc)
+{
+    u4 low, high;
+
+    low = pSrc[0];
+    low |= pSrc[1] << 8;
+    low |= pSrc[2] << 16;
+    low |= pSrc[3] << 24;
+    high = pSrc[4];
+    high |= pSrc[5] << 8;
+    high |= pSrc[6] << 16;
+    high |= pSrc[7] << 24;
+    return ((u8) high << 32) | (u8) low;
+}
+
+/*
+ * Grab 1 byte and advance the data pointer.
+ */
+INLINE u1 read1(unsigned const char** ppSrc)
+{
+    return *(*ppSrc)++;
+}
+
+/*
+ * Grab 2 big-endian bytes and advance the data pointer.
+ */
+INLINE u2 read2BE(unsigned char const** ppSrc)
+{
+    const unsigned char* pSrc = *ppSrc;
+
+    *ppSrc = pSrc + 2;
+    return pSrc[0] << 8 | pSrc[1];
+}
+
+/*
+ * Grab 4 big-endian bytes and advance the data pointer.
+ */
+INLINE u4 read4BE(unsigned char const** ppSrc)
+{
+    const unsigned char* pSrc = *ppSrc;
+    u4 result;
+
+    result = pSrc[0] << 24;
+    result |= pSrc[1] << 16;
+    result |= pSrc[2] << 8;
+    result |= pSrc[3];
+
+    *ppSrc = pSrc + 4;
+    return result;
+}
+
+/*
+ * Get 8 big-endian bytes and advance the data pointer.
+ */
+INLINE u8 read8BE(unsigned char const** ppSrc)
+{
+    const unsigned char* pSrc = *ppSrc;
+    u4 low, high;
+
+    high = pSrc[0];
+    high = (high << 8) | pSrc[1];
+    high = (high << 8) | pSrc[2];
+    high = (high << 8) | pSrc[3];
+    low = pSrc[4];
+    low = (low << 8) | pSrc[5];
+    low = (low << 8) | pSrc[6];
+    low = (low << 8) | pSrc[7];
+
+    *ppSrc = pSrc + 8;
+    return ((u8) high << 32) | (u8) low;
+}
+
+/*
+ * Grab 2 little-endian bytes and advance the data pointer.
+ */
+INLINE u2 read2LE(unsigned char const** ppSrc)
+{
+    const unsigned char* pSrc = *ppSrc;
+    *ppSrc = pSrc + 2;
+    return pSrc[0] | pSrc[1] << 8;
+}
+
+/*
+ * Grab 4 little-endian bytes and advance the data pointer.
+ */
+INLINE u4 read4LE(unsigned char const** ppSrc)
+{
+    const unsigned char* pSrc = *ppSrc;
+    u4 result;
+
+    result = pSrc[0];
+    result |= pSrc[1] << 8;
+    result |= pSrc[2] << 16;
+    result |= pSrc[3] << 24;
+
+    *ppSrc = pSrc + 4;
+    return result;
+}
+
+/*
+ * Get 8 little-endian bytes and advance the data pointer.
+ */
+INLINE u8 read8LE(unsigned char const** ppSrc)
+{
+    const unsigned char* pSrc = *ppSrc;
+    u4 low, high;
+
+    low = pSrc[0];
+    low |= pSrc[1] << 8;
+    low |= pSrc[2] << 16;
+    low |= pSrc[3] << 24;
+    high = pSrc[4];
+    high |= pSrc[5] << 8;
+    high |= pSrc[6] << 16;
+    high |= pSrc[7] << 24;
+
+    *ppSrc = pSrc + 8;
+    return ((u8) high << 32) | (u8) low;
+}
+
+/*
+ * Skip over a UTF-8 string (preceded by a 4-byte length).
+ */
+INLINE void skipUtf8String(unsigned char const** ppSrc)
+{
+    u4 length = read4BE(ppSrc);
+
+    /* NOTE(review): no bounds checking here -- the caller must guarantee
+     * that the buffer actually contains "length" more bytes */
+    (*ppSrc) += length;
+}
+
+/*
+ * Read a UTF-8 string into a fixed-size buffer, and null-terminate it.
+ *
+ * Returns the length of the original string.
+ */
+INLINE int readUtf8String(unsigned char const** ppSrc, char* buf, size_t bufLen)
+{
+    u4 length = read4BE(ppSrc);
+
+    /*
+     * Guard against bufLen == 0: "bufLen-1" would wrap around to SIZE_MAX
+     * and the min() computation below would pick the unclamped length,
+     * causing memcpy to write far out of bounds.
+     */
+    if (bufLen != 0) {
+        /* copy at most bufLen-1 bytes, leaving room for the terminator */
+        size_t copyLen = (length < bufLen) ? length : bufLen-1;
+
+        memcpy(buf, *ppSrc, copyLen);
+        buf[copyLen] = '\0';
+    }
+
+    /* always consume the full string so the stream stays in sync */
+    (*ppSrc) += length;
+    return length;
+}
+
+/*
+ * Read a UTF-8 string into newly-allocated storage, and null-terminate it.
+ *
+ * Returns the string and its length.  (The latter is probably unnecessary
+ * for the way we're using UTF8.)
+ */
+INLINE char* readNewUtf8String(unsigned char const** ppSrc, size_t* pLength)
+{
+    u4 length = read4BE(ppSrc);
+    char* buf;
+
+    buf = (char*) malloc(length+1);
+    if (buf == NULL) {
+        /* malloc failed; memcpy/store through NULL would be undefined
+         * behavior.  Consume the string so the stream stays in sync and
+         * report failure to the caller. */
+        (*ppSrc) += length;
+        *pLength = 0;
+        return NULL;
+    }
+
+    memcpy(buf, *ppSrc, length);
+    buf[length] = '\0';
+
+    (*ppSrc) += length;
+
+    /* caller owns the returned buffer and must free() it */
+    *pLength = length;
+    return buf;
+}
+
+
+/*
+ * Set 1 byte.  (Included to make code more consistent/legible.)
+ */
+INLINE void set1(u1* buf, u1 val)
+{
+    *buf = (u1)(val);
+}
+
+/*
+ * Set 2 big-endian bytes.
+ */
+INLINE void set2BE(u1* buf, u2 val)
+{
+    *buf++ = (u1)(val >> 8);
+    *buf = (u1)(val);
+}
+
+/*
+ * Set 4 big-endian bytes.
+ */
+INLINE void set4BE(u1* buf, u4 val)
+{
+    *buf++ = (u1)(val >> 24);
+    *buf++ = (u1)(val >> 16);
+    *buf++ = (u1)(val >> 8);
+    *buf = (u1)(val);
+}
+
+/*
+ * Set 8 big-endian bytes.
+ */
+INLINE void set8BE(u1* buf, u8 val)
+{
+    *buf++ = (u1)(val >> 56);
+    *buf++ = (u1)(val >> 48);
+    *buf++ = (u1)(val >> 40);
+    *buf++ = (u1)(val >> 32);
+    *buf++ = (u1)(val >> 24);
+    *buf++ = (u1)(val >> 16);
+    *buf++ = (u1)(val >> 8);
+    *buf = (u1)(val);
+}
+
+/*
+ * Set 2 little-endian bytes.
+ */
+INLINE void set2LE(u1* buf, u2 val)
+{
+    *buf++ = (u1)(val);
+    *buf = (u1)(val >> 8);
+}
+
+/*
+ * Set 4 little-endian bytes.
+ */
+INLINE void set4LE(u1* buf, u4 val)
+{
+    *buf++ = (u1)(val);
+    *buf++ = (u1)(val >> 8);
+    *buf++ = (u1)(val >> 16);
+    *buf = (u1)(val >> 24);
+}
+
+/*
+ * Set 8 little-endian bytes.
+ */
+INLINE void set8LE(u1* buf, u8 val)
+{
+    *buf++ = (u1)(val);
+    *buf++ = (u1)(val >> 8);
+    *buf++ = (u1)(val >> 16);
+    *buf++ = (u1)(val >> 24);
+    *buf++ = (u1)(val >> 32);
+    *buf++ = (u1)(val >> 40);
+    *buf++ = (u1)(val >> 48);
+    *buf = (u1)(val >> 56);
+}
+
+/*
+ * Stuff a UTF-8 string into the buffer.
+ */
+INLINE void setUtf8String(u1* buf, const u1* str)
+{
+    u4 strLen = strlen((const char*)str);
+
+    /* 4-byte big-endian length prefix, then the raw bytes (no NUL);
+     * caller must provide at least 4 + strlen(str) bytes in "buf" */
+    set4BE(buf, strLen);
+    memcpy(buf + sizeof(u4), str, strLen);
+}
+
+#endif /*_DALVIK_BITS*/
diff --git a/vm/CheckJni.c b/vm/CheckJni.c
new file mode 100644
index 0000000..5433763
--- /dev/null
+++ b/vm/CheckJni.c
@@ -0,0 +1,1950 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Support for -Xcheck:jni (the "careful" version of the JNI interfaces).
+ *
+ * We want to verify types, make sure class and field IDs are valid, and
+ * ensure that JNI's semantic expectations are being met.  JNI seems to
+ * be relatively lax when it comes to requirements for permission checks,
+ * e.g. access to private methods is generally allowed from anywhere.
+ *
+ * TODO: keep a counter on global Get/Release.  Report a warning if some Gets
+ * were not Released.  Do not count explicit Add/DeleteGlobalRef calls (or
+ * count them separately, so we can complain if they exceed a certain
+ * threshold).
+ *
+ * TODO: verify that the methodID passed into the Call functions is for
+ * a method in the specified class.
+ */
+#include "Dalvik.h"
+#include "JniInternal.h"
+
+#define JNI_ENTER()     dvmChangeStatus(NULL, THREAD_RUNNING)
+#define JNI_EXIT()      dvmChangeStatus(NULL, THREAD_NATIVE)
+
+#define BASE_ENV(_env)  (((JNIEnvExt*)_env)->baseFuncTable)
+#define BASE_VM(_vm)    (((JavaVMExt*)_vm)->baseFuncTable)
+
+/*
+ * Flags passed into checkThread().
+ */
+#define kFlag_Default       0x0000
+
+#define kFlag_CritBad       0x0000      /* calling while in critical is bad */
+#define kFlag_CritOkay      0x0001      /* ...okay */
+#define kFlag_CritGet       0x0002      /* this is a critical "get" */
+#define kFlag_CritRelease   0x0003      /* this is a critical "release" */
+#define kFlag_CritMask      0x0003      /* bit mask to get "crit" value */
+
+#define kFlag_ExcepBad      0x0000      /* raised exceptions are bad */
+#define kFlag_ExcepOkay     0x0004      /* ...okay */
+
+/*
+ * Enter/exit macros for JNI env "check" functions.  These do not change
+ * the thread state within the VM.
+ */
+#define CHECK_ENTER(_env, _flags)                                           \
+    do {                                                                    \
+        JNI_TRACE(true, true);                                              \
+        checkThread(_env, _flags, __FUNCTION__);                            \
+    } while(false)
+
+#define CHECK_EXIT(_env)                                                    \
+    do { JNI_TRACE(false, true); } while(false)
+
+
+/*
+ * Enter/exit macros for JNI invocation interface "check" functions.  These
+ * do not change the thread state within the VM.
+ *
+ * Set "_hasmeth" to true if we have a valid thread with a method pointer.
+ * We won't have one before attaching a thread, after detaching a thread, or
+ * after destroying the VM.
+ */
+#define CHECK_VMENTER(_vm, _hasmeth)                                        \
+    do { JNI_TRACE(true, _hasmeth); } while(false)
+#define CHECK_VMEXIT(_vm, _hasmeth)                                         \
+    do { JNI_TRACE(false, _hasmeth); } while(false)
+
+#define CHECK_FIELD_TYPE(_obj, _fieldid, _prim, _isstatic)                  \
+    checkFieldType(_obj, _fieldid, _prim, _isstatic, __FUNCTION__)
+#define CHECK_CLASS(_env, _clazz)                                           \
+    checkClass(_env, _clazz, __FUNCTION__)
+#define CHECK_STRING(_env, _str)                                            \
+    checkString(_env, _str, __FUNCTION__)
+#define CHECK_UTF_STRING(_env, _str, _nullok)                               \
+    checkUtfString(_env, _str, _nullok, __FUNCTION__)
+#define CHECK_OBJECT(_env, _obj)                                            \
+    checkObject(_env, _obj, __FUNCTION__)
+#define CHECK_ARRAY(_env, _array)                                           \
+    checkArray(_env, _array, __FUNCTION__)
+#define CHECK_LENGTH_POSITIVE(_env, _length)                                \
+    checkLengthPositive(_env, _length, __FUNCTION__)
+
+#define CHECK_SIG(_env, _methid, _sigbyte, _isstatic)                       \
+    checkSig(_env, _methid, _sigbyte, _isstatic, __FUNCTION__)
+
+/*
+ * Print trace message when both "checkJNI" and "verbose:jni" are enabled.
+ */
+#define JNI_TRACE(_entry, _hasmeth)                                         \
+    do {                                                                    \
+        if (gDvm.verboseJni && (_entry)) {                                  \
+            static const char* classDescriptor = "???";                     \
+            static const char* methodName = "???";                          \
+            if (_hasmeth) {                                                 \
+                const Method* meth = dvmGetCurrentJNIMethod();              \
+                classDescriptor = meth->clazz->descriptor;                  \
+                methodName = meth->name;                                    \
+            }                                                               \
+            /* use +6 to drop the leading "Check_" */                       \
+            LOGI("JNI: %s (from %s.%s)",                                    \
+                (__FUNCTION__)+6, classDescriptor, methodName);             \
+        }                                                                   \
+    } while(false)
+
+/*
+ * Log the current location.
+ *
+ * "func" looks like "Check_DeleteLocalRef"; we drop the "Check_".
+ */
+static void showLocation(const Method* meth, const char* func)
+{
+    /* dexProtoCopyMethodDescriptor allocates; we own and free "desc" */
+    char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
+    /* func+6 skips the "Check_" prefix of the wrapper name */
+    LOGW("             in %s.%s %s (%s)\n",
+        meth->clazz->descriptor, meth->name, desc, func + 6);
+    free(desc);
+}
+
+/*
+ * Abort if we are configured to bail out on JNI warnings.
+ */
+static inline void abortMaybe()
+{
+    /* only fatal when the "warn-error" JNI option is enabled; otherwise
+     * the warning has already been logged and we just return */
+    if (gDvm.jniWarnError) {
+        dvmDumpThread(dvmThreadSelf(), false);
+        dvmAbort();
+    }
+}
+
+/*
+ * Verify that the current thread is (a) attached and (b) associated with
+ * this particular instance of JNIEnv.
+ *
+ * Verify that, if this thread previously made a critical "get" call, we
+ * do the corresponding "release" call before we try anything else.
+ *
+ * Verify that, if an exception has been raised, the native code doesn't
+ * make any JNI calls other than the Exception* methods.
+ *
+ * TODO? if we add support for non-JNI native calls, make sure that the
+ * method at the top of the interpreted stack is a JNI method call.  (Or
+ * set a flag in the Thread/JNIEnv when the call is made and clear it on
+ * return?)
+ *
+ * NOTE: we are still in THREAD_NATIVE mode.  A GC could happen at any time.
+ */
+static void checkThread(JNIEnv* env, int flags, const char* func)
+{
+    JNIEnvExt* threadEnv;
+    bool printWarn = false;
+    bool printException = false;
+
+    /* get the *correct* JNIEnv by going through our TLS pointer */
+    threadEnv = dvmGetJNIEnvForThread();
+
+    /*
+     * Verify that the JNIEnv we've been handed matches what we expected
+     * to receive.
+     */
+    if (threadEnv == NULL) {
+        LOGE("JNI ERROR: non-VM thread making JNI calls\n");
+        /*
+         * "threadEnv" is dereferenced by the critical-section checks
+         * below, so continuing would crash on a NULL pointer anyway.
+         * Abort now with a clear message instead.
+         */
+        dvmAbort();
+    } else if ((JNIEnvExt*) env != threadEnv) {
+        if (dvmThreadSelf()->threadId != threadEnv->envThreadId) {
+            LOGE("JNI: threadEnv != thread->env?\n");
+            dvmAbort();
+        }
+
+        LOGW("JNI WARNING: threadid=%d using env from threadid=%d\n",
+            threadEnv->envThreadId, ((JNIEnvExt*)env)->envThreadId);
+        printWarn = true;
+
+        /* this is a bad idea -- need to throw as we exit, or abort func */
+        //dvmThrowException("Ljava/lang/RuntimeException;",
+        //    "invalid use of JNI env ptr");
+    } else if (((JNIEnvExt*) env)->self != dvmThreadSelf()) {
+        /* correct JNIEnv*; make sure the "self" pointer is correct */
+        LOGE("JNI: env->self != thread-self\n");
+        dvmAbort();
+    }
+
+    /*
+     * Check for critical resource misuse.  "critical" counts nested
+     * GetPrimitiveArrayCritical-style acquisitions on this thread.
+     */
+    switch (flags & kFlag_CritMask) {
+    case kFlag_CritOkay:    // okay to call this method
+        break;
+    case kFlag_CritBad:     // not okay to call
+        if (threadEnv->critical) {
+            LOGW("JNI WARNING: threadid=%d using JNI after critical get\n",
+                threadEnv->envThreadId);
+            printWarn = true;
+        }
+        break;
+    case kFlag_CritGet:     // this is a "get" call
+        /* don't check here; we allow nested gets */
+        threadEnv->critical++;
+        break;
+    case kFlag_CritRelease: // this is a "release" call
+        threadEnv->critical--;
+        if (threadEnv->critical < 0) {
+            LOGW("JNI WARNING: threadid=%d called too many crit releases\n",
+                threadEnv->envThreadId);
+            printWarn = true;
+        }
+        break;
+    default:
+        assert(false);
+    }
+
+    /*
+     * Check for raised exceptions.  Only the Exception* functions may be
+     * called with an exception pending (unless the flag says otherwise).
+     */
+    if ((flags & kFlag_ExcepOkay) == 0 && dvmCheckException(dvmThreadSelf())) {
+        LOGW("JNI WARNING: JNI method called with exception raised\n");
+        printWarn = true;
+        printException = true;
+    }
+
+    if (false) {
+        /* debug-only: dump local reference table depth */
+        Thread* self = dvmThreadSelf();
+        LOGW("NOW: %d\n",
+            (int) dvmReferenceTableEntries(&self->internalLocalRefTable));
+    }
+
+    if (printWarn)
+        showLocation(dvmGetCurrentJNIMethod(), func);
+    if (printException) {
+        LOGW("Pending exception is:\n");
+        dvmLogExceptionStackTrace();
+    }
+    if (printWarn)
+        abortMaybe();
+}
+
+/*
+ * Verify that the field is of the appropriate type.  If the field has an
+ * object type, "obj" is the object we're trying to assign into it.
+ *
+ * Works for both static and instance fields.
+ */
+static void checkFieldType(jobject obj, jfieldID fieldID, PrimitiveType prim,
+    bool isStatic, const char* func)
+{
+    static const char* primNameList[] = {
+        "Object/Array", "boolean", "char", "float", "double",
+        "byte", "short", "int", "long", "void"
+    };
+    const char** primNames = &primNameList[1];      // shift up for PRIM_NOT
+    Field* field = (Field*) fieldID;
+    bool printWarn = false;
+
+    if (fieldID == NULL) {
+        LOGE("JNI ERROR: null field ID\n");
+        abortMaybe();
+        /* if we didn't abort, bail out now -- "field" is NULL and every
+         * check below would dereference it */
+        return;
+    }
+
+    if (field->signature[0] == 'L' || field->signature[0] == '[') {
+        /* reference-typed field: verify "obj" is assignment-compatible */
+        if (obj != NULL) {
+            ClassObject* fieldClass =
+                dvmFindLoadedClass(field->signature);
+            ClassObject* objClass = ((Object*)obj)->clazz;
+
+            assert(fieldClass != NULL);
+            assert(objClass != NULL);
+
+            if (!dvmInstanceof(objClass, fieldClass)) {
+                LOGW("JNI WARNING: field '%s' with type '%s' set with wrong type (%s)\n",
+                    field->name, field->signature, objClass->descriptor);
+                printWarn = true;
+            }
+        }
+    } else if (field->signature[0] != PRIM_TYPE_TO_LETTER[prim]) {
+        LOGW("JNI WARNING: field '%s' with type '%s' set with wrong type (%s)\n",
+            field->name, field->signature, primNames[prim]);
+        printWarn = true;
+    } else if (isStatic && !dvmIsStaticField(field)) {
+        LOGW("JNI WARNING: accessing non-static field %s as static\n",
+            field->name);
+        printWarn = true;
+    } else if (!isStatic && dvmIsStaticField(field)) {
+        /* previously unreachable: the old combined condition could never
+         * select this message */
+        LOGW("JNI WARNING: accessing static field %s as non-static\n",
+            field->name);
+        printWarn = true;
+    }
+
+    if (printWarn) {
+        showLocation(dvmGetCurrentJNIMethod(), func);
+        abortMaybe();
+    }
+}
+
+/*
+ * Verify that "obj" is a valid object, and that it's an object that JNI
+ * is allowed to know about.  We allow NULL references.
+ *
+ * The caller should have switched to "running" mode before calling here.
+ */
+static void checkObject(JNIEnv* env, jobject obj, const char* func)
+{
+    UNUSED_PARAMETER(env);
+    bool printWarn = false;
+
+    if (obj == NULL)
+        return;
+    if (!dvmIsValidObject(obj)) {
+        LOGW("JNI WARNING: native code passing in bad object %p (%s)\n",
+            obj, func);
+        printWarn = true;
+    } else if (dvmGetJNIRefType(obj) == JNIInvalidRefType) {
+        LOGW("JNI WARNING: ref %p should not be visible to native code\n", obj);
+        printWarn = true;
+    }
+
+    if (printWarn) {
+        showLocation(dvmGetCurrentJNIMethod(), func);
+        abortMaybe();
+    }
+}
+
+/*
+ * Verify that "clazz" actually points to a class object.  (Also performs
+ * checkObject.)
+ *
+ * We probably don't need to identify where we're being called from,
+ * because the VM is most likely about to crash and leave a core dump
+ * if something is wrong.
+ *
+ * Because we're looking at an object on the GC heap, we have to switch
+ * to "running" mode before doing the checks.
+ */
+static void checkClass(JNIEnv* env, jclass jclazz, const char* func)
+{
+    JNI_ENTER();
+    bool printWarn = false;
+
+    ClassObject* clazz = (ClassObject*) jclazz;
+
+    if (clazz == NULL) {
+        LOGW("JNI WARNING: received null jclass\n");
+        printWarn = true;
+    } else if (!dvmIsValidObject((Object*) clazz)) {
+        LOGW("JNI WARNING: jclass points to invalid object %p\n", clazz);
+        printWarn = true;
+    } else if (clazz->obj.clazz != gDvm.classJavaLangClass) {
+        LOGW("JNI WARNING: jclass does not point to class object (%p - %s)\n",
+            jclazz, clazz->descriptor);
+        printWarn = true;
+    } else {
+        checkObject(env, jclazz, func);
+    }
+
+    if (printWarn)
+        abortMaybe();
+
+    JNI_EXIT();
+}
+
+/*
+ * Verify that "str" is non-NULL and points to a String object.
+ *
+ * Since we're dealing with objects, switch to "running" mode.
+ */
+static void checkString(JNIEnv* env, jstring str, const char* func)
+{
+    JNI_ENTER();
+    bool printWarn = false;
+
+    Object* obj = (Object*) str;
+
+    if (obj == NULL) {
+        LOGW("JNI WARNING: received null jstring (%s)\n", func);
+        printWarn = true;
+    } else if (obj->clazz != gDvm.classJavaLangString) {
+        if (dvmIsValidObject(obj))
+            LOGW("JNI WARNING: jstring points to non-string object\n");
+        else
+            LOGW("JNI WARNING: jstring is bogus (%p)\n", str);
+        printWarn = true;
+    } else {
+        checkObject(env, str, func);
+    }
+
+    if (printWarn)
+        abortMaybe();
+
+    JNI_EXIT();
+}
+
+/*
+ * Verify that "bytes" points to valid "modified UTF-8" data.
+ */
static void checkUtfString(JNIEnv* env, const char* bytes, bool nullOk,
    const char* func)
{
    // Remember the start of the string so we can print it on failure.
    const char* origBytes = bytes;

    if (bytes == NULL) {
        if (!nullOk) {
            LOGW("JNI WARNING: unexpectedly null UTF string\n");
            goto fail;
        }

        return;
    }

    // Walk the string one encoded character at a time.  The switch below
    // covers all 16 possible values of the high nibble, so no default
    // case is needed.
    while (*bytes != '\0') {
        u1 utf8 = *(bytes++);
        // Switch on the high four bits.
        switch (utf8 >> 4) {
            case 0x00:
            case 0x01:
            case 0x02:
            case 0x03:
            case 0x04:
            case 0x05:
            case 0x06:
            case 0x07: {
                // Bit pattern 0xxx. No need for any extra bytes.
                break;
            }
            case 0x08:
            case 0x09:
            case 0x0a:
            case 0x0b:
            case 0x0f: {
                /*
                 * Bit pattern 10xx or 1111, which are illegal start bytes.
                 * Note: 1111 is valid for normal UTF-8, but not the
                 * modified UTF-8 used here.
                 */
                LOGW("JNI WARNING: illegal start byte 0x%x\n", utf8);
                goto fail;
            }
            case 0x0e: {
                // Bit pattern 1110, so there are two additional bytes.
                utf8 = *(bytes++);
                if ((utf8 & 0xc0) != 0x80) {
                    LOGW("JNI WARNING: illegal continuation byte 0x%x\n", utf8);
                    goto fail;
                }
                // Fall through to take care of the final byte.
            }
            case 0x0c:
            case 0x0d: {
                // Bit pattern 110x, so there is one additional byte.
                utf8 = *(bytes++);
                if ((utf8 & 0xc0) != 0x80) {
                    LOGW("JNI WARNING: illegal continuation byte 0x%x\n", utf8);
                    goto fail;
                }
                break;
            }
        }
    }

    return;

fail:
    // Print the offending string, where we were called from, and possibly
    // abort (depending on the warn-only setting in abortMaybe()).
    LOGW("             string: '%s'\n", origBytes);
    showLocation(dvmGetCurrentJNIMethod(), func);
    abortMaybe();
}
+
+/*
+ * Verify that "array" is non-NULL and points to an Array object.
+ *
+ * Since we're dealing with objects, switch to "running" mode.
+ */
+static void checkArray(JNIEnv* env, jarray array, const char* func)
+{
+    JNI_ENTER();
+    bool printWarn = false;
+
+    Object* obj = (Object*) array;
+
+    if (obj == NULL) {
+        LOGW("JNI WARNING: received null array (%s)\n", func);
+        printWarn = true;
+    } else if (obj->clazz->descriptor[0] != '[') {
+        if (dvmIsValidObject(obj))
+            LOGW("JNI WARNING: jarray points to non-array object\n");
+        else
+            LOGW("JNI WARNING: jarray is bogus (%p)\n", array);
+        printWarn = true;
+    } else {
+        checkObject(env, array, func);
+    }
+
+    if (printWarn)
+        abortMaybe();
+
+    JNI_EXIT();
+}
+
+/*
+ * Verify that the length argument to array-creation calls is >= 0.
+ */
+static void checkLengthPositive(JNIEnv* env, jsize length, const char* func)
+{
+    if (length < 0) {
+        LOGW("JNI WARNING: negative length for array allocation (%s)\n", func);
+        abortMaybe();
+    }
+}
+
+/*
+ * Verify that the method's return type matches the type of call.
+ *
+ * "expectedSigByte" will be 'L' for all objects, including arrays.
+ */
+static void checkSig(JNIEnv* env, jmethodID methodID, char expectedSigByte,
+    bool isStatic, const char* func)
+{
+    const Method* meth = (const Method*) methodID;
+    bool printWarn = false;
+
+    if (expectedSigByte != meth->shorty[0]) {
+        LOGW("JNI WARNING: expected return type '%c'\n", expectedSigByte);
+        printWarn = true;
+    } else if (isStatic && !dvmIsStaticMethod(meth)) {
+        if (isStatic)
+            LOGW("JNI WARNING: calling non-static method with static call\n");
+        else
+            LOGW("JNI WARNING: calling static method with non-static call\n");
+        printWarn = true;
+    }
+
+    if (printWarn) {
+        char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
+        LOGW("             calling %s.%s %s\n",
+            meth->clazz->descriptor, meth->name, desc);
+        free(desc);
+        showLocation(dvmGetCurrentJNIMethod(), func);
+        abortMaybe();
+    }
+}
+
+/*
+ * Verify that this static field ID is valid for this class.
+ */
+static void checkStaticFieldID(JNIEnv* env, jclass clazz, jfieldID fieldID)
+{
+    StaticField* base = ((ClassObject*) clazz)->sfields;
+    int fieldCount = ((ClassObject*) clazz)->sfieldCount;
+
+    if ((StaticField*) fieldID < base ||
+        (StaticField*) fieldID >= base + fieldCount)
+    {
+        LOGW("JNI WARNING: static fieldID %p not valid for class %s\n",
+            fieldID, ((ClassObject*) clazz)->descriptor);
+        LOGW("             base=%p count=%d\n", base, fieldCount);
+        abortMaybe();
+    }
+}
+
+/*
+ * Verify that this instance field ID is valid for this object.
+ */
+static void checkInstanceFieldID(JNIEnv* env, jobject obj, jfieldID fieldID)
+{
+    ClassObject* clazz = ((Object*)obj)->clazz;
+
+    /*
+     * Check this class and all of its superclasses for a matching field.
+     * Don't need to scan interfaces.
+     */
+    while (clazz != NULL) {
+        if ((InstField*) fieldID >= clazz->ifields &&
+            (InstField*) fieldID < clazz->ifields + clazz->ifieldCount)
+        {
+            return;
+        }
+
+        clazz = clazz->super;
+    }
+
+    LOGW("JNI WARNING: inst fieldID %p not valid for class %s\n",
+        fieldID, ((Object*)obj)->clazz->descriptor);
+    abortMaybe();
+}
+
+
+/*
+ * ===========================================================================
+ *      JNI functions
+ * ===========================================================================
+ */
+
+static jint Check_GetVersion(JNIEnv* env)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    jint result;
+    result = BASE_ENV(env)->GetVersion(env);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jclass Check_DefineClass(JNIEnv* env, const char* name, jobject loader,
+    const jbyte* buf, jsize bufLen)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, loader);
+    CHECK_UTF_STRING(env, name, false);
+    jclass result;
+    result = BASE_ENV(env)->DefineClass(env, name, loader, buf, bufLen);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jclass Check_FindClass(JNIEnv* env, const char* name)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_UTF_STRING(env, name, false);
+    jclass result;
+    result = BASE_ENV(env)->FindClass(env, name);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jclass Check_GetSuperclass(JNIEnv* env, jclass clazz)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz);
+    jclass result;
+    result = BASE_ENV(env)->GetSuperclass(env, clazz);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jboolean Check_IsAssignableFrom(JNIEnv* env, jclass clazz1,
+    jclass clazz2)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz1);
+    CHECK_CLASS(env, clazz2);
+    jboolean result;
+    result = BASE_ENV(env)->IsAssignableFrom(env, clazz1, clazz2);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jmethodID Check_FromReflectedMethod(JNIEnv* env, jobject method)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, method);
+    jmethodID result;
+    result = BASE_ENV(env)->FromReflectedMethod(env, method);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jfieldID Check_FromReflectedField(JNIEnv* env, jobject field)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, field);
+    jfieldID result;
+    result = BASE_ENV(env)->FromReflectedField(env, field);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jobject Check_ToReflectedMethod(JNIEnv* env, jclass cls,
+    jmethodID methodID, jboolean isStatic)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, cls);
+    jobject result;
+    result = BASE_ENV(env)->ToReflectedMethod(env, cls, methodID, isStatic);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jobject Check_ToReflectedField(JNIEnv* env, jclass cls, jfieldID fieldID,
+    jboolean isStatic)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, cls);
+    jobject result;
+    result = BASE_ENV(env)->ToReflectedField(env, cls, fieldID, isStatic);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jint Check_Throw(JNIEnv* env, jthrowable obj)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, obj);
+    jint result;
+    result = BASE_ENV(env)->Throw(env, obj);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jint Check_ThrowNew(JNIEnv* env, jclass clazz, const char* message)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz);
+    CHECK_UTF_STRING(env, message, true);
+    jint result;
+    result = BASE_ENV(env)->ThrowNew(env, clazz, message);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jthrowable Check_ExceptionOccurred(JNIEnv* env)
+{
+    CHECK_ENTER(env, kFlag_ExcepOkay);
+    jthrowable result;
+    result = BASE_ENV(env)->ExceptionOccurred(env);
+    CHECK_EXIT(env);
+    return result;
+}
+
static void Check_ExceptionDescribe(JNIEnv* env)
{
    // kFlag_ExcepOkay: this call is allowed with an exception pending.
    CHECK_ENTER(env, kFlag_ExcepOkay);
    BASE_ENV(env)->ExceptionDescribe(env);
    CHECK_EXIT(env);
}
+
static void Check_ExceptionClear(JNIEnv* env)
{
    // kFlag_ExcepOkay: this call is allowed with an exception pending.
    CHECK_ENTER(env, kFlag_ExcepOkay);
    BASE_ENV(env)->ExceptionClear(env);
    CHECK_EXIT(env);
}
+
static void Check_FatalError(JNIEnv* env, const char* msg)
{
    CHECK_ENTER(env, kFlag_Default);
    // "msg" may be NULL (nullOk == true in the UTF-8 check).
    CHECK_UTF_STRING(env, msg, true);
    BASE_ENV(env)->FatalError(env, msg);
    CHECK_EXIT(env);
}
+
+static jint Check_PushLocalFrame(JNIEnv* env, jint capacity)
+{
+    CHECK_ENTER(env, kFlag_Default | kFlag_ExcepOkay);
+    jint result;
+    result = BASE_ENV(env)->PushLocalFrame(env, capacity);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jobject Check_PopLocalFrame(JNIEnv* env, jobject res)
+{
+    CHECK_ENTER(env, kFlag_Default | kFlag_ExcepOkay);
+    CHECK_OBJECT(env, res);
+    jobject result;
+    result = BASE_ENV(env)->PopLocalFrame(env, res);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jobject Check_NewGlobalRef(JNIEnv* env, jobject obj)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, obj);
+    jobject result;
+    result = BASE_ENV(env)->NewGlobalRef(env, obj);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static void Check_DeleteGlobalRef(JNIEnv* env, jobject localRef)
+{
+    CHECK_ENTER(env, kFlag_Default | kFlag_ExcepOkay);
+    CHECK_OBJECT(env, localRef);
+    BASE_ENV(env)->DeleteGlobalRef(env, localRef);
+    CHECK_EXIT(env);
+}
+
+static jobject Check_NewLocalRef(JNIEnv* env, jobject ref)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, ref);
+    jobject result;
+    result = BASE_ENV(env)->NewLocalRef(env, ref);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static void Check_DeleteLocalRef(JNIEnv* env, jobject globalRef)
+{
+    CHECK_ENTER(env, kFlag_Default | kFlag_ExcepOkay);
+    CHECK_OBJECT(env, globalRef);
+    BASE_ENV(env)->DeleteLocalRef(env, globalRef);
+    CHECK_EXIT(env);
+}
+
+static jint Check_EnsureLocalCapacity(JNIEnv *env, jint capacity)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    jint result;
+    result = BASE_ENV(env)->EnsureLocalCapacity(env, capacity);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jboolean Check_IsSameObject(JNIEnv* env, jobject ref1, jobject ref2)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, ref1);
+    CHECK_OBJECT(env, ref2);
+    jboolean result;
+    result = BASE_ENV(env)->IsSameObject(env, ref1, ref2);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jobject Check_AllocObject(JNIEnv* env, jclass clazz)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz);
+    jobject result;
+    result = BASE_ENV(env)->AllocObject(env, clazz);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jobject Check_NewObject(JNIEnv* env, jclass clazz, jmethodID methodID,
+    ...)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz);
+    jobject result;
+    va_list args;
+
+    va_start(args, methodID);
+    result = BASE_ENV(env)->NewObjectV(env, clazz, methodID, args);
+    va_end(args);
+
+    CHECK_EXIT(env);
+    return result;
+}
+static jobject Check_NewObjectV(JNIEnv* env, jclass clazz, jmethodID methodID,
+    va_list args)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz);
+    jobject result;
+    result = BASE_ENV(env)->NewObjectV(env, clazz, methodID, args);
+    CHECK_EXIT(env);
+    return result;
+}
+static jobject Check_NewObjectA(JNIEnv* env, jclass clazz, jmethodID methodID,
+    jvalue* args)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz);
+    jobject result;
+    result = BASE_ENV(env)->NewObjectA(env, clazz, methodID, args);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jclass Check_GetObjectClass(JNIEnv* env, jobject obj)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, obj);
+    jclass result;
+    result = BASE_ENV(env)->GetObjectClass(env, obj);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jboolean Check_IsInstanceOf(JNIEnv* env, jobject obj, jclass clazz)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, obj);
+    CHECK_CLASS(env, clazz);
+    jboolean result;
+    result = BASE_ENV(env)->IsInstanceOf(env, obj, clazz);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jmethodID Check_GetMethodID(JNIEnv* env, jclass clazz, const char* name,
+    const char* sig)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz);
+    CHECK_UTF_STRING(env, name, false);
+    CHECK_UTF_STRING(env, sig, false);
+    jmethodID result;
+    result = BASE_ENV(env)->GetMethodID(env, clazz, name, sig);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jfieldID Check_GetFieldID(JNIEnv* env, jclass clazz,
+    const char* name, const char* sig)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz);
+    CHECK_UTF_STRING(env, name, false);
+    CHECK_UTF_STRING(env, sig, false);
+    jfieldID result;
+    result = BASE_ENV(env)->GetFieldID(env, clazz, name, sig);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jmethodID Check_GetStaticMethodID(JNIEnv* env, jclass clazz,
+    const char* name, const char* sig)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz);
+    CHECK_UTF_STRING(env, name, false);
+    CHECK_UTF_STRING(env, sig, false);
+    jmethodID result;
+    result = BASE_ENV(env)->GetStaticMethodID(env, clazz, name, sig);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jfieldID Check_GetStaticFieldID(JNIEnv* env, jclass clazz,
+    const char* name, const char* sig)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz);
+    CHECK_UTF_STRING(env, name, false);
+    CHECK_UTF_STRING(env, sig, false);
+    jfieldID result;
+    result = BASE_ENV(env)->GetStaticFieldID(env, clazz, name, sig);
+    CHECK_EXIT(env);
+    return result;
+}
+
/*
 * Expand to Check_GetStatic<Type>Field: validates the class and checks
 * that the static field ID belongs to it, then forwards to the base
 * implementation.  "_isref" is not used by the expansion.
 */
#define GET_STATIC_TYPE_FIELD(_ctype, _jname, _isref)                       \
    static _ctype Check_GetStatic##_jname##Field(JNIEnv* env, jclass clazz, \
        jfieldID fieldID)                                                   \
    {                                                                       \
        CHECK_ENTER(env, kFlag_Default);                                    \
        CHECK_CLASS(env, clazz);                                            \
        _ctype result;                                                      \
        checkStaticFieldID(env, clazz, fieldID);                            \
        result = BASE_ENV(env)->GetStatic##_jname##Field(env, clazz,        \
            fieldID);                                                       \
        CHECK_EXIT(env);                                                    \
        return result;                                                      \
    }
GET_STATIC_TYPE_FIELD(jobject, Object, true);
GET_STATIC_TYPE_FIELD(jboolean, Boolean, false);
GET_STATIC_TYPE_FIELD(jbyte, Byte, false);
GET_STATIC_TYPE_FIELD(jchar, Char, false);
GET_STATIC_TYPE_FIELD(jshort, Short, false);
GET_STATIC_TYPE_FIELD(jint, Int, false);
GET_STATIC_TYPE_FIELD(jlong, Long, false);
GET_STATIC_TYPE_FIELD(jfloat, Float, false);
GET_STATIC_TYPE_FIELD(jdouble, Double, false);
+
/*
 * Expand to Check_SetStatic<Type>Field: validates the class, the static
 * field ID, and (via CHECK_FIELD_TYPE) that the field's declared type
 * matches "_ftype".  The (jobject)(u4) cast only matters for Object
 * fields — checkFieldType dereferences the value solely when the field
 * signature is a reference type.  NOTE(review): the u4 round-trip
 * presumably assumes 32-bit pointers — confirm for other targets.
 */
#define SET_STATIC_TYPE_FIELD(_ctype, _jname, _ftype)                       \
    static void Check_SetStatic##_jname##Field(JNIEnv* env, jclass clazz,   \
        jfieldID fieldID, _ctype value)                                     \
    {                                                                       \
        CHECK_ENTER(env, kFlag_Default);                                    \
        CHECK_CLASS(env, clazz);                                            \
        checkStaticFieldID(env, clazz, fieldID);                            \
        CHECK_FIELD_TYPE((jobject)(u4)value, fieldID, _ftype, true);        \
        BASE_ENV(env)->SetStatic##_jname##Field(env, clazz, fieldID,        \
            value);                                                         \
        CHECK_EXIT(env);                                                    \
    }
SET_STATIC_TYPE_FIELD(jobject, Object, PRIM_NOT);
SET_STATIC_TYPE_FIELD(jboolean, Boolean, PRIM_BOOLEAN);
SET_STATIC_TYPE_FIELD(jbyte, Byte, PRIM_BYTE);
SET_STATIC_TYPE_FIELD(jchar, Char, PRIM_CHAR);
SET_STATIC_TYPE_FIELD(jshort, Short, PRIM_SHORT);
SET_STATIC_TYPE_FIELD(jint, Int, PRIM_INT);
SET_STATIC_TYPE_FIELD(jlong, Long, PRIM_LONG);
SET_STATIC_TYPE_FIELD(jfloat, Float, PRIM_FLOAT);
SET_STATIC_TYPE_FIELD(jdouble, Double, PRIM_DOUBLE);
+
/*
 * Expand to Check_Get<Type>Field: validates the object and checks that
 * the instance field ID belongs to the object's class (or a superclass),
 * then forwards to the base implementation.  "_isref" is not used by
 * the expansion.
 */
#define GET_TYPE_FIELD(_ctype, _jname, _isref)                              \
    static _ctype Check_Get##_jname##Field(JNIEnv* env, jobject obj,        \
        jfieldID fieldID)                                                   \
    {                                                                       \
        CHECK_ENTER(env, kFlag_Default);                                    \
        CHECK_OBJECT(env, obj);                                             \
        _ctype result;                                                      \
        checkInstanceFieldID(env, obj, fieldID);                            \
        result = BASE_ENV(env)->Get##_jname##Field(env, obj, fieldID);      \
        CHECK_EXIT(env);                                                    \
        return result;                                                      \
    }
GET_TYPE_FIELD(jobject, Object, true);
GET_TYPE_FIELD(jboolean, Boolean, false);
GET_TYPE_FIELD(jbyte, Byte, false);
GET_TYPE_FIELD(jchar, Char, false);
GET_TYPE_FIELD(jshort, Short, false);
GET_TYPE_FIELD(jint, Int, false);
GET_TYPE_FIELD(jlong, Long, false);
GET_TYPE_FIELD(jfloat, Float, false);
GET_TYPE_FIELD(jdouble, Double, false);
+
/*
 * Expand to Check_Set<Type>Field: validates the object, the instance
 * field ID, and (via CHECK_FIELD_TYPE) the field's declared type.  The
 * (jobject)(u4) cast only matters for Object fields; checkFieldType
 * dereferences the value solely when the field signature is a reference
 * type.
 */
#define SET_TYPE_FIELD(_ctype, _jname, _ftype)                              \
    static void Check_Set##_jname##Field(JNIEnv* env, jobject obj,          \
        jfieldID fieldID, _ctype value)                                     \
    {                                                                       \
        CHECK_ENTER(env, kFlag_Default);                                    \
        CHECK_OBJECT(env, obj);                                             \
        checkInstanceFieldID(env, obj, fieldID);                            \
        CHECK_FIELD_TYPE((jobject)(u4) value, fieldID, _ftype, false);      \
        BASE_ENV(env)->Set##_jname##Field(env, obj, fieldID, value);        \
        CHECK_EXIT(env);                                                    \
    }
SET_TYPE_FIELD(jobject, Object, PRIM_NOT);
SET_TYPE_FIELD(jboolean, Boolean, PRIM_BOOLEAN);
SET_TYPE_FIELD(jbyte, Byte, PRIM_BYTE);
SET_TYPE_FIELD(jchar, Char, PRIM_CHAR);
SET_TYPE_FIELD(jshort, Short, PRIM_SHORT);
SET_TYPE_FIELD(jint, Int, PRIM_INT);
SET_TYPE_FIELD(jlong, Long, PRIM_LONG);
SET_TYPE_FIELD(jfloat, Float, PRIM_FLOAT);
SET_TYPE_FIELD(jdouble, Double, PRIM_DOUBLE);
+
/*
 * Expand the three virtual-call variants (varargs, "V", and "A") for one
 * return type.  Each validates the receiver object and checks the method's
 * return type against "_retsig" (the expected shorty character).
 *
 *  _retdecl: declaration of "result" (empty for void)
 *  _retasgn: "result =" assignment prefix (empty for void)
 *  _retok:   expression returned on the normal path (empty for void)
 *  _retfail: not used by the expansion
 */
#define CALL_VIRTUAL(_ctype, _jname, _retfail, _retdecl, _retasgn, _retok,  \
        _retsig)                                                            \
    static _ctype Check_Call##_jname##Method(JNIEnv* env, jobject obj,      \
        jmethodID methodID, ...)                                            \
    {                                                                       \
        CHECK_ENTER(env, kFlag_Default);                                    \
        CHECK_OBJECT(env, obj);                                             \
        CHECK_SIG(env, methodID, _retsig, false);                           \
        _retdecl;                                                           \
        va_list args;                                                       \
        va_start(args, methodID);                                           \
        _retasgn BASE_ENV(env)->Call##_jname##MethodV(env, obj, methodID,   \
            args);                                                          \
        va_end(args);                                                       \
        CHECK_EXIT(env);                                                    \
        return _retok;                                                      \
    }                                                                       \
    static _ctype Check_Call##_jname##MethodV(JNIEnv* env, jobject obj,     \
        jmethodID methodID, va_list args)                                   \
    {                                                                       \
        CHECK_ENTER(env, kFlag_Default);                                    \
        CHECK_OBJECT(env, obj);                                             \
        CHECK_SIG(env, methodID, _retsig, false);                           \
        _retdecl;                                                           \
        _retasgn BASE_ENV(env)->Call##_jname##MethodV(env, obj, methodID,   \
            args);                                                          \
        CHECK_EXIT(env);                                                    \
        return _retok;                                                      \
    }                                                                       \
    static _ctype Check_Call##_jname##MethodA(JNIEnv* env, jobject obj,     \
        jmethodID methodID, jvalue* args)                                   \
    {                                                                       \
        CHECK_ENTER(env, kFlag_Default);                                    \
        CHECK_OBJECT(env, obj);                                             \
        CHECK_SIG(env, methodID, _retsig, false);                           \
        _retdecl;                                                           \
        _retasgn BASE_ENV(env)->Call##_jname##MethodA(env, obj, methodID,   \
            args);                                                          \
        CHECK_EXIT(env);                                                    \
        return _retok;                                                      \
    }
CALL_VIRTUAL(jobject, Object, NULL, Object* result, result=, result, 'L');
CALL_VIRTUAL(jboolean, Boolean, 0, jboolean result, result=, result, 'Z');
CALL_VIRTUAL(jbyte, Byte, 0, jbyte result, result=, result, 'B');
CALL_VIRTUAL(jchar, Char, 0, jchar result, result=, result, 'C');
CALL_VIRTUAL(jshort, Short, 0, jshort result, result=, result, 'S');
CALL_VIRTUAL(jint, Int, 0, jint result, result=, result, 'I');
CALL_VIRTUAL(jlong, Long, 0, jlong result, result=, result, 'J');
// NOTE(review): for Float/Double, _retok re-reads "result" through a cast
// pointer; since "result" already has the matching C type this looks like
// a no-op reinterpretation — confirm the intent.
CALL_VIRTUAL(jfloat, Float, 0.0f, jfloat result, result=, *(float*)&result, 'F');
CALL_VIRTUAL(jdouble, Double, 0.0, jdouble result, result=, *(double*)&result, 'D');
CALL_VIRTUAL(void, Void, , , , , 'V');
+
/*
 * Expand the three nonvirtual-call variants (varargs, "V", and "A") for
 * one return type.  Each validates the class and the receiver object and
 * checks the method's return type against "_retsig".  Parameter roles
 * (_retdecl/_retasgn/_retok) follow the same pattern as CALL_VIRTUAL;
 * "_retfail" is not used by the expansion.
 */
#define CALL_NONVIRTUAL(_ctype, _jname, _retfail, _retdecl, _retasgn,       \
        _retok, _retsig)                                                    \
    static _ctype Check_CallNonvirtual##_jname##Method(JNIEnv* env,         \
        jobject obj, jclass clazz, jmethodID methodID, ...)                 \
    {                                                                       \
        CHECK_ENTER(env, kFlag_Default);                                    \
        CHECK_CLASS(env, clazz);                                            \
        CHECK_OBJECT(env, obj);                                             \
        CHECK_SIG(env, methodID, _retsig, false);                           \
        _retdecl;                                                           \
        va_list args;                                                       \
        va_start(args, methodID);                                           \
        _retasgn BASE_ENV(env)->CallNonvirtual##_jname##MethodV(env, obj,   \
            clazz, methodID, args);                                         \
        va_end(args);                                                       \
        CHECK_EXIT(env);                                                    \
        return _retok;                                                      \
    }                                                                       \
    static _ctype Check_CallNonvirtual##_jname##MethodV(JNIEnv* env,        \
        jobject obj, jclass clazz, jmethodID methodID, va_list args)        \
    {                                                                       \
        CHECK_ENTER(env, kFlag_Default);                                    \
        CHECK_CLASS(env, clazz);                                            \
        CHECK_OBJECT(env, obj);                                             \
        CHECK_SIG(env, methodID, _retsig, false);                           \
        _retdecl;                                                           \
        _retasgn BASE_ENV(env)->CallNonvirtual##_jname##MethodV(env, obj,   \
            clazz, methodID, args);                                         \
        CHECK_EXIT(env);                                                    \
        return _retok;                                                      \
    }                                                                       \
    static _ctype Check_CallNonvirtual##_jname##MethodA(JNIEnv* env,        \
        jobject obj, jclass clazz, jmethodID methodID, jvalue* args)        \
    {                                                                       \
        CHECK_ENTER(env, kFlag_Default);                                    \
        CHECK_CLASS(env, clazz);                                            \
        CHECK_OBJECT(env, obj);                                             \
        CHECK_SIG(env, methodID, _retsig, false);                           \
        _retdecl;                                                           \
        _retasgn BASE_ENV(env)->CallNonvirtual##_jname##MethodA(env, obj,   \
            clazz, methodID, args);                                         \
        CHECK_EXIT(env);                                                    \
        return _retok;                                                      \
    }
// NOTE(review): (Object*)(u4)result round-trips a pointer through u4,
// which presumably assumes 32-bit pointers — confirm for other targets.
CALL_NONVIRTUAL(jobject, Object, NULL, Object* result, result=, (Object*)(u4)result, 'L');
CALL_NONVIRTUAL(jboolean, Boolean, 0, jboolean result, result=, result, 'Z');
CALL_NONVIRTUAL(jbyte, Byte, 0, jbyte result, result=, result, 'B');
CALL_NONVIRTUAL(jchar, Char, 0, jchar result, result=, result, 'C');
CALL_NONVIRTUAL(jshort, Short, 0, jshort result, result=, result, 'S');
CALL_NONVIRTUAL(jint, Int, 0, jint result, result=, result, 'I');
CALL_NONVIRTUAL(jlong, Long, 0, jlong result, result=, result, 'J');
CALL_NONVIRTUAL(jfloat, Float, 0.0f, jfloat result, result=, *(float*)&result, 'F');
CALL_NONVIRTUAL(jdouble, Double, 0.0, jdouble result, result=, *(double*)&result, 'D');
CALL_NONVIRTUAL(void, Void, , , , , 'V');
+
+
+#define CALL_STATIC(_ctype, _jname, _retfail, _retdecl, _retasgn, _retok,   \
+        _retsig)                                                            \
+    static _ctype Check_CallStatic##_jname##Method(JNIEnv* env,             \
+        jclass clazz, jmethodID methodID, ...)                              \
+    {                                                                       \
+        CHECK_ENTER(env, kFlag_Default);                                    \
+        CHECK_CLASS(env, clazz);                                            \
+        CHECK_SIG(env, methodID, _retsig, true);                            \
+        _retdecl;                                                           \
+        va_list args;                                                       \
+        va_start(args, methodID);                                           \
+        _retasgn BASE_ENV(env)->CallStatic##_jname##MethodV(env, clazz,     \
+            methodID, args);                                                \
+        va_end(args);                                                       \
+        CHECK_EXIT(env);                                                    \
+        return _retok;                                                      \
+    }                                                                       \
+    static _ctype Check_CallStatic##_jname##MethodV(JNIEnv* env,            \
+        jclass clazz, jmethodID methodID, va_list args)                     \
+    {                                                                       \
+        CHECK_ENTER(env, kFlag_Default);                                    \
+        CHECK_CLASS(env, clazz);                                            \
+        CHECK_SIG(env, methodID, _retsig, true);                            \
+        _retdecl;                                                           \
+        _retasgn BASE_ENV(env)->CallStatic##_jname##MethodV(env, clazz,     \
+            methodID, args);                                                \
+        CHECK_EXIT(env);                                                    \
+        return _retok;                                                      \
+    }                                                                       \
+    static _ctype Check_CallStatic##_jname##MethodA(JNIEnv* env,            \
+        jclass clazz, jmethodID methodID, jvalue* args)                     \
+    {                                                                       \
+        CHECK_ENTER(env, kFlag_Default);                                    \
+        CHECK_CLASS(env, clazz);                                            \
+        CHECK_SIG(env, methodID, _retsig, true);                            \
+        _retdecl;                                                           \
+        _retasgn BASE_ENV(env)->CallStatic##_jname##MethodA(env, clazz,     \
+            methodID, args);                                                \
+        CHECK_EXIT(env);                                                    \
+        return _retok;                                                      \
+    }
+CALL_STATIC(jobject, Object, NULL, Object* result, result=, (Object*)(u4)result, 'L');
+CALL_STATIC(jboolean, Boolean, 0, jboolean result, result=, result, 'Z');
+CALL_STATIC(jbyte, Byte, 0, jbyte result, result=, result, 'B');
+CALL_STATIC(jchar, Char, 0, jchar result, result=, result, 'C');
+CALL_STATIC(jshort, Short, 0, jshort result, result=, result, 'S');
+CALL_STATIC(jint, Int, 0, jint result, result=, result, 'I');
+CALL_STATIC(jlong, Long, 0, jlong result, result=, result, 'J');
+CALL_STATIC(jfloat, Float, 0.0f, jfloat result, result=, *(float*)&result, 'F');
+CALL_STATIC(jdouble, Double, 0.0, jdouble result, result=, *(double*)&result, 'D');
+CALL_STATIC(void, Void, , , , , 'V');
+
+static jstring Check_NewString(JNIEnv* env, const jchar* unicodeChars,
+    jsize len)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    jstring result;
+    result = BASE_ENV(env)->NewString(env, unicodeChars, len);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jsize Check_GetStringLength(JNIEnv* env, jstring string)
+{
+    CHECK_ENTER(env, kFlag_CritOkay);
+    CHECK_STRING(env, string);
+    jsize result;
+    result = BASE_ENV(env)->GetStringLength(env, string);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static const jchar* Check_GetStringChars(JNIEnv* env, jstring string,
+    jboolean* isCopy)
+{
+    CHECK_ENTER(env, kFlag_CritOkay);
+    CHECK_STRING(env, string);
+    const jchar* result;
+    result = BASE_ENV(env)->GetStringChars(env, string, isCopy);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static void Check_ReleaseStringChars(JNIEnv* env, jstring string,
+    const jchar* chars)
+{
+    CHECK_ENTER(env, kFlag_Default | kFlag_ExcepOkay);
+    CHECK_STRING(env, string);
+    BASE_ENV(env)->ReleaseStringChars(env, string, chars);
+    CHECK_EXIT(env);
+}
+
+static jstring Check_NewStringUTF(JNIEnv* env, const char* bytes)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_UTF_STRING(env, bytes, true);
+    jstring result;
+    result = BASE_ENV(env)->NewStringUTF(env, bytes);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jsize Check_GetStringUTFLength(JNIEnv* env, jstring string)
+{
+    CHECK_ENTER(env, kFlag_CritOkay);
+    CHECK_STRING(env, string);
+    jsize result;
+    result = BASE_ENV(env)->GetStringUTFLength(env, string);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static const char* Check_GetStringUTFChars(JNIEnv* env, jstring string,
+    jboolean* isCopy)
+{
+    CHECK_ENTER(env, kFlag_CritOkay);
+    CHECK_STRING(env, string);
+    const char* result;
+    result = BASE_ENV(env)->GetStringUTFChars(env, string, isCopy);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static void Check_ReleaseStringUTFChars(JNIEnv* env, jstring string,
+    const char* utf)
+{
+    CHECK_ENTER(env, kFlag_ExcepOkay);
+    CHECK_STRING(env, string);
+    BASE_ENV(env)->ReleaseStringUTFChars(env, string, utf);
+    CHECK_EXIT(env);
+}
+
+static jsize Check_GetArrayLength(JNIEnv* env, jarray array)
+{
+    CHECK_ENTER(env, kFlag_CritOkay);
+    CHECK_ARRAY(env, array);
+    jsize result;
+    result = BASE_ENV(env)->GetArrayLength(env, array);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jobjectArray Check_NewObjectArray(JNIEnv* env, jsize length,
+    jclass elementClass, jobject initialElement)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, elementClass);
+    CHECK_OBJECT(env, initialElement);
+    CHECK_LENGTH_POSITIVE(env, length);
+    jobjectArray result;
+    result = BASE_ENV(env)->NewObjectArray(env, length, elementClass,
+                                            initialElement);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jobject Check_GetObjectArrayElement(JNIEnv* env, jobjectArray array,
+    jsize index)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_ARRAY(env, array);
+    jobject result;
+    result = BASE_ENV(env)->GetObjectArrayElement(env, array, index);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static void Check_SetObjectArrayElement(JNIEnv* env, jobjectArray array,
+    jsize index, jobject value)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_ARRAY(env, array);
+    BASE_ENV(env)->SetObjectArrayElement(env, array, index, value);
+    CHECK_EXIT(env);
+}
+
+#define NEW_PRIMITIVE_ARRAY(_artype, _jname, _typechar)                     \
+    static _artype Check_New##_jname##Array(JNIEnv* env, jsize length)      \
+    {                                                                       \
+        CHECK_ENTER(env, kFlag_Default);                                    \
+        CHECK_LENGTH_POSITIVE(env, length);                                 \
+        _artype result;                                                     \
+        result = BASE_ENV(env)->New##_jname##Array(env, length);            \
+        CHECK_EXIT(env);                                                    \
+        return result;                                                      \
+    }
+NEW_PRIMITIVE_ARRAY(jbooleanArray, Boolean, 'Z');
+NEW_PRIMITIVE_ARRAY(jbyteArray, Byte, 'B');
+NEW_PRIMITIVE_ARRAY(jcharArray, Char, 'C');
+NEW_PRIMITIVE_ARRAY(jshortArray, Short, 'S');
+NEW_PRIMITIVE_ARRAY(jintArray, Int, 'I');
+NEW_PRIMITIVE_ARRAY(jlongArray, Long, 'J');
+NEW_PRIMITIVE_ARRAY(jfloatArray, Float, 'F');
+NEW_PRIMITIVE_ARRAY(jdoubleArray, Double, 'D');
+
+#define GET_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname)                        \
+    static _ctype* Check_Get##_jname##ArrayElements(JNIEnv* env,            \
+        _ctype##Array array, jboolean* isCopy)                              \
+    {                                                                       \
+        CHECK_ENTER(env, kFlag_Default);                                    \
+        CHECK_ARRAY(env, array);                                            \
+        _ctype* result;                                                     \
+        result = BASE_ENV(env)->Get##_jname##ArrayElements(env,             \
+            array, isCopy);                                                 \
+        CHECK_EXIT(env);                                                    \
+        return result;                                                      \
+    }
+
+#define RELEASE_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname)                    \
+    static void Check_Release##_jname##ArrayElements(JNIEnv* env,           \
+        _ctype##Array array, _ctype* elems, jint mode)                      \
+    {                                                                       \
+        CHECK_ENTER(env, kFlag_Default | kFlag_ExcepOkay);                  \
+        CHECK_ARRAY(env, array);                                            \
+        BASE_ENV(env)->Release##_jname##ArrayElements(env,                  \
+            array, elems, mode);                                            \
+        CHECK_EXIT(env);                                                    \
+    }
+
+#define GET_PRIMITIVE_ARRAY_REGION(_ctype, _jname)                          \
+    static void Check_Get##_jname##ArrayRegion(JNIEnv* env,                 \
+        _ctype##Array array, jsize start, jsize len, _ctype* buf)           \
+    {                                                                       \
+        CHECK_ENTER(env, kFlag_Default);                                    \
+        CHECK_ARRAY(env, array);                                            \
+        BASE_ENV(env)->Get##_jname##ArrayRegion(env, array, start,          \
+            len, buf);                                                      \
+        CHECK_EXIT(env);                                                    \
+    }
+
+#define SET_PRIMITIVE_ARRAY_REGION(_ctype, _jname)                          \
+    static void Check_Set##_jname##ArrayRegion(JNIEnv* env,                 \
+        _ctype##Array array, jsize start, jsize len, const _ctype* buf)     \
+    {                                                                       \
+        CHECK_ENTER(env, kFlag_Default);                                    \
+        CHECK_ARRAY(env, array);                                            \
+        BASE_ENV(env)->Set##_jname##ArrayRegion(env, array, start,          \
+            len, buf);                                                      \
+        CHECK_EXIT(env);                                                    \
+    }
+
+#define PRIMITIVE_ARRAY_FUNCTIONS(_ctype, _jname)                           \
+    GET_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname);                           \
+    RELEASE_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname);                       \
+    GET_PRIMITIVE_ARRAY_REGION(_ctype, _jname);                             \
+    SET_PRIMITIVE_ARRAY_REGION(_ctype, _jname);
+
+PRIMITIVE_ARRAY_FUNCTIONS(jboolean, Boolean);
+PRIMITIVE_ARRAY_FUNCTIONS(jbyte, Byte);
+PRIMITIVE_ARRAY_FUNCTIONS(jchar, Char);
+PRIMITIVE_ARRAY_FUNCTIONS(jshort, Short);
+PRIMITIVE_ARRAY_FUNCTIONS(jint, Int);
+PRIMITIVE_ARRAY_FUNCTIONS(jlong, Long);
+PRIMITIVE_ARRAY_FUNCTIONS(jfloat, Float);
+PRIMITIVE_ARRAY_FUNCTIONS(jdouble, Double);
+
+static jint Check_RegisterNatives(JNIEnv* env, jclass clazz,
+    const JNINativeMethod* methods, jint nMethods)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz);
+    jint result;
+    result = BASE_ENV(env)->RegisterNatives(env, clazz, methods, nMethods);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jint Check_UnregisterNatives(JNIEnv* env, jclass clazz)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_CLASS(env, clazz);
+    jint result;
+    result = BASE_ENV(env)->UnregisterNatives(env, clazz);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jint Check_MonitorEnter(JNIEnv* env, jobject obj)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, obj);
+    jint result;
+    result = BASE_ENV(env)->MonitorEnter(env, obj);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jint Check_MonitorExit(JNIEnv* env, jobject obj)
+{
+    CHECK_ENTER(env, kFlag_Default | kFlag_ExcepOkay);
+    CHECK_OBJECT(env, obj);
+    jint result;
+    result = BASE_ENV(env)->MonitorExit(env, obj);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jint Check_GetJavaVM(JNIEnv *env, JavaVM **vm)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    jint result;
+    result = BASE_ENV(env)->GetJavaVM(env, vm);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static void Check_GetStringRegion(JNIEnv* env, jstring str, jsize start,
+    jsize len, jchar* buf)
+{
+    CHECK_ENTER(env, kFlag_CritOkay);
+    CHECK_STRING(env, str);
+    BASE_ENV(env)->GetStringRegion(env, str, start, len, buf);
+    CHECK_EXIT(env);
+}
+
+static void Check_GetStringUTFRegion(JNIEnv* env, jstring str, jsize start,
+    jsize len, char* buf)
+{
+    CHECK_ENTER(env, kFlag_CritOkay);
+    CHECK_STRING(env, str);
+    BASE_ENV(env)->GetStringUTFRegion(env, str, start, len, buf);
+    CHECK_EXIT(env);
+}
+
+static void* Check_GetPrimitiveArrayCritical(JNIEnv* env, jarray array,
+    jboolean* isCopy)
+{
+    CHECK_ENTER(env, kFlag_CritGet);
+    CHECK_ARRAY(env, array);
+    void* result;
+    result = BASE_ENV(env)->GetPrimitiveArrayCritical(env, array, isCopy);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static void Check_ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array,
+    void* carray, jint mode)
+{
+    CHECK_ENTER(env, kFlag_CritRelease | kFlag_ExcepOkay);
+    CHECK_ARRAY(env, array);
+    BASE_ENV(env)->ReleasePrimitiveArrayCritical(env, array, carray, mode);
+    CHECK_EXIT(env);
+}
+
+static const jchar* Check_GetStringCritical(JNIEnv* env, jstring string,
+    jboolean* isCopy)
+{
+    CHECK_ENTER(env, kFlag_CritGet);
+    CHECK_STRING(env, string);
+    const jchar* result;
+    result = BASE_ENV(env)->GetStringCritical(env, string, isCopy);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static void Check_ReleaseStringCritical(JNIEnv* env, jstring string,
+    const jchar* carray)
+{
+    CHECK_ENTER(env, kFlag_CritRelease | kFlag_ExcepOkay);
+    CHECK_STRING(env, string);
+    BASE_ENV(env)->ReleaseStringCritical(env, string, carray);
+    CHECK_EXIT(env);
+}
+
+static jweak Check_NewWeakGlobalRef(JNIEnv* env, jobject obj)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, obj);
+    jweak result;
+    result = BASE_ENV(env)->NewWeakGlobalRef(env, obj);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static void Check_DeleteWeakGlobalRef(JNIEnv* env, jweak obj)
+{
+    CHECK_ENTER(env, kFlag_Default | kFlag_ExcepOkay);
+    CHECK_OBJECT(env, obj);
+    BASE_ENV(env)->DeleteWeakGlobalRef(env, obj);
+    CHECK_EXIT(env);
+}
+
+static jboolean Check_ExceptionCheck(JNIEnv* env)
+{
+    CHECK_ENTER(env, kFlag_CritOkay | kFlag_ExcepOkay);
+    jboolean result;
+    result = BASE_ENV(env)->ExceptionCheck(env);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jobjectRefType Check_GetObjectRefType(JNIEnv* env, jobject obj)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, obj);
+    jobjectRefType result;
+    result = BASE_ENV(env)->GetObjectRefType(env, obj);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jobject Check_NewDirectByteBuffer(JNIEnv* env, void* address,
+    jlong capacity)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    jobject result;
+    if (address == NULL || capacity < 0) {
+        LOGW("JNI WARNING: invalid values for address (%p) or capacity (%ld)\n",
+            address, (long) capacity);
+        abortMaybe();
+    }
+    result = BASE_ENV(env)->NewDirectByteBuffer(env, address, capacity);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static void* Check_GetDirectBufferAddress(JNIEnv* env, jobject buf)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, buf);
+    void* result;
+    //if (buf == NULL)
+    //    result = NULL;
+    //else
+        result = BASE_ENV(env)->GetDirectBufferAddress(env, buf);
+    CHECK_EXIT(env);
+    return result;
+}
+
+static jlong Check_GetDirectBufferCapacity(JNIEnv* env, jobject buf)
+{
+    CHECK_ENTER(env, kFlag_Default);
+    CHECK_OBJECT(env, buf);
+    jlong result;
+    //if (buf == NULL)
+    //    result = -1;
+    //else
+        result = BASE_ENV(env)->GetDirectBufferCapacity(env, buf);
+    CHECK_EXIT(env);
+    return result;
+}
+
+
+/*
+ * ===========================================================================
+ *      JNI invocation functions
+ * ===========================================================================
+ */
+
+static jint Check_DestroyJavaVM(JavaVM* vm)
+{
+    CHECK_VMENTER(vm, false);
+    jint result;
+    result = BASE_VM(vm)->DestroyJavaVM(vm);
+    CHECK_VMEXIT(vm, false);
+    return result;
+}
+
+static jint Check_AttachCurrentThread(JavaVM* vm, JNIEnv** p_env,
+    void* thr_args)
+{
+    CHECK_VMENTER(vm, false);
+    jint result;
+    result = BASE_VM(vm)->AttachCurrentThread(vm, p_env, thr_args);
+    CHECK_VMEXIT(vm, true);
+    return result;
+}
+
+static jint Check_AttachCurrentThreadAsDaemon(JavaVM* vm, JNIEnv** p_env,
+    void* thr_args)
+{
+    CHECK_VMENTER(vm, false);
+    jint result;
+    result = BASE_VM(vm)->AttachCurrentThreadAsDaemon(vm, p_env, thr_args);
+    CHECK_VMEXIT(vm, true);
+    return result;
+}
+
+static jint Check_DetachCurrentThread(JavaVM* vm)
+{
+    CHECK_VMENTER(vm, true);
+    jint result;
+    result = BASE_VM(vm)->DetachCurrentThread(vm);
+    CHECK_VMEXIT(vm, false);
+    return result;
+}
+
+static jint Check_GetEnv(JavaVM* vm, void** env, jint version)
+{
+    CHECK_VMENTER(vm, true);
+    jint result;
+    result = BASE_VM(vm)->GetEnv(vm, env, version);
+    CHECK_VMEXIT(vm, true);
+    return result;
+}
+
+
+/*
+ * ===========================================================================
+ *      Function tables
+ * ===========================================================================
+ */
+
+static const struct JNINativeInterface gCheckNativeInterface = {
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+
+    Check_GetVersion,
+
+    Check_DefineClass,
+    Check_FindClass,
+
+    Check_FromReflectedMethod,
+    Check_FromReflectedField,
+    Check_ToReflectedMethod,
+
+    Check_GetSuperclass,
+    Check_IsAssignableFrom,
+
+    Check_ToReflectedField,
+
+    Check_Throw,
+    Check_ThrowNew,
+    Check_ExceptionOccurred,
+    Check_ExceptionDescribe,
+    Check_ExceptionClear,
+    Check_FatalError,
+
+    Check_PushLocalFrame,
+    Check_PopLocalFrame,
+
+    Check_NewGlobalRef,
+    Check_DeleteGlobalRef,
+    Check_DeleteLocalRef,
+    Check_IsSameObject,
+    Check_NewLocalRef,
+    Check_EnsureLocalCapacity,
+
+    Check_AllocObject,
+    Check_NewObject,
+    Check_NewObjectV,
+    Check_NewObjectA,
+
+    Check_GetObjectClass,
+    Check_IsInstanceOf,
+
+    Check_GetMethodID,
+
+    Check_CallObjectMethod,
+    Check_CallObjectMethodV,
+    Check_CallObjectMethodA,
+    Check_CallBooleanMethod,
+    Check_CallBooleanMethodV,
+    Check_CallBooleanMethodA,
+    Check_CallByteMethod,
+    Check_CallByteMethodV,
+    Check_CallByteMethodA,
+    Check_CallCharMethod,
+    Check_CallCharMethodV,
+    Check_CallCharMethodA,
+    Check_CallShortMethod,
+    Check_CallShortMethodV,
+    Check_CallShortMethodA,
+    Check_CallIntMethod,
+    Check_CallIntMethodV,
+    Check_CallIntMethodA,
+    Check_CallLongMethod,
+    Check_CallLongMethodV,
+    Check_CallLongMethodA,
+    Check_CallFloatMethod,
+    Check_CallFloatMethodV,
+    Check_CallFloatMethodA,
+    Check_CallDoubleMethod,
+    Check_CallDoubleMethodV,
+    Check_CallDoubleMethodA,
+    Check_CallVoidMethod,
+    Check_CallVoidMethodV,
+    Check_CallVoidMethodA,
+
+    Check_CallNonvirtualObjectMethod,
+    Check_CallNonvirtualObjectMethodV,
+    Check_CallNonvirtualObjectMethodA,
+    Check_CallNonvirtualBooleanMethod,
+    Check_CallNonvirtualBooleanMethodV,
+    Check_CallNonvirtualBooleanMethodA,
+    Check_CallNonvirtualByteMethod,
+    Check_CallNonvirtualByteMethodV,
+    Check_CallNonvirtualByteMethodA,
+    Check_CallNonvirtualCharMethod,
+    Check_CallNonvirtualCharMethodV,
+    Check_CallNonvirtualCharMethodA,
+    Check_CallNonvirtualShortMethod,
+    Check_CallNonvirtualShortMethodV,
+    Check_CallNonvirtualShortMethodA,
+    Check_CallNonvirtualIntMethod,
+    Check_CallNonvirtualIntMethodV,
+    Check_CallNonvirtualIntMethodA,
+    Check_CallNonvirtualLongMethod,
+    Check_CallNonvirtualLongMethodV,
+    Check_CallNonvirtualLongMethodA,
+    Check_CallNonvirtualFloatMethod,
+    Check_CallNonvirtualFloatMethodV,
+    Check_CallNonvirtualFloatMethodA,
+    Check_CallNonvirtualDoubleMethod,
+    Check_CallNonvirtualDoubleMethodV,
+    Check_CallNonvirtualDoubleMethodA,
+    Check_CallNonvirtualVoidMethod,
+    Check_CallNonvirtualVoidMethodV,
+    Check_CallNonvirtualVoidMethodA,
+
+    Check_GetFieldID,
+
+    Check_GetObjectField,
+    Check_GetBooleanField,
+    Check_GetByteField,
+    Check_GetCharField,
+    Check_GetShortField,
+    Check_GetIntField,
+    Check_GetLongField,
+    Check_GetFloatField,
+    Check_GetDoubleField,
+    Check_SetObjectField,
+    Check_SetBooleanField,
+    Check_SetByteField,
+    Check_SetCharField,
+    Check_SetShortField,
+    Check_SetIntField,
+    Check_SetLongField,
+    Check_SetFloatField,
+    Check_SetDoubleField,
+
+    Check_GetStaticMethodID,
+
+    Check_CallStaticObjectMethod,
+    Check_CallStaticObjectMethodV,
+    Check_CallStaticObjectMethodA,
+    Check_CallStaticBooleanMethod,
+    Check_CallStaticBooleanMethodV,
+    Check_CallStaticBooleanMethodA,
+    Check_CallStaticByteMethod,
+    Check_CallStaticByteMethodV,
+    Check_CallStaticByteMethodA,
+    Check_CallStaticCharMethod,
+    Check_CallStaticCharMethodV,
+    Check_CallStaticCharMethodA,
+    Check_CallStaticShortMethod,
+    Check_CallStaticShortMethodV,
+    Check_CallStaticShortMethodA,
+    Check_CallStaticIntMethod,
+    Check_CallStaticIntMethodV,
+    Check_CallStaticIntMethodA,
+    Check_CallStaticLongMethod,
+    Check_CallStaticLongMethodV,
+    Check_CallStaticLongMethodA,
+    Check_CallStaticFloatMethod,
+    Check_CallStaticFloatMethodV,
+    Check_CallStaticFloatMethodA,
+    Check_CallStaticDoubleMethod,
+    Check_CallStaticDoubleMethodV,
+    Check_CallStaticDoubleMethodA,
+    Check_CallStaticVoidMethod,
+    Check_CallStaticVoidMethodV,
+    Check_CallStaticVoidMethodA,
+
+    Check_GetStaticFieldID,
+
+    Check_GetStaticObjectField,
+    Check_GetStaticBooleanField,
+    Check_GetStaticByteField,
+    Check_GetStaticCharField,
+    Check_GetStaticShortField,
+    Check_GetStaticIntField,
+    Check_GetStaticLongField,
+    Check_GetStaticFloatField,
+    Check_GetStaticDoubleField,
+
+    Check_SetStaticObjectField,
+    Check_SetStaticBooleanField,
+    Check_SetStaticByteField,
+    Check_SetStaticCharField,
+    Check_SetStaticShortField,
+    Check_SetStaticIntField,
+    Check_SetStaticLongField,
+    Check_SetStaticFloatField,
+    Check_SetStaticDoubleField,
+
+    Check_NewString,
+
+    Check_GetStringLength,
+    Check_GetStringChars,
+    Check_ReleaseStringChars,
+
+    Check_NewStringUTF,
+    Check_GetStringUTFLength,
+    Check_GetStringUTFChars,
+    Check_ReleaseStringUTFChars,
+
+    Check_GetArrayLength,
+    Check_NewObjectArray,
+    Check_GetObjectArrayElement,
+    Check_SetObjectArrayElement,
+
+    Check_NewBooleanArray,
+    Check_NewByteArray,
+    Check_NewCharArray,
+    Check_NewShortArray,
+    Check_NewIntArray,
+    Check_NewLongArray,
+    Check_NewFloatArray,
+    Check_NewDoubleArray,
+
+    Check_GetBooleanArrayElements,
+    Check_GetByteArrayElements,
+    Check_GetCharArrayElements,
+    Check_GetShortArrayElements,
+    Check_GetIntArrayElements,
+    Check_GetLongArrayElements,
+    Check_GetFloatArrayElements,
+    Check_GetDoubleArrayElements,
+
+    Check_ReleaseBooleanArrayElements,
+    Check_ReleaseByteArrayElements,
+    Check_ReleaseCharArrayElements,
+    Check_ReleaseShortArrayElements,
+    Check_ReleaseIntArrayElements,
+    Check_ReleaseLongArrayElements,
+    Check_ReleaseFloatArrayElements,
+    Check_ReleaseDoubleArrayElements,
+
+    Check_GetBooleanArrayRegion,
+    Check_GetByteArrayRegion,
+    Check_GetCharArrayRegion,
+    Check_GetShortArrayRegion,
+    Check_GetIntArrayRegion,
+    Check_GetLongArrayRegion,
+    Check_GetFloatArrayRegion,
+    Check_GetDoubleArrayRegion,
+    Check_SetBooleanArrayRegion,
+    Check_SetByteArrayRegion,
+    Check_SetCharArrayRegion,
+    Check_SetShortArrayRegion,
+    Check_SetIntArrayRegion,
+    Check_SetLongArrayRegion,
+    Check_SetFloatArrayRegion,
+    Check_SetDoubleArrayRegion,
+
+    Check_RegisterNatives,
+    Check_UnregisterNatives,
+
+    Check_MonitorEnter,
+    Check_MonitorExit,
+
+    Check_GetJavaVM,
+
+    Check_GetStringRegion,
+    Check_GetStringUTFRegion,
+
+    Check_GetPrimitiveArrayCritical,
+    Check_ReleasePrimitiveArrayCritical,
+
+    Check_GetStringCritical,
+    Check_ReleaseStringCritical,
+
+    Check_NewWeakGlobalRef,
+    Check_DeleteWeakGlobalRef,
+
+    Check_ExceptionCheck,
+
+    Check_NewDirectByteBuffer,
+    Check_GetDirectBufferAddress,
+    Check_GetDirectBufferCapacity,
+
+    Check_GetObjectRefType
+};
+static const struct JNIInvokeInterface gCheckInvokeInterface = {
+    NULL,
+    NULL,
+    NULL,
+
+    Check_DestroyJavaVM,
+    Check_AttachCurrentThread,
+    Check_DetachCurrentThread,
+
+    Check_GetEnv,
+
+    Check_AttachCurrentThreadAsDaemon,
+};
+
+
+/*
+ * Replace the normal table with the checked table.
+ */
+void dvmUseCheckedJniEnv(JNIEnvExt* pEnv)
+{
+    assert(pEnv->funcTable != &gCheckNativeInterface);
+    pEnv->baseFuncTable = pEnv->funcTable;
+    pEnv->funcTable = &gCheckNativeInterface;
+}
+
+/*
+ * Replace the normal table with the checked table.
+ */
+void dvmUseCheckedJniVm(JavaVMExt* pVm)
+{
+    assert(pVm->funcTable != &gCheckInvokeInterface);
+    pVm->baseFuncTable = pVm->funcTable;
+    pVm->funcTable = &gCheckInvokeInterface;
+}
+
diff --git a/vm/Common.h b/vm/Common.h
new file mode 100644
index 0000000..8ca5224
--- /dev/null
+++ b/vm/Common.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Common defines for all Dalvik code.
+ */
+#ifndef _DALVIK_COMMON
+#define _DALVIK_COMMON
+
+#ifndef LOG_TAG
+# define LOG_TAG "dalvikvm"
+#endif
+
+#include <stdio.h>
+#include <assert.h>
+
+#if !defined(NDEBUG) && defined(WITH_DALVIK_ASSERT)
+# undef assert
+# define assert(x) \
+    ((x) ? ((void)0) : (LOGE("ASSERT FAILED (%s:%d): " #x "\n", \
+        __FILE__, __LINE__), *(int*)39=39, 0) )
+#endif
+
+
+/*
+ * If "very verbose" logging is enabled, make it equivalent to LOGV.
+ * Otherwise, make it disappear.
+ *
+ * Define this above the #include "Dalvik.h" to enable for only a
+ * single file.
+ */
+/* #define VERY_VERBOSE_LOG */
+#if defined(VERY_VERBOSE_LOG)
+# define LOGVV      LOGV
+# define IF_LOGVV() IF_LOGV()
+#else
+# define LOGVV(...) ((void)0)
+# define IF_LOGVV() if (false)
+#endif
+
+
+/*
+ * These match the definitions in the VM specification.
+ */
+#ifdef HAVE_STDINT_H
+# include <stdint.h>    /* C99 */
+typedef uint8_t             u1;
+typedef uint16_t            u2;
+typedef uint32_t            u4;
+typedef uint64_t            u8;
+typedef int8_t              s1;
+typedef int16_t             s2;
+typedef int32_t             s4;
+typedef int64_t             s8;
+#else
+typedef unsigned char       u1;
+typedef unsigned short      u2;
+typedef unsigned int        u4;
+typedef unsigned long long  u8;
+typedef signed char         s1;
+typedef signed short        s2;
+typedef signed int          s4;
+typedef signed long long    s8;
+#endif
+
+/*
+ * Storage for primitive types and object references.
+ *
+ * Some parts of the code (notably object field access) assume that values
+ * are "left aligned", i.e. given "JValue jv", "jv.i" and "*((s4*)&jv)"
+ * yield the same result.  This seems to be guaranteed by gcc on big- and
+ * little-endian systems.
+ */
+typedef union JValue {
+    u1      z;
+    s1      b;
+    u2      c;
+    s2      s;
+    s4      i;
+    s8      j;
+    float   f;
+    double  d;
+    void*   l;
+} JValue;
+
+/*
+ * Some systems might have this in <stdbool.h>.
+ */
+#ifndef __bool_true_false_are_defined
+typedef enum { false=0, true=!false } bool;
+#define __bool_true_false_are_defined 1
+#endif
+
+#define NELEM(x) ((int) (sizeof(x) / sizeof((x)[0])))
+
+
+#if defined(HAVE_ENDIAN_H)
+# include <endian.h>
+#else /*not HAVE_ENDIAN_H*/
+# define __BIG_ENDIAN 4321
+# define __LITTLE_ENDIAN 1234
+# if defined(HAVE_LITTLE_ENDIAN)
+#  define __BYTE_ORDER __LITTLE_ENDIAN
+# else
+#  define __BYTE_ORDER __BIG_ENDIAN
+# endif
+#endif /*not HAVE_ENDIAN_H*/
+
+
+#if 0
+/*
+ * Pretend we have the Android logging macros.  These are replaced by the
+ * Android logging implementation.
+ */
+#define ANDROID_LOG_DEBUG 3
+#define LOGV(...)    LOG_PRI(2, 0, __VA_ARGS__)
+#define LOGD(...)    LOG_PRI(3, 0, __VA_ARGS__)
+#define LOGI(...)    LOG_PRI(4, 0, __VA_ARGS__)
+#define LOGW(...)    LOG_PRI(5, 0, __VA_ARGS__)
+#define LOGE(...)    LOG_PRI(6, 0, __VA_ARGS__)
+#define MIN_LOG_LEVEL   2
+
+#define LOG_PRI(priority, tag, ...) do {                            \
+        if (priority >= MIN_LOG_LEVEL) {                            \
+            dvmFprintf(stdout, "%s:%-4d ", __FILE__, __LINE__);     \
+            dvmFprintf(stdout, __VA_ARGS__);                        \
+        }                                                           \
+    } while(0)
+#else
+# include "utils/Log.h"
+#endif
+
+#endif /*_DALVIK_COMMON*/
diff --git a/vm/Dalvik.h b/vm/Dalvik.h
new file mode 100644
index 0000000..5805db5
--- /dev/null
+++ b/vm/Dalvik.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * All-inclusive internal header file.  Include this to get everything useful.
+ */
+#ifndef _DALVIK_DALVIK
+#define _DALVIK_DALVIK
+
+#include <pthread.h>
+
+#include "Common.h"
+#include "Inlines.h"
+#include "Misc.h"
+#include "Bits.h"
+#include "libdex/SysUtil.h"
+#include "libdex/DexFile.h"
+#include "libdex/DexProto.h"
+#include "libdex/ZipArchive.h"
+#include "DvmDex.h"
+#include "RawDexFile.h"
+#include "Sync.h"
+#include "oo/Object.h"
+#include "Native.h"
+
+#include "DalvikVersion.h"
+#include "Debugger.h"
+#include "Profile.h"
+#include "UtfString.h"
+#include "Intern.h"
+#include "ReferenceTable.h"
+#include "AtomicCache.h"
+#include "Thread.h"
+#include "Ddm.h"
+#include "Hash.h"
+#include "interp/Stack.h"
+#include "oo/Class.h"
+#include "oo/Resolve.h"
+#include "oo/Array.h"
+#include "Exception.h"
+#include "alloc/Alloc.h"
+#include "alloc/HeapDebug.h"
+#include "alloc/HeapWorker.h"
+#include "alloc/GC.h"
+#include "oo/AccessCheck.h"
+#include "JarFile.h"
+#include "Properties.h"
+#include "jdwp/Jdwp.h"
+#include "SignalCatcher.h"
+#include "StdioConverter.h"
+#include "JniInternal.h"
+#include "LinearAlloc.h"
+#include "analysis/DexVerify.h"
+#include "analysis/DexOptimize.h"
+#include "Init.h"
+#include "libdex/OpCode.h"
+#include "libdex/InstrUtils.h"
+#include "AllocTracker.h"
+#include "Globals.h"
+#include "reflect/Reflect.h"
+#include "oo/TypeCheck.h"
+#include "Atomic.h"
+#include "interp/Interp.h"
+#include "PointerSet.h"
+#include "InlineNative.h"
+
+#endif /*_DALVIK_DALVIK*/
diff --git a/vm/DalvikVersion.h b/vm/DalvikVersion.h
new file mode 100644
index 0000000..03d1382
--- /dev/null
+++ b/vm/DalvikVersion.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik VM version info.
+ */
+#ifndef _DALVIK_VERSION
+#define _DALVIK_VERSION
+
+/*
+ * The version we show to tourists.
+ */
+/* user-visible version, major.minor.bug */
+#define DALVIK_MAJOR_VERSION    1
+#define DALVIK_MINOR_VERSION    0
+#define DALVIK_BUG_VERSION      0
+
+/*
+ * VM build number.  This must change whenever something that affects the
+ * way classes load changes, e.g. field ordering or vtable layout.  Changing
+ * this guarantees that the optimized form of the DEX file is regenerated.
+ */
+#define DALVIK_VM_BUILD         13
+
+#endif /*_DALVIK_VERSION*/
diff --git a/vm/Ddm.c b/vm/Ddm.c
new file mode 100644
index 0000000..76cbe82
--- /dev/null
+++ b/vm/Ddm.c
@@ -0,0 +1,598 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Handle Dalvik Debug Monitor requests and events.
+ *
+ * Remember that all DDM traffic is big-endian since it travels over the
+ * JDWP connection.
+ */
+#include "Dalvik.h"
+
+#include <fcntl.h>
+#include <errno.h>
+
+/*
+ * "buf" contains a full JDWP packet, possibly with multiple chunks.  We
+ * need to process each, accumulate the replies, and ship the whole thing
+ * back.
+ *
+ * Returns "true" if we have a reply.  The reply buffer is newly allocated,
+ * and includes the chunk type/length, followed by the data.
+ *
+ * TODO: we currently assume that the request and reply include a single
+ * chunk.  If this becomes inconvenient we will need to adapt.
+ */
+bool dvmDdmHandlePacket(const u1* buf, int dataLen, u1** pReplyBuf,
+    int* pReplyLen)
+{
+    Thread* self = dvmThreadSelf();
+    const int kChunkHdrLen = 8;
+    ArrayObject* dataArray = NULL;
+    bool result = false;
+
+    assert(dataLen >= 0);
+
+    /*
+     * NOTE(review): the header reads below (type + length) assume
+     * dataLen >= kChunkHdrLen; presumably the JDWP layer only hands us
+     * complete chunks -- confirm against the caller.
+     */
+
+    /*
+     * Prep DdmServer.  We could throw this in gDvm.
+     */
+    ClassObject* ddmServerClass;
+    Method* dispatch;
+
+    ddmServerClass =
+        dvmFindClass("Lorg/apache/harmony/dalvik/ddmc/DdmServer;", NULL);
+    if (ddmServerClass == NULL) {
+        LOGW("Unable to find org.apache.harmony.dalvik.ddmc.DdmServer\n");
+        goto bail;
+    }
+    dispatch = dvmFindDirectMethodByDescriptor(ddmServerClass, "dispatch",
+                    "(I[BII)Lorg/apache/harmony/dalvik/ddmc/Chunk;");
+    if (dispatch == NULL) {
+        LOGW("Unable to find DdmServer.dispatch\n");
+        goto bail;
+    }
+
+    /*
+     * Prep Chunk.
+     */
+    int chunkTypeOff, chunkDataOff, chunkOffsetOff, chunkLengthOff;
+    ClassObject* chunkClass;
+    chunkClass = dvmFindClass("Lorg/apache/harmony/dalvik/ddmc/Chunk;", NULL);
+    if (chunkClass == NULL) {
+        LOGW("Unable to find org.apache.harmony.dalvik.ddmc.Chunk\n");
+        goto bail;
+    }
+    chunkTypeOff = dvmFindFieldOffset(chunkClass, "type", "I");
+    chunkDataOff = dvmFindFieldOffset(chunkClass, "data", "[B");
+    chunkOffsetOff = dvmFindFieldOffset(chunkClass, "offset", "I");
+    chunkLengthOff = dvmFindFieldOffset(chunkClass, "length", "I");
+    if (chunkTypeOff < 0 || chunkDataOff < 0 ||
+        chunkOffsetOff < 0 || chunkLengthOff < 0)
+    {
+        LOGW("Unable to find all chunk fields\n");
+        goto bail;
+    }
+
+    /*
+     * The chunk handlers are written in the Java programming language, so
+     * we need to convert the buffer to a byte array.
+     */
+    dataArray = dvmAllocPrimitiveArray('B', dataLen, ALLOC_DEFAULT);
+    if (dataArray == NULL) {
+        LOGW("array alloc failed (%d)\n", dataLen);
+        dvmClearException(self);
+        goto bail;
+    }
+    memcpy(dataArray->contents, buf, dataLen);
+
+    /*
+     * Run through and find all chunks.  [Currently just find the first.]
+     */
+    unsigned int offset, length, type;
+    type = get4BE((u1*)dataArray->contents + 0);
+    length = get4BE((u1*)dataArray->contents + 4);
+    offset = kChunkHdrLen;
+    if (offset+length > (unsigned int) dataLen) {
+        LOGW("WARNING: bad chunk found (len=%u pktLen=%d)\n", length, dataLen);
+        goto bail;
+    }
+
+    /*
+     * Call the handler.
+     */
+    JValue callRes;
+    dvmCallMethod(self, dispatch, NULL, &callRes, type, dataArray, offset,
+        length);
+    if (dvmCheckException(self)) {
+        LOGI("Exception thrown by dispatcher for 0x%08x\n", type);
+        dvmLogExceptionStackTrace();
+        dvmClearException(self);
+        goto bail;
+    }
+
+    /* a NULL Chunk means "no reply"; that's not an error */
+    Object* chunk;
+    ArrayObject* replyData;
+    chunk = (Object*) callRes.l;
+    if (chunk == NULL)
+        goto bail;
+
+    /*
+     * Pull the pieces out of the chunk.  We copy the results into a
+     * newly-allocated buffer that the caller can free.  We don't want to
+     * continue using the Chunk object because nothing has a reference to it.
+     * (If we do an alloc in here, we need to dvmAddTrackedAlloc it.)
+     *
+     * We could avoid this by returning type/data/offset/length and having
+     * the caller be aware of the object lifetime issues, but that
+     * integrates the JDWP code more tightly into the VM, and doesn't work
+     * if we have responses for multiple chunks.
+     *
+     * So we're pretty much stuck with copying data around multiple times.
+     */
+    type = dvmGetFieldInt(chunk, chunkTypeOff);
+    replyData = (ArrayObject*) dvmGetFieldObject(chunk, chunkDataOff);
+    offset = dvmGetFieldInt(chunk, chunkOffsetOff);
+    length = dvmGetFieldInt(chunk, chunkLengthOff);
+
+    LOGV("DDM reply: type=0x%08x data=%p offset=%d length=%d\n",
+        type, replyData, offset, length);
+
+    if (length == 0 || replyData == NULL)
+        goto bail;
+    if (offset + length > replyData->length) {
+        LOGW("WARNING: chunk off=%d len=%d exceeds reply array len %d\n",
+            offset, length, replyData->length);
+        goto bail;
+    }
+
+    /* reply = [4b type][4b length][payload]; ownership passes to the
+     * caller, who must free() it */
+    u1* reply;
+    reply = (u1*) malloc(length + kChunkHdrLen);
+    if (reply == NULL) {
+        LOGW("malloc %d failed\n", length+kChunkHdrLen);
+        goto bail;
+    }
+    set4BE(reply + 0, type);
+    set4BE(reply + 4, length);
+    memcpy(reply+kChunkHdrLen, (const u1*)replyData->contents + offset, length);
+
+    *pReplyBuf = reply;
+    *pReplyLen = length + kChunkHdrLen;
+    result = true;
+
+    LOGV("dvmHandleDdm returning type=%.4s buf=%p len=%d\n",
+        (char*) reply, reply, length);
+
+bail:
+    /* single exit: release the tracked byte[] on every path */
+    dvmReleaseTrackedAlloc((Object*) dataArray, NULL);
+    return result;
+}
+
+/* defined in org.apache.harmony.dalvik.ddmc.DdmServer */
+#define CONNECTED       1
+#define DISCONNECTED    2
+
+/*
+ * Broadcast an event to all handlers.
+ */
+static void broadcast(int event)
+{
+    /* look up DdmServer.broadcast(int); bail quietly if it's missing */
+    ClassObject* serverClass =
+        dvmFindClass("Lorg/apache/harmony/dalvik/ddmc/DdmServer;", NULL);
+    if (serverClass == NULL) {
+        LOGW("Unable to find org.apache.harmony.dalvik.ddmc.DdmServer\n");
+        return;
+    }
+
+    Method* broadcastMethod =
+        dvmFindDirectMethodByDescriptor(serverClass, "broadcast", "(I)V");
+    if (broadcastMethod == NULL) {
+        LOGW("Unable to find DdmServer.broadcast\n");
+        return;
+    }
+
+    Thread* self = dvmThreadSelf();
+    if (self->status != THREAD_RUNNING) {
+        LOGE("ERROR: DDM broadcast with thread status=%d\n", self->status);
+        /* try anyway? */
+    }
+
+    /* invoke the handler; log and clear any exception it throws */
+    JValue unused;
+    dvmCallMethod(self, broadcastMethod, NULL, &unused, event);
+    if (dvmCheckException(self)) {
+        LOGI("Exception thrown by broadcast(%d)\n", event);
+        dvmLogExceptionStackTrace();
+        dvmClearException(self);
+    }
+}
+
+/*
+ * First DDM packet has arrived over JDWP.  Notify the press.
+ *
+ * We can do some initialization here too.
+ */
+void dvmDdmConnected(void)
+{
+    // TODO: any init
+
+    /* tell the registered DdmServer handlers a monitor is attached */
+    LOGV("Broadcasting DDM connect\n");
+    broadcast(CONNECTED);
+}
+
+/*
+ * JDWP connection has dropped.
+ *
+ * Do some cleanup.
+ */
+void dvmDdmDisconnected(void)
+{
+    LOGV("Broadcasting DDM disconnect\n");
+    broadcast(DISCONNECTED);
+
+    /* stop sending thread start/stop chunks until DDMS re-enables them */
+    gDvm.ddmThreadNotification = false;
+}
+
+
+/*
+ * Turn thread notification on or off.
+ */
+void dvmDdmSetThreadNotification(bool enable)
+{
+    /*
+     * We lock the thread list to avoid sending duplicate events or missing
+     * a thread change.  We should be okay holding this lock while sending
+     * the messages out.  (We have to hold it while accessing a live thread.)
+     */
+    dvmLockThreadList(NULL);
+    gDvm.ddmThreadNotification = enable;
+
+    if (enable) {
+        /* replay a "thread created" chunk for every live thread so the
+         * debugger's view starts out complete */
+        Thread* thread;
+        for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+            //LOGW("notify %d\n", thread->threadId);
+            dvmDdmSendThreadNotification(thread, true);
+        }
+    }
+
+    dvmUnlockThreadList();
+}
+
+/*
+ * Send a notification when a thread starts or stops.
+ *
+ * Because we broadcast the full set of threads when the notifications are
+ * first enabled, it's possible for "thread" to be actively executing.
+ */
+void dvmDdmSendThreadNotification(Thread* thread, bool started)
+{
+    if (!gDvm.ddmThreadNotification)
+        return;
+
+    StringObject* nameObj = (StringObject*)
+        dvmGetFieldObject(thread->threadObj, gDvm.offJavaLangThread_name);
+
+    int type, len;
+    u1 buf[256];    /* fixed scratch buffer; THCR names are truncated to fit */
+
+    if (started) {
+        const u2* chars;
+        u2* outChars;
+        size_t stringLen;
+
+        /* THCR layout: (4b) threadId, (4b) name length, UTF-16BE chars */
+        type = CHUNK_TYPE("THCR");
+
+        if (nameObj != NULL) {
+            stringLen = dvmStringLen(nameObj);
+            chars = dvmStringChars(nameObj);
+        } else {
+            stringLen = 0;
+            chars = NULL;
+        }
+
+        /* leave room for the two integer fields */
+        if (stringLen > (sizeof(buf) - sizeof(u4)*2) / 2)
+            stringLen = (sizeof(buf) - sizeof(u4)*2) / 2;
+        len = stringLen*2 + sizeof(u4)*2;
+
+        set4BE(&buf[0x00], thread->threadId);
+        set4BE(&buf[0x04], stringLen);
+
+        /* copy the UTF-16 string, transforming to big-endian */
+        outChars = (u2*) &buf[0x08];
+        while (stringLen--)
+            set2BE((u1*) (outChars++), *chars++);
+    } else {
+        /* THDE layout: (4b) threadId only */
+        type = CHUNK_TYPE("THDE");
+
+        len = 4;
+
+        set4BE(&buf[0x00], thread->threadId);
+    }
+
+    dvmDbgDdmSendChunk(type, len, buf);
+}
+
+/*
+ * Send a notification when a thread's name changes.
+ */
+/*
+ * Send a THNM chunk when a thread's name changes.
+ *
+ * "newName" may be NULL (dvmDdmSendThreadNotification handles a NULL
+ * thread name explicitly, so we must too); in that case we send an
+ * empty name rather than crashing in dvmStringLen().
+ */
+void dvmDdmSendThreadNameChange(int threadId, StringObject* newName)
+{
+    if (!gDvm.ddmThreadNotification)
+        return;
+
+    size_t stringLen;
+    const u2* chars;
+    if (newName != NULL) {
+        stringLen = dvmStringLen(newName);
+        chars = dvmStringChars(newName);
+    } else {
+        stringLen = 0;
+        chars = NULL;
+    }
+
+    /*
+     * Output format:
+     *  (4b) thread ID
+     *  (4b) stringLen
+     *  (xb) string chars (UTF-16, big-endian)
+     */
+    int bufLen = 4 + 4 + (stringLen * 2);
+    u1 buf[bufLen];
+
+    set4BE(&buf[0x00], threadId);
+    set4BE(&buf[0x04], stringLen);
+    u2* outChars = (u2*) &buf[0x08];
+    while (stringLen--)
+        set2BE((u1*) (outChars++), *chars++);
+
+    dvmDbgDdmSendChunk(CHUNK_TYPE("THNM"), bufLen, buf);
+}
+
+/*
+ * Get some per-thread stats.
+ *
+ * This is currently generated by opening the appropriate "stat" file
+ * in /proc and reading the pile of stuff that comes out.
+ */
+/*
+ * Read per-thread utime/stime (fields 14 and 15 of the stat line) from
+ * /proc/<pid>/task/<tid>/stat.  Returns false if the file can't be
+ * opened or read (expected on non-Linux platforms).
+ */
+static bool getThreadStats(pid_t pid, pid_t tid, unsigned long* pUtime,
+    unsigned long* pStime)
+{
+    /*
+    int pid;
+    char comm[128];
+    char state;
+    int ppid, pgrp, session, tty_nr, tpgid;
+    unsigned long flags, minflt, cminflt, majflt, cmajflt, utime, stime;
+    long cutime, cstime, priority, nice, zero, itrealvalue;
+    unsigned long starttime, vsize;
+    long rss;
+    unsigned long rlim, startcode, endcode, startstack, kstkesp, kstkeip;
+    unsigned long signal, blocked, sigignore, sigcatch, wchan, nswap, cnswap;
+    int exit_signal, processor;
+    unsigned long rt_priority, policy;
+
+    scanf("%d %s %c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld "
+          "%ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu "
+          "%lu %lu %lu %d %d %lu %lu",
+        &pid, comm, &state, &ppid, &pgrp, &session, &tty_nr, &tpgid,
+        &flags, &minflt, &cminflt, &majflt, &cmajflt, &utime, &stime,
+        &cutime, &cstime, &priority, &nice, &zero, &itrealvalue,
+        &starttime, &vsize, &rss, &rlim, &startcode, &endcode,
+        &startstack, &kstkesp, &kstkeip, &signal, &blocked, &sigignore,
+        &sigcatch, &wchan, &nswap, &cnswap, &exit_signal, &processor,
+        &rt_priority, &policy);
+    */
+
+    char nameBuf[64];
+    int i, fd;
+
+    /*
+     * Open and read the appropriate file.  This is expected to work on
+     * Linux but will fail on other platforms (e.g. Mac sim).
+     *
+     * snprintf (rather than sprintf) so two worst-case 32-bit pids can
+     * never overrun nameBuf.
+     */
+    snprintf(nameBuf, sizeof(nameBuf), "/proc/%d/task/%d/stat",
+        (int) pid, (int) tid);
+    fd = open(nameBuf, O_RDONLY);
+    if (fd < 0) {
+        LOGV("Unable to open '%s': %s\n", nameBuf, strerror(errno));
+        return false;
+    }
+
+    char lineBuf[512];      // > 2x typical
+    int cc;
+    cc = read(fd, lineBuf, sizeof(lineBuf)-1);
+    if (cc <= 0) {
+        LOGI("Unable to read '%s': got %d (errno=%d)\n", nameBuf, cc, errno);
+        close(fd);
+        return false;
+    }
+    lineBuf[cc] = '\0';
+
+    /*
+     * Skip whitespace-separated tokens.
+     *
+     * NOTE(review): if the comm field (field 2, the thread name in
+     * parens) itself contains spaces, this simple skip miscounts and the
+     * values parsed below will be wrong -- consider scanning past the
+     * closing ')' instead.
+     */
+    static const char* kWhitespace = " ";
+    char* cp = lineBuf;
+    for (i = 0; i < 13; i++) {
+        cp += strcspn(cp, kWhitespace);     // skip token
+        cp += strspn(cp, kWhitespace);      // skip whitespace
+    }
+
+    /*
+     * Grab the values we want.
+     */
+    char* endp;
+    *pUtime = strtoul(cp, &endp, 10);
+    if (endp == cp)
+        LOGI("Warning: strtoul failed on utime ('%.30s...')\n", cp);
+
+    cp += strcspn(cp, kWhitespace);
+    cp += strspn(cp, kWhitespace);
+
+    *pStime = strtoul(cp, &endp, 10);
+    if (endp == cp)
+        LOGI("Warning: strtoul failed on stime ('%.30s...')\n", cp);
+
+    close(fd);
+    return true;
+}
+
+/*
+ * Generate the contents of a THST chunk.  The data encompasses all known
+ * threads.
+ *
+ * Response has:
+ *  (1b) header len
+ *  (1b) bytes per entry
+ *  (2b) thread count
+ * Then, for each thread:
+ *  (4b) threadId
+ *  (1b) thread status
+ *  (4b) tid
+ *  (4b) utime 
+ *  (4b) stime 
+ *  (1b) is daemon?
+ *
+ * The length fields exist in anticipation of adding additional fields
+ * without wanting to break ddms or bump the full protocol version.  I don't
+ * think it warrants full versioning.  They might be extraneous and could
+ * be removed from a future version.
+ *
+ * Returns a new byte[] with the data inside, or NULL on failure.  The
+ * caller must call dvmReleaseTrackedAlloc() on the array.
+ */
+ArrayObject* dvmDdmGenerateThreadStats(void)
+{
+    const int kHeaderLen = 4;
+    const int kBytesPerEntry = 18;
+
+    dvmLockThreadList(NULL);
+
+    /* first pass: count live threads so we can size the buffer */
+    Thread* thread;
+    int threadCount = 0;
+    for (thread = gDvm.threadList; thread != NULL; thread = thread->next)
+        threadCount++;
+
+    /*
+     * Create a temporary buffer.  We can't perform heap allocation with
+     * the thread list lock held (could cause a GC).  The output is small
+     * enough to sit on the stack.
+     *
+     * NOTE(review): this is a VLA sized by the live thread count, and the
+     * (u2) cast below truncates counts above 65535 -- fine for realistic
+     * thread counts, but unbounded in principle.
+     */
+    int bufLen = kHeaderLen + threadCount * kBytesPerEntry;
+    u1 tmpBuf[bufLen];
+    u1* buf = tmpBuf;
+
+    set1(buf+0, kHeaderLen);
+    set1(buf+1, kBytesPerEntry);
+    set2BE(buf+2, (u2) threadCount);
+    buf += kHeaderLen;
+
+    /* second pass: emit one kBytesPerEntry record per thread (see the
+     * layout in the function header comment) */
+    pid_t pid = getpid();
+    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+        unsigned long utime, stime;
+        bool isDaemon;
+
+        if (!getThreadStats(pid, thread->systemTid, &utime, &stime)) {
+            // failed; drop in empty values
+            utime = stime = 0;
+        }
+
+        isDaemon = dvmGetFieldBoolean(thread->threadObj,
+                        gDvm.offJavaLangThread_daemon);
+
+        set4BE(buf+0, thread->threadId);
+        set1(buf+4, thread->status);
+        set4BE(buf+5, thread->systemTid);
+        set4BE(buf+9, utime);
+        set4BE(buf+13, stime);
+        set1(buf+17, isDaemon);
+
+        buf += kBytesPerEntry;
+    }
+    dvmUnlockThreadList();
+
+
+    /*
+     * Create a byte array to hold the data.
+     */
+    ArrayObject* arrayObj = dvmAllocPrimitiveArray('B', bufLen, ALLOC_DEFAULT);
+    if (arrayObj != NULL)
+        memcpy(arrayObj->contents, tmpBuf, bufLen);
+    return arrayObj;
+}
+
+
+/*
+ * Find the specified thread and return its stack trace as an array of
+ * StackTraceElement objects.
+ */
+ArrayObject* dvmDdmGetStackTraceById(u4 threadId)
+{
+    Thread* self = dvmThreadSelf();
+    Thread* thread;
+    int* traceBuf;
+
+    dvmLockThreadList(self);
+
+    /* map the DDMS thread id back to a live Thread struct */
+    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+        if (thread->threadId == threadId)
+            break;
+    }
+    if (thread == NULL) {
+        LOGI("dvmDdmGetStackTraceById: threadid=%d not found\n", threadId);
+        dvmUnlockThreadList();
+        return NULL;
+    }
+
+    /*
+     * Suspend the thread, pull out the stack trace, then resume the thread
+     * and release the thread list lock.  If we're being asked to examine
+     * our own stack trace, skip the suspend/resume.
+     */
+    int stackDepth = -1;
+    if (thread != self)
+        dvmSuspendThread(thread);
+    traceBuf = dvmFillInStackTraceRaw(thread, &stackDepth);
+    if (thread != self)
+        dvmResumeThread(thread);
+    dvmUnlockThreadList();
+
+    /*
+     * Convert the raw buffer into an array of StackTraceElement.
+     *
+     * NOTE(review): traceBuf is passed through unchecked -- presumably
+     * dvmGetStackTraceRaw tolerates a NULL buffer / negative depth if
+     * dvmFillInStackTraceRaw failed; confirm.
+     */
+    ArrayObject* trace = dvmGetStackTraceRaw(traceBuf, stackDepth);
+    free(traceBuf);
+    return trace;
+}
+
+/*
+ * Gather up the allocation data and copy it into a byte[].
+ *
+ * Returns NULL on failure with an exception raised.
+ */
+/*
+ * Gather up the allocation data and copy it into a byte[].
+ *
+ * Returns NULL on failure with an exception raised.  The caller must
+ * release the returned array via dvmReleaseTrackedAlloc().
+ */
+ArrayObject* dvmDdmGetRecentAllocations(void)
+{
+    u1* data;
+    size_t len;
+
+    if (!dvmGenerateTrackedAllocationReport(&data, &len)) {
+        /* assume OOM */
+        dvmThrowException("Ljava/lang/OutOfMemoryError;","recent alloc native");
+        return NULL;
+    }
+
+    ArrayObject* arrayObj = dvmAllocPrimitiveArray('B', len, ALLOC_DEFAULT);
+    if (arrayObj != NULL)
+        memcpy(arrayObj->contents, data, len);
+
+    /* the report buffer is heap-allocated by the tracker and owned by us
+     * once the call succeeds; without this free() we leaked it on every
+     * DDMS "recent allocations" request */
+    free(data);
+    return arrayObj;
+}
+
diff --git a/vm/Ddm.h b/vm/Ddm.h
new file mode 100644
index 0000000..01f5d18
--- /dev/null
+++ b/vm/Ddm.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik Debug Monitor
+ */
+#ifndef _DALVIK_DDM
+#define _DALVIK_DDM
+
+/*
+ * Handle a packet full of DDM goodness.
+ *
+ * Returns "true" if we have anything to say in return, in which case
+ * "*pReplyBuf" and "*pReplyLen" will also be set.
+ */
+bool dvmDdmHandlePacket(const u1* buf, int dataLen, u1** pReplyBuf,
+    int* pReplyLen);
+
+/*
+ * Deal with the DDM server connecting and disconnecting.
+ */
+void dvmDdmConnected(void);
+void dvmDdmDisconnected(void);
+
+/*
+ * Turn thread notification on or off.
+ */
+void dvmDdmSetThreadNotification(bool enable);
+
+/*
+ * If thread start/stop notification is enabled, call this when threads
+ * are created or die.
+ */
+void dvmDdmSendThreadNotification(Thread* thread, bool started);
+
+/*
+ * If thread start/stop notification is enabled, call this when the
+ * thread name changes.
+ */
+void dvmDdmSendThreadNameChange(int threadId, StringObject* newName);
+
+/*
+ * Generate a byte[] full of thread stats for a THST packet.
+ */
+ArrayObject* dvmDdmGenerateThreadStats(void);
+
+/*
+ * Let the heap know that the HPIF when value has changed.
+ *
+ * @return true iff the when value is supported by the VM.
+ */
+bool dvmDdmHandleHpifChunk(int when);
+
+/*
+ * Let the heap know that the HPSG or NHSG what/when values have changed.
+ *
+ * @param native false for an HPSG chunk, true for an NHSG chunk
+ *
+ * @return true iff the what/when values are supported by the VM.
+ */
+bool dvmDdmHandleHpsgNhsgChunk(int when, int what, bool native);
+
+/*
+ * Get an array of StackTraceElement objects for the specified thread.
+ */
+ArrayObject* dvmDdmGetStackTraceById(u4 threadId);
+
+/*
+ * Gather up recent allocation data and return it in a byte[].
+ *
+ * Returns NULL on failure with an exception raised.
+ */
+ArrayObject* dvmDdmGetRecentAllocations(void);
+
+#endif /*_DALVIK_DDM*/
diff --git a/vm/Debugger.c b/vm/Debugger.c
new file mode 100644
index 0000000..02bb8a6
--- /dev/null
+++ b/vm/Debugger.c
@@ -0,0 +1,2943 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Link between JDWP and the VM.  The code here only runs as a result of
+ * requests from the debugger, so speed is not essential.  Maintaining
+ * isolation of the JDWP code should make it easier to maintain and reuse.
+ *
+ * Collecting all debugger-related pieces here will also allow us to #ifdef
+ * the JDWP code out of release builds.
+ */
+#include "Dalvik.h"
+
+/*
+Notes on garbage collection and object registration
+
+JDWP does not allow the debugger to assume that objects passed to it
+will not be garbage collected.  It specifies explicit commands (e.g.
+ObjectReference.DisableCollection) to allow the debugger to manage
+object lifetime.  It does, however, require that the VM not re-use an
+object ID unless an explicit "dispose" call has been made, and if the
+VM asks for a now-collected object we must return INVALID_OBJECT.
+
+JDWP also requires that, while the VM is suspended, no garbage collection
+occur.  The JDWP docs suggest that this is obvious, because no threads
+can be running.  Unfortunately it's not entirely clear how to deal
+with situations where the debugger itself allocates strings or executes
+code as part of displaying variables.  The easiest way to enforce this,
+short of disabling GC whenever the debugger is connected, is to ensure
+that the debugger thread can't cause a GC: it has to expand the heap or
+fail to allocate.  (Might want to make that "is debugger thread AND all
+other threads are suspended" to avoid unnecessary heap expansion by a
+poorly-timed JDWP request.)
+
+We use an "object registry" so that we can separate our internal
+representation from what we show the debugger.  This allows us to
+return a registry table index instead of a pointer or handle.
+
+There are various approaches we can take to achieve correct behavior:
+
+(1) Disable garbage collection entirely while the debugger is attached.
+This is very easy, but doesn't allow extended debugging sessions on
+small devices.
+
+(2) Keep a list of all object references requested by or sent to the
+debugger, and include the list in the GC root set.  This ensures that
+objects the debugger might care about don't go away.  This is straightforward,
+but it can cause us to hold on to large objects and prevent finalizers from
+being executed.
+
+(3) Keep a list of what amount to weak object references.  This way we
+don't interfere with the GC, and can support JDWP requests like
+"ObjectReference.IsCollected".
+
+The current implementation is #2.  The set should be reasonably small and
+performance isn't critical, so a simple expanding array can be used.
+
+
+Notes on threads:
+
+The VM has a Thread struct associated with every active thread.  The
+ThreadId we pass to the debugger is the ObjectId for the java/lang/Thread
+object, so to retrieve the VM's Thread struct we have to scan through the
+list looking for a match.
+
+When a thread goes away, we lock the list and free the struct.  To
+avoid having the thread list updated or Thread structs freed out from
+under us, we want to acquire and hold the thread list lock while we're
+performing operations on Threads.  Exceptions to this rule are noted in
+a couple of places.
+
+We can speed this up a bit by adding a Thread struct pointer to the
+java/lang/Thread object, and ensuring that both are discarded at the
+same time.
+*/
+
+#define THREAD_GROUP_ALL ((ObjectId) 0x12345)   // magic, internal-only value
+
+#define kSlot0Sub   1000    // Eclipse workaround
+
+/*
+ * System init.  We don't allocate the registry until first use.
+ * Make sure we do this before initializing JDWP.
+ */
+bool dvmDebuggerStartup(void)
+{
+    /* no free function: entries are bare encoded ids (see registerObject),
+     * nothing is allocated per entry */
+    gDvm.dbgRegistry = dvmHashTableCreate(1000, NULL);
+    return (gDvm.dbgRegistry != NULL);
+}
+
+/*
+ * Free registry storage.
+ */
+void dvmDebuggerShutdown(void)
+{
+    dvmHashTableFree(gDvm.dbgRegistry);
+    /* leave a NULL behind so late callers (e.g. dvmGcMarkDebuggerRefs)
+     * can see the registry is gone */
+    gDvm.dbgRegistry = NULL;
+}
+
+
+/*
+ * Pass these through to the VM functions.  Allows extended checking
+ * (e.g. "errorcheck" mutexes).  If nothing else we can assert() success.
+ */
+void dvmDbgInitMutex(pthread_mutex_t* pMutex)
+{
+    dvmInitMutex(pMutex);
+}
+void dvmDbgLockMutex(pthread_mutex_t* pMutex)
+{
+    dvmLockMutex(pMutex);
+}
+void dvmDbgUnlockMutex(pthread_mutex_t* pMutex)
+{
+    dvmUnlockMutex(pMutex);
+}
+void dvmDbgInitCond(pthread_cond_t* pCond)
+{
+    pthread_cond_init(pCond, NULL);
+}
+void dvmDbgCondWait(pthread_cond_t* pCond, pthread_mutex_t* pMutex)
+{
+    /* NOTE(review): in all three cond wrappers, "cc" is unused when
+     * asserts are compiled out (NDEBUG) -- errors are silently ignored
+     * and the variable may draw an unused-variable warning */
+    int cc = pthread_cond_wait(pCond, pMutex);
+    assert(cc == 0);
+}
+void dvmDbgCondSignal(pthread_cond_t* pCond)
+{
+    int cc = pthread_cond_signal(pCond);
+    assert(cc == 0);
+}
+void dvmDbgCondBroadcast(pthread_cond_t* pCond)
+{
+    int cc = pthread_cond_broadcast(pCond);
+    assert(cc == 0);
+}
+
+
+/* keep track of type, in case we need to distinguish them someday */
+typedef enum RegistryType {
+    /* non-zero tags; registerObject() ORs the tag into bits 32+ of the id */
+    kObjectId = 0xc1, kRefTypeId
+} RegistryType;
+
+/*
+ * Hash function for object IDs.  Since objects are at least 8 bytes, and
+ * could someday be allocated on 16-byte boundaries, we don't want to use
+ * the low 4 bits in our hash.
+ */
+static inline u4 registryHash(u4 val)
+{
+    return val >> 4;    /* drop alignment bits; they carry no entropy */
+}
+
+/*
+ * (This is a dvmHashTableLookup() callback.)
+ */
+static int registryCompare(const void* obj1, const void* obj2)
+{
+    /* NOTE(review): pointer-to-int subtraction assumes 32-bit pointers,
+     * consistent with the (u4) casts used throughout this registry --
+     * revisit if this code ever targets 64-bit. */
+    return (int) obj1 - (int) obj2;
+}
+
+
+/*
+ * Determine if an id is already in the list.
+ *
+ * If the list doesn't yet exist, this creates it.
+ *
+ * Lock the registry before calling here.
+ */
+static bool lookupId(ObjectId id)
+{
+    /* probe only -- "false" means don't insert on a miss */
+    void* match = dvmHashTableLookup(gDvm.dbgRegistry, registryHash((u4) id),
+                (void*)(u4) id, registryCompare, false);
+
+    if (match == NULL)
+        return false;
+
+    assert(match == (void*)(u4) id);
+    return true;
+}
+
+/*
+ * Register an object, if it hasn't already been.
+ *
+ * This is used for both ObjectId and RefTypeId.  In theory we don't have
+ * to register RefTypeIds unless we're worried about classes unloading.
+ *
+ * Null references must be represented as zero, or the debugger will get
+ * very confused.
+ */
+static ObjectId registerObject(const Object* obj, RegistryType type, bool reg)
+{
+    ObjectId id;
+
+    if (obj == NULL)
+        return 0;
+
+    /* sanity-check the pointer; 0xcccccccc is presumably an
+     * uninitialized-memory fill pattern -- TODO confirm intent */
+    assert((u4) obj != 0xcccccccc);
+    assert((u4) obj > 0x100);
+
+    /* id layout: raw pointer in the low 32 bits, type tag in bits 32+ */
+    id = (ObjectId)(u4)obj | ((u8) type) << 32;
+    if (!reg)
+        return id;      /* encode only; don't pin in the registry */
+
+    dvmLockThreadList? -- no: lock the registry hash table */
+    dvmHashTableLock(gDvm.dbgRegistry);
+    if (!gDvm.debuggerConnected) {
+        /* debugger has detached while we were doing stuff? */
+        LOGI("ignoring registerObject request in thread=%d\n",
+            dvmThreadSelf()->threadId);
+        //dvmAbort();
+        goto bail;
+    }
+
+    /* "true" => insert if missing; the stored entry is the encoded id,
+     * which doubles as the GC root (see dvmGcMarkDebuggerRefs) */
+    (void) dvmHashTableLookup(gDvm.dbgRegistry, registryHash((u4) id),
+                (void*)(u4) id, registryCompare, true);
+
+bail:
+    dvmHashTableUnlock(gDvm.dbgRegistry);
+    return id;
+}
+
+/*
+ * (This is a HashForeachFunc callback.)
+ */
+static int markRef(void* data, void* arg)
+{
+    UNUSED_PARAMETER(arg);
+
+    //LOGI("dbg mark %p\n", data);
+    /* each entry is the encoded id, whose low 32 bits are the raw object
+     * pointer (see registerObject), so it can be marked directly */
+    dvmMarkObjectNonNull(data);
+    return 0;   /* presumably 0 == keep iterating -- confirm vs dvmHashForeach */
+}
+
+/* Mark all of the registered debugger references so the
+ * GC doesn't collect them.
+ */
+void dvmGcMarkDebuggerRefs()
+{
+    /* dvmDebuggerStartup() may not have been called before the first GC.
+     */
+    if (gDvm.dbgRegistry != NULL) {
+        dvmHashTableLock(gDvm.dbgRegistry);
+        dvmHashForeach(gDvm.dbgRegistry, markRef, NULL);
+        dvmHashTableUnlock(gDvm.dbgRegistry);
+    }
+}
+
+/*
+ * Verify that an object has been registered.  If it hasn't, the debugger
+ * is asking for something we didn't send it, which means something
+ * somewhere is broken.
+ *
+ * If speed is an issue we can encode the registry index in the high
+ * four bytes.  We could also just hard-wire this to "true".
+ *
+ * Note this actually takes both ObjectId and RefTypeId.
+ */
+static bool objectIsRegistered(ObjectId id, RegistryType type)
+{
+    UNUSED_PARAMETER(type);
+
+    if (id == 0)        // null reference?
+        return true;
+
+    dvmHashTableLock(gDvm.dbgRegistry);
+    bool result = lookupId(id);
+    dvmHashTableUnlock(gDvm.dbgRegistry);
+    return result;
+}
+
+/*
+ * Convert to/from a RefTypeId.
+ *
+ * These are rarely NULL, but can be (e.g. java/lang/Object's superclass).
+ */
+static RefTypeId classObjectToRefTypeId(ClassObject* clazz)
+{
+    return (RefTypeId) registerObject((Object*) clazz, kRefTypeId, true);
+}
+static RefTypeId classObjectToRefTypeIdNoReg(ClassObject* clazz)
+{
+    return (RefTypeId) registerObject((Object*) clazz, kRefTypeId, false);
+}
+static ClassObject* refTypeIdToClassObject(RefTypeId id)
+{
+    assert(objectIsRegistered(id, kRefTypeId) || !gDvm.debuggerConnected);
+    return (ClassObject*)(u4) id;
+}
+
+/*
+ * Convert to/from an ObjectId.
+ */
+static ObjectId objectToObjectId(const Object* obj)
+{
+    return registerObject(obj, kObjectId, true);
+}
+static ObjectId objectToObjectIdNoReg(const Object* obj)
+{
+    return registerObject(obj, kObjectId, false);
+}
+static Object* objectIdToObject(ObjectId id)
+{
+    assert(objectIsRegistered(id, kObjectId) || !gDvm.debuggerConnected);
+    return (Object*)(u4) id;
+}
+
+/*
+ * Convert to/from a MethodId.
+ *
+ * These IDs are only guaranteed unique within a class, so they could be
+ * an enumeration index.  For now we just use the Method*.
+ */
+static MethodId methodToMethodId(const Method* meth)
+{
+    return (MethodId)(u4) meth;
+}
+static Method* methodIdToMethod(RefTypeId refTypeId, MethodId id)
+{
+    // TODO? verify "id" is actually a method in "refTypeId"
+    return (Method*)(u4) id;
+}
+
+/*
+ * Convert to/from a FieldId.
+ *
+ * These IDs are only guaranteed unique within a class, so they could be
+ * an enumeration index.  For now we just use the Field*.
+ */
+static FieldId fieldToFieldId(const Field* field)
+{
+    return (FieldId)(u4) field;
+}
+static Field* fieldIdToField(RefTypeId refTypeId, FieldId id)
+{
+    // TODO? verify "id" is actually a field in "refTypeId"
+    return (Field*)(u4) id;
+}
+
+/*
+ * Convert to/from a FrameId.
+ *
+ * We just return a pointer to the stack frame.
+ */
+static FrameId frameToFrameId(const void* frame)
+{
+    return (FrameId)(u4) frame;
+}
+static void* frameIdToFrame(FrameId id)
+{
+    return (void*)(u4) id;
+}
+
+
+/*
+ * Get the invocation request state.
+ */
+DebugInvokeReq* dvmDbgGetInvokeReq(void)
+{
+    return &dvmThreadSelf()->invokeReq;
+}
+
+/*
+ * Enable the object registry, but don't enable debugging features yet.
+ *
+ * Only called from the JDWP handler thread.
+ */
+void dvmDbgConnected(void)
+{
+    assert(!gDvm.debuggerConnected);
+
+    LOGV("JDWP has attached\n");
+    assert(dvmHashTableNumEntries(gDvm.dbgRegistry) == 0);
+    gDvm.debuggerConnected = true;
+}
+
+/*
+ * Enable all debugging features, including scans for breakpoints.
+ *
+ * This is a no-op if we're already active.
+ *
+ * Only called from the JDWP handler thread.
+ */
+void dvmDbgActive(void)
+{
+    if (gDvm.debuggerActive)
+        return;
+
+    LOGI("Debugger is active\n");
+    dvmInitBreakpoints();
+    gDvm.debuggerActive = true;
+}
+
+/*
+ * Disable debugging features.
+ *
+ * Set "debuggerConnected" to false, which disables use of the object
+ * registry.
+ *
+ * Only called from the JDWP handler thread.
+ */
+void dvmDbgDisconnected(void)
+{
+    assert(gDvm.debuggerConnected);
+
+    gDvm.debuggerActive = false;
+
+    dvmHashTableLock(gDvm.dbgRegistry);
+    gDvm.debuggerConnected = false;
+
+    LOGI("Debugger has detached; object registry had %d entries\n",
+        dvmHashTableNumEntries(gDvm.dbgRegistry));
+    //int i;
+    //for (i = 0; i < gDvm.dbgRegistryNext; i++)
+    //    LOGVV("%4d: 0x%llx\n", i, gDvm.dbgRegistryTable[i]);
+
+    dvmHashTableClear(gDvm.dbgRegistry);
+    dvmHashTableUnlock(gDvm.dbgRegistry);
+}
+
+/*
+ * Returns "true" if a debugger is connected.
+ *
+ * Does not return "true" if it's just a DDM server.
+ */
+bool dvmDbgIsDebuggerConnected(void)
+{
+    return gDvm.debuggerActive;
+}
+
+/*
+ * Get time since last debugger activity.  Used when figuring out if the
+ * debugger has finished configuring us.
+ */
+s8 dvmDbgLastDebuggerActivity(void)
+{
+    return dvmJdwpLastDebuggerActivity(gDvm.jdwpState);
+}
+
+/*
+ * JDWP thread is running, don't allow GC.
+ */
+int dvmDbgThreadRunning(void)
+{
+    return dvmChangeStatus(NULL, THREAD_RUNNING);
+}
+
+/*
+ * JDWP thread is idle, allow GC.
+ */
+int dvmDbgThreadWaiting(void)
+{
+    return dvmChangeStatus(NULL, THREAD_VMWAIT);
+}
+
+/*
+ * Restore state returned by Running/Waiting calls.
+ */
+int dvmDbgThreadContinuing(int status)
+{
+    return dvmChangeStatus(NULL, status);
+}
+
+/*
+ * The debugger wants us to exit.
+ */
+void dvmDbgExit(int status)
+{
+    // TODO? invoke System.exit() to perform exit processing; ends up
+    // in System.exitInternal(), which can call JNI exit hook
+#ifdef WITH_PROFILER
+    LOGI("GC lifetime allocation: %d bytes\n", gDvm.allocProf.allocCount);
+    if (CALC_CACHE_STATS) {
+        dvmDumpAtomicCacheStats(gDvm.instanceofCache);
+        dvmDumpBootClassPath();
+    }
+#endif
+#ifdef PROFILE_FIELD_ACCESS
+    dvmDumpFieldAccessCounts();
+#endif
+
+    exit(status);
+}
+
+
+/*
+ * ===========================================================================
+ *      Class, Object, Array
+ * ===========================================================================
+ */
+
+/*
+ * Get the class's type descriptor from a reference type ID.
+ */
+const char* dvmDbgGetClassDescriptor(RefTypeId id)
+{
+    ClassObject* clazz;
+
+    clazz = refTypeIdToClassObject(id);
+    return clazz->descriptor;
+}
+
+/*
+ * Return the superclass of a class (will be NULL for java/lang/Object).
+ */
+RefTypeId dvmDbgGetSuperclass(RefTypeId id)
+{
+    ClassObject* clazz = refTypeIdToClassObject(id);
+    return classObjectToRefTypeId(clazz->super);
+}
+
+/*
+ * Return a class's defining class loader.
+ */
+RefTypeId dvmDbgGetClassLoader(RefTypeId id)
+{
+    ClassObject* clazz = refTypeIdToClassObject(id);
+    return objectToObjectId(clazz->classLoader);
+}
+
+/*
+ * Return a class's access flags.
+ */
+u4 dvmDbgGetAccessFlags(RefTypeId id)
+{
+    ClassObject* clazz = refTypeIdToClassObject(id);
+    return clazz->accessFlags & JAVA_FLAGS_MASK;
+}
+
+/*
+ * Is this class an interface?
+ */
+bool dvmDbgIsInterface(RefTypeId id)
+{
+    ClassObject* clazz = refTypeIdToClassObject(id);
+    return dvmIsInterfaceClass(clazz);
+}
+
+/*
+ * dvmHashForeach callback
+ */
+static int copyRefType(void* vclazz, void* varg)
+{
+    RefTypeId** pRefType = (RefTypeId**)varg;
+    **pRefType = classObjectToRefTypeId((ClassObject*) vclazz);
+    (*pRefType)++;
+    return 0;
+}
+
+/*
+ * Get the complete list of reference classes (i.e. all classes except
+ * the primitive types).
+ *
+ * Returns a newly-allocated buffer full of RefTypeId values.
+ */
+void dvmDbgGetClassList(u4* pNumClasses, RefTypeId** pClassRefBuf)
+{
+    RefTypeId* pRefType;
+
+    dvmHashTableLock(gDvm.loadedClasses);
+    *pNumClasses = dvmHashTableNumEntries(gDvm.loadedClasses);
+    pRefType = *pClassRefBuf = malloc(sizeof(RefTypeId) * *pNumClasses);
+
+    if (dvmHashForeach(gDvm.loadedClasses, copyRefType, &pRefType) != 0) {
+        LOGW("Warning: problem getting class list\n");
+        /* not really expecting this to happen */
+    } else {
+        assert(pRefType - *pClassRefBuf == (int) *pNumClasses);
+    }
+
+    dvmHashTableUnlock(gDvm.loadedClasses);
+}
+
+/*
+ * Get the list of reference classes "visible" to the specified class
+ * loader.  A class is visible to a class loader if the ClassLoader object
+ * is the defining loader or is listed as an initiating loader.
+ *
+ * Returns a newly-allocated buffer full of RefTypeId values.
+ */
+void dvmDbgGetVisibleClassList(ObjectId classLoaderId, u4* pNumClasses,
+    RefTypeId** pClassRefBuf)
+{
+    Object* classLoader;
+    int numClasses = 0, maxClasses;
+
+    classLoader = objectIdToObject(classLoaderId);
+    // I don't think classLoader can be NULL, but the spec doesn't say
+
+    LOGVV("GetVisibleList: comparing to %p\n", classLoader);
+
+    dvmHashTableLock(gDvm.loadedClasses);
+
+    /* over-allocate the return buffer */
+    maxClasses = dvmHashTableNumEntries(gDvm.loadedClasses);
+    *pClassRefBuf = malloc(sizeof(RefTypeId) * maxClasses);
+
+    /*
+     * Run through the list, looking for matches.
+     */
+    HashIter iter;
+    for (dvmHashIterBegin(gDvm.loadedClasses, &iter); !dvmHashIterDone(&iter);
+        dvmHashIterNext(&iter))
+    {
+        ClassObject* clazz = (ClassObject*) dvmHashIterData(&iter);
+
+        if (clazz->classLoader == classLoader ||
+            dvmLoaderInInitiatingList(clazz, classLoader))
+        {
+            LOGVV("  match '%s'\n", clazz->descriptor);
+            (*pClassRefBuf)[numClasses++] = classObjectToRefTypeId(clazz);
+        }
+    }
+    *pNumClasses = numClasses;
+
+    dvmHashTableUnlock(gDvm.loadedClasses);
+}
+
+/*
+ * Generate the "JNI signature" for a class, e.g. "Ljava/lang/String;".
+ *
+ * Our class descriptors are in the correct format, so we just copy that.
+ * TODO: figure out if we can avoid the copy now that we're using
+ * descriptors instead of unadorned class names.
+ *
+ * Returns a newly-allocated string.
+ */
+static char* generateJNISignature(ClassObject* clazz)
+{
+    return strdup(clazz->descriptor);
+}
+
+/*
+ * Get information about a class.
+ *
+ * If "pSignature" is not NULL, *pSignature gets the "JNI signature" of
+ * the class.
+ */
+void dvmDbgGetClassInfo(RefTypeId classId, u1* pTypeTag, u4* pStatus,
+    char** pSignature)
+{
+    ClassObject* clazz = refTypeIdToClassObject(classId);
+
+    if (clazz->descriptor[0] == '[') {
+        /* generated array class */
+        *pStatus = CS_VERIFIED | CS_PREPARED;
+        *pTypeTag = TT_ARRAY;
+    } else {
+        if (clazz->status == CLASS_ERROR)
+            *pStatus = CS_ERROR;
+        else
+            *pStatus = CS_VERIFIED | CS_PREPARED | CS_INITIALIZED;
+        if (dvmIsInterfaceClass(clazz))
+            *pTypeTag = TT_INTERFACE;
+        else
+            *pTypeTag = TT_CLASS;
+    }
+    if (pSignature != NULL)
+        *pSignature = generateJNISignature(clazz);
+}
+
+/*
+ * Search the list of loaded classes for a match.
+ */
+bool dvmDbgFindLoadedClassBySignature(const char* classDescriptor,
+        RefTypeId* pRefTypeId)
+{
+    ClassObject* clazz;
+
+    clazz = dvmFindLoadedClass(classDescriptor);
+    if (clazz != NULL) {
+        *pRefTypeId = classObjectToRefTypeId(clazz);
+        return true;
+    } else
+        return false;
+}
+
+
+/*
+ * Get an object's class and "type tag".
+ */
+void dvmDbgGetObjectType(ObjectId objectId, u1* pRefTypeTag,
+    RefTypeId* pRefTypeId)
+{
+    Object* obj = objectIdToObject(objectId);
+
+    if (dvmIsArrayClass(obj->clazz))
+        *pRefTypeTag = TT_ARRAY;
+    else if (dvmIsInterfaceClass(obj->clazz))
+        *pRefTypeTag = TT_INTERFACE;
+    else
+        *pRefTypeTag = TT_CLASS;
+    *pRefTypeId = classObjectToRefTypeId(obj->clazz);
+}
+
+/*
+ * Get a class object's "type tag".
+ */
+u1 dvmDbgGetClassObjectType(RefTypeId refTypeId)
+{
+    ClassObject* clazz = refTypeIdToClassObject(refTypeId);
+
+    if (dvmIsArrayClass(clazz))
+        return TT_ARRAY;
+    else if (dvmIsInterfaceClass(clazz))
+        return TT_INTERFACE;
+    else
+        return TT_CLASS;
+}
+
+/*
+ * Get a class' signature.
+ *
+ * Returns a newly-allocated string.
+ */
+char* dvmDbgGetSignature(RefTypeId refTypeId)
+{
+    ClassObject* clazz;
+
+    clazz = refTypeIdToClassObject(refTypeId);
+    assert(clazz != NULL);
+
+    return generateJNISignature(clazz);
+}
+
+/*
+ * Get class' source file.
+ *
+ * Returns a newly-allocated string.
+ */
+const char* dvmDbgGetSourceFile(RefTypeId refTypeId)
+{
+    ClassObject* clazz;
+
+    clazz = refTypeIdToClassObject(refTypeId);
+    assert(clazz != NULL);
+
+    return clazz->sourceFile;
+}
+
+/*
+ * Get an object's type name.  Converted to a "JNI signature".
+ *
+ * Returns a newly-allocated string.
+ */
+char* dvmDbgGetObjectTypeName(ObjectId objectId)
+{
+    Object* obj = objectIdToObject(objectId);
+
+    assert(obj != NULL);
+
+    return generateJNISignature(obj->clazz);
+}
+
+/*
+ * Given a type signature (e.g. "Ljava/lang/String;"), return the JDWP
+ * "type tag".
+ *
+ * In many cases this is necessary but not sufficient.  For example, if
+ * we have a NULL String object, we want to return JT_STRING.  If we have
+ * a java/lang/Object that holds a String reference, we also want to
+ * return JT_STRING.  See dvmDbgGetObjectTag().
+ */
+int dvmDbgGetSignatureTag(const char* type)
+{
+    /*
+     * We're not checking the class loader here (to guarantee that JT_STRING
+     * is truly the one and only String), but it probably doesn't matter
+     * for our purposes.
+     */
+    if (strcmp(type, "Ljava/lang/String;") == 0)
+        return JT_STRING;
+    else if (strcmp(type, "Ljava/lang/Class;") == 0)
+        return JT_CLASS_OBJECT; 
+    else if (strcmp(type, "Ljava/lang/Thread;") == 0)
+        return JT_THREAD;
+    else if (strcmp(type, "Ljava/lang/ThreadGroup;") == 0)
+        return JT_THREAD_GROUP;
+    else if (strcmp(type, "Ljava/lang/ClassLoader;") == 0)
+        return JT_CLASS_LOADER;
+
+    switch (type[0]) {
+    case '[':       return JT_ARRAY;
+    case 'B':       return JT_BYTE;
+    case 'C':       return JT_CHAR;
+    case 'L':       return JT_OBJECT;
+    case 'F':       return JT_FLOAT;
+    case 'D':       return JT_DOUBLE;
+    case 'I':       return JT_INT;
+    case 'J':       return JT_LONG;
+    case 'S':       return JT_SHORT;
+    case 'V':       return JT_VOID;
+    case 'Z':       return JT_BOOLEAN;
+    default:
+        LOGE("ERROR: unhandled type '%s'\n", type);
+        assert(false);
+        return -1;
+    }
+}
+
+/*
+ * Methods declared to return Object might actually be returning one
+ * of the "refined types".  We need to check the object explicitly.
+ */
+static u1 resultTagFromObject(Object* obj)
+{
+    ClassObject* clazz;
+
+    if (obj == NULL)
+        return JT_OBJECT;
+
+    clazz = obj->clazz;
+
+    /*
+     * Comparing against the known classes is faster than string
+     * comparisons.  It ensures that we only find the classes in the
+     * bootstrap class loader, which may or may not be what we want.
+     */
+    if (clazz == gDvm.classJavaLangString)
+        return JT_STRING;
+    else if (clazz == gDvm.classJavaLangClass)
+        return JT_CLASS_OBJECT;
+    else if (clazz == gDvm.classJavaLangThread)
+        return JT_THREAD;
+    else if (clazz == gDvm.classJavaLangThreadGroup)
+        return JT_THREAD_GROUP;
+    else if (strcmp(clazz->descriptor, "Ljava/lang/ClassLoader;") == 0)
+        return JT_CLASS_LOADER;
+    else if (clazz->descriptor[0] == '[')
+        return JT_ARRAY;
+    else
+        return JT_OBJECT;
+}
+
+/*
+ * Determine the tag for an object with a known type.
+ */
+int dvmDbgGetObjectTag(ObjectId objectId, const char* type)
+{
+    u1 tag;
+
+    tag = dvmDbgGetSignatureTag(type);
+    if (tag == JT_OBJECT && objectId != 0)
+        tag = resultTagFromObject(objectIdToObject(objectId));
+
+    return tag;
+}
+
+/*
+ * Get the widths of the specified JDWP.Tag value.
+ */
+int dvmDbgGetTagWidth(int tag)
+{
+    switch (tag) {
+    case JT_VOID:
+        return 0;
+    case JT_BYTE:
+    case JT_BOOLEAN:
+        return 1;
+    case JT_CHAR:
+    case JT_SHORT:
+        return 2;
+    case JT_FLOAT:
+    case JT_INT:
+        return 4;
+    case JT_ARRAY:
+    case JT_OBJECT:
+    case JT_STRING:
+    case JT_THREAD:
+    case JT_THREAD_GROUP:
+    case JT_CLASS_LOADER:
+    case JT_CLASS_OBJECT:
+        return sizeof(ObjectId);
+    case JT_DOUBLE:
+    case JT_LONG:
+        return 8;
+    default:
+        LOGE("ERROR: unhandled tag '%c'\n", tag);
+        assert(false);
+        return -1;
+    }
+}
+
+/*
+ * Determine whether or not a tag represents a primitive type.
+ */
+static bool isTagPrimitive(u1 tag)
+{
+    switch (tag) {
+    case JT_BYTE:
+    case JT_CHAR:
+    case JT_FLOAT:
+    case JT_DOUBLE:
+    case JT_INT:
+    case JT_LONG:
+    case JT_SHORT:
+    case JT_VOID:
+    case JT_BOOLEAN:
+        return true;
+    case JT_ARRAY:
+    case JT_OBJECT:
+    case JT_STRING:
+    case JT_CLASS_OBJECT:
+    case JT_THREAD:
+    case JT_THREAD_GROUP:
+    case JT_CLASS_LOADER:
+        return false;
+    default:
+        LOGE("ERROR: unhandled tag '%c'\n", tag);
+        assert(false);
+        return false;
+    }
+}
+
+
+/*
+ * Return the length of the specified array.
+ */
+int dvmDbgGetArrayLength(ObjectId arrayId)
+{
+    ArrayObject* arrayObj = (ArrayObject*) objectIdToObject(arrayId);
+    assert(dvmIsArray(arrayObj));
+    return arrayObj->length;
+}
+
+/*
+ * Return a tag indicating the general type of elements in the array.
+ */
+int dvmDbgGetArrayElementTag(ObjectId arrayId)
+{
+    ArrayObject* arrayObj = (ArrayObject*) objectIdToObject(arrayId);
+
+    assert(dvmIsArray(arrayObj));
+
+    return dvmDbgGetSignatureTag(arrayObj->obj.clazz->descriptor + 1);
+}
+
+/*
+ * Copy a series of values with the specified width, changing the byte
+ * ordering to big-endian.
+ */
+static void copyValuesToBE(u1* out, const u1* in, int count, int width)
+{
+    int i;
+
+    switch (width) {
+    case 1:
+        memcpy(out, in, count);
+        break;
+    case 2:
+        for (i = 0; i < count; i++)
+            *(((u2*) out)+i) = get2BE(in + i*2);
+        break;
+    case 4:
+        for (i = 0; i < count; i++)
+            *(((u4*) out)+i) = get4BE(in + i*4);
+        break;
+    case 8:
+        for (i = 0; i < count; i++)
+            *(((u8*) out)+i) = get8BE(in + i*8);
+        break;
+    default:
+        assert(false);
+    }
+}
+
+/*
+ * Copy a series of values with the specified with, changing the
+ * byte order from big-endian.
+ */
+static void copyValuesFromBE(u1* out, const u1* in, int count, int width)
+{
+    int i;
+
+    switch (width) {
+    case 1:
+        memcpy(out, in, count);
+        break;
+    case 2:
+        for (i = 0; i < count; i++)
+            set2BE(out + i*2, *((u2*)in + i));
+        break;
+    case 4:
+        for (i = 0; i < count; i++)
+            set4BE(out + i*4, *((u4*)in + i));
+        break;
+    case 8:
+        for (i = 0; i < count; i++)
+            set8BE(out + i*8, *((u8*)in + i));
+        break;
+    default:
+        assert(false);
+    }
+}
+
+/*
+ * Output a piece of an array to the reply buffer.
+ *
+ * Returns "false" if something looks fishy.
+ */
+bool dvmDbgOutputArray(ObjectId arrayId, int firstIndex, int count,
+    ExpandBuf* pReply)
+{
+    ArrayObject* arrayObj = (ArrayObject*) objectIdToObject(arrayId);
+    const u1* data = (const u1*)arrayObj->contents;
+    u1 tag;
+
+    assert(dvmIsArray(arrayObj));
+
+    if (firstIndex + count > (int)arrayObj->length) {
+        LOGW("Request for index=%d + count=%d excceds length=%d\n",
+            firstIndex, count, arrayObj->length);
+        return false;
+    }
+
+    tag = dvmDbgGetSignatureTag(arrayObj->obj.clazz->descriptor + 1);
+
+    if (isTagPrimitive(tag)) {
+        int width = dvmDbgGetTagWidth(tag);
+        u1* outBuf;
+
+        outBuf = expandBufAddSpace(pReply, count * width);
+
+        copyValuesToBE(outBuf, data + firstIndex*width, count, width);
+    } else {
+        Object** pObjects;
+        int i;
+
+        pObjects = (Object**) data;
+        pObjects += firstIndex;
+
+        LOGV("    --> copying %d object IDs\n", count);
+        //assert(tag == JT_OBJECT);     // could be object or "refined" type
+
+        for (i = 0; i < count; i++, pObjects++) {
+            u1 thisTag;
+            if (*pObjects != NULL)
+                thisTag = resultTagFromObject(*pObjects);
+            else
+                thisTag = tag;
+            expandBufAdd1(pReply, thisTag);
+            expandBufAddObjectId(pReply, objectToObjectId(*pObjects));
+        }
+    }
+
+    return true;
+}
+
+/*
+ * Set a range of elements in an array from the data in "buf".
+ */
+bool dvmDbgSetArrayElements(ObjectId arrayId, int firstIndex, int count,
+    const u1* buf)
+{
+    ArrayObject* arrayObj = (ArrayObject*) objectIdToObject(arrayId);
+    u1* data = (u1*)arrayObj->contents;
+    u1 tag;
+
+    assert(dvmIsArray(arrayObj));
+
+    if (firstIndex + count > (int)arrayObj->length) {
+        LOGW("Attempt to set index=%d + count=%d excceds length=%d\n",
+            firstIndex, count, arrayObj->length);
+        return false;
+    }
+
+    tag = dvmDbgGetSignatureTag(arrayObj->obj.clazz->descriptor + 1);
+
+    if (isTagPrimitive(tag)) {
+        int width = dvmDbgGetTagWidth(tag);
+
+        LOGV("    --> setting %d '%c' width=%d\n", count, tag, width);
+
+        copyValuesFromBE(data + firstIndex*width, buf, count, width);
+    } else {
+        Object** pObjects;
+        int i;
+
+        pObjects = (Object**) data;
+        pObjects += firstIndex;
+
+        LOGV("    --> setting %d objects", count);
+
+        /* should do array type check here */
+        for (i = 0; i < count; i++) {
+            ObjectId id = dvmReadObjectId(&buf);
+            *pObjects++ = objectIdToObject(id);
+        }
+    }
+
+    return true;
+}
+
+/*
+ * Create a new string.
+ *
+ * The only place the reference will be held in the VM is in our registry.
+ */
+ObjectId dvmDbgCreateString(const char* str)
+{
+    StringObject* strObj;
+
+    strObj = dvmCreateStringFromCstr(str, ALLOC_DEFAULT);
+    dvmReleaseTrackedAlloc((Object*) strObj, NULL);
+    return objectToObjectId((Object*) strObj);
+}
+
+/*
+ * Determine if "instClassId" is an instance of "classId".
+ */
+bool dvmDbgMatchType(RefTypeId instClassId, RefTypeId classId)
+{
+    ClassObject* instClazz = refTypeIdToClassObject(instClassId);
+    ClassObject* clazz = refTypeIdToClassObject(classId);
+
+    return dvmInstanceof(instClazz, clazz);
+}
+
+
+/*
+ * ===========================================================================
+ *      Method and Field
+ * ===========================================================================
+ */
+
+/*
+ * Get the method name from a MethodId.
+ */
+const char* dvmDbgGetMethodName(RefTypeId refTypeId, MethodId id)
+{
+    Method* meth;
+
+    meth = methodIdToMethod(refTypeId, id);
+    return meth->name;
+}
+
+/*
+ * For ReferenceType.Fields and ReferenceType.FieldsWithGeneric:
+ * output all fields declared by the class.  Inerhited fields are
+ * not included.
+ */
+void dvmDbgOutputAllFields(RefTypeId refTypeId, bool withGeneric,
+    ExpandBuf* pReply)
+{
+    static const u1 genericSignature[1] = "";
+    ClassObject* clazz;
+    Field* field;
+    u4 declared;
+    int i;
+
+    clazz = refTypeIdToClassObject(refTypeId);
+    assert(clazz != NULL);
+
+    declared = clazz->sfieldCount + clazz->ifieldCount;
+    expandBufAdd4BE(pReply, declared);
+
+    for (i = 0; i < clazz->sfieldCount; i++) {
+        field = (Field*) &clazz->sfields[i];
+
+        expandBufAddFieldId(pReply, fieldToFieldId(field));
+        expandBufAddUtf8String(pReply, (const u1*) field->name);
+        expandBufAddUtf8String(pReply, (const u1*) field->signature);
+        if (withGeneric)
+            expandBufAddUtf8String(pReply, genericSignature);
+        expandBufAdd4BE(pReply, field->accessFlags);
+    }
+    for (i = 0; i < clazz->ifieldCount; i++) {
+        field = (Field*) &clazz->ifields[i];
+
+        expandBufAddFieldId(pReply, fieldToFieldId(field));
+        expandBufAddUtf8String(pReply, (const u1*) field->name);
+        expandBufAddUtf8String(pReply, (const u1*) field->signature);
+        if (withGeneric)
+            expandBufAddUtf8String(pReply, genericSignature);
+        expandBufAdd4BE(pReply, field->accessFlags);
+    }
+}
+
+/*
+ * For ReferenceType.Methods and ReferenceType.MethodsWithGeneric:
+ * output all methods declared by the class.  Inherited methods are
+ * not included.
+ */
+void dvmDbgOutputAllMethods(RefTypeId refTypeId, bool withGeneric,
+    ExpandBuf* pReply)
+{
+    DexStringCache stringCache;
+    static const u1 genericSignature[1] = "";
+    ClassObject* clazz;
+    Method* meth;
+    u4 declared;
+    int i;
+
+    dexStringCacheInit(&stringCache);
+    
+    clazz = refTypeIdToClassObject(refTypeId);
+    assert(clazz != NULL);
+
+    declared = clazz->directMethodCount + clazz->virtualMethodCount;
+    expandBufAdd4BE(pReply, declared);
+
+    for (i = 0; i < clazz->directMethodCount; i++) {
+        meth = &clazz->directMethods[i];
+
+        expandBufAddMethodId(pReply, methodToMethodId(meth));
+        expandBufAddUtf8String(pReply, (const u1*) meth->name);
+
+        expandBufAddUtf8String(pReply,
+            (const u1*) dexProtoGetMethodDescriptor(&meth->prototype,
+                    &stringCache));
+
+        if (withGeneric)
+            expandBufAddUtf8String(pReply, genericSignature);
+        expandBufAdd4BE(pReply, meth->accessFlags);
+    }
+    for (i = 0; i < clazz->virtualMethodCount; i++) {
+        meth = &clazz->virtualMethods[i];
+
+        expandBufAddMethodId(pReply, methodToMethodId(meth));
+        expandBufAddUtf8String(pReply, (const u1*) meth->name);
+
+        expandBufAddUtf8String(pReply,
+            (const u1*) dexProtoGetMethodDescriptor(&meth->prototype,
+                    &stringCache));
+
+        if (withGeneric)
+            expandBufAddUtf8String(pReply, genericSignature);
+        expandBufAdd4BE(pReply, meth->accessFlags);
+    }
+
+    dexStringCacheRelease(&stringCache);
+}
+
+/*
+ * Output all interfaces directly implemented by the class.
+ */
+void dvmDbgOutputAllInterfaces(RefTypeId refTypeId, ExpandBuf* pReply)
+{
+    ClassObject* clazz;
+    int i, start, count;
+
+    clazz = refTypeIdToClassObject(refTypeId);
+    assert(clazz != NULL);
+
+    if (clazz->super == NULL)
+        start = 0;
+    else
+        start = clazz->super->iftableCount;
+
+    count = clazz->iftableCount - start;
+    expandBufAdd4BE(pReply, count);
+    for (i = start; i < clazz->iftableCount; i++) {
+        ClassObject* iface = clazz->iftable[i].clazz;
+        expandBufAddRefTypeId(pReply, classObjectToRefTypeId(iface));
+    }
+}
+
/*
 * Shared state for the dexDecodeDebugInfo() callbacks used by the line
 * table and variable table emitters below.
 */
typedef struct DebugCallbackContext {
    int numItems;           /* number of entries appended to pReply */
    ExpandBuf* pReply;      /* reply buffer entries are written into */
    // used by locals table
    bool withGeneric;       /* emit generic-signature field per entry */
} DebugCallbackContext;
+
+static int lineTablePositionsCb(void *cnxt, u4 address, u4 lineNum) 
+{
+    DebugCallbackContext *pContext = (DebugCallbackContext *)cnxt;
+
+    expandBufAdd8BE(pContext->pReply, address);
+    expandBufAdd4BE(pContext->pReply, lineNum);
+    pContext->numItems++;
+
+    return 0;
+}
+
+/*
+ * For Method.LineTable: output the line table.
+ *
+ * Note we operate in Dalvik's 16-bit units rather than bytes.
+ */
+void dvmDbgOutputLineTable(RefTypeId refTypeId, MethodId methodId,
+    ExpandBuf* pReply)
+{
+    Method* method;
+    u8 start, end;
+    int i;
+    DebugCallbackContext context;
+
+    memset (&context, 0, sizeof(DebugCallbackContext));
+
+    method = methodIdToMethod(refTypeId, methodId);
+    if (dvmIsNativeMethod(method)) {
+        start = (u8) -1;
+        end = (u8) -1;
+    } else {
+        start = 0;
+        end = dvmGetMethodInsnsSize(method);
+    }
+
+    expandBufAdd8BE(pReply, start);
+    expandBufAdd8BE(pReply, end);
+
+    // Add numLines later
+    size_t numLinesOffset = expandBufGetLength(pReply);
+    expandBufAdd4BE(pReply, 0);
+
+    context.pReply = pReply;
+
+    dexDecodeDebugInfo(method->clazz->pDvmDex->pDexFile,
+        dvmGetMethodCode(method),
+        method->clazz->descriptor,
+        method->prototype.protoIdx,
+        method->accessFlags,
+        lineTablePositionsCb, NULL, &context);
+
+    set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
+}
+
+/*
+ * Eclipse appears to expect that the "this" reference is in slot zero.
+ * If it's not, the "variables" display will show two copies of "this",
+ * possibly because it gets "this" from SF.ThisObject and then displays
+ * all locals with nonzero slot numbers.
+ *
+ * So, we remap the item in slot 0 to 1000, and remap "this" to zero.  On
+ * SF.GetValues / SF.SetValues we map them back.
+ */
+static int tweakSlot(int slot, const char* name)
+{
+    int newSlot = slot;
+
+    if (strcmp(name, "this") == 0)      // only remap "this" ptr
+        newSlot = 0;
+    else if (slot == 0)                 // always remap slot 0
+        newSlot = kSlot0Sub;
+
+    LOGV("untweak: %d to %d\n", slot, newSlot);
+    return newSlot;
+}
+
+/*
+ * Reverse Eclipse hack.
+ */
+static int untweakSlot(int slot, const void* framePtr)
+{
+    int newSlot = slot;
+
+    if (slot == kSlot0Sub) {
+        newSlot = 0;
+    } else if (slot == 0) {
+        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(framePtr);
+        const Method* method = saveArea->method;
+        newSlot = method->registersSize - method->insSize;
+    }
+
+    LOGV("untweak: %d to %d\n", slot, newSlot);
+    return newSlot;
+}
+
/*
 * dexDecodeDebugInfo local-variable callback: append one variable entry
 * (start address, name, descriptor, [generic signature,] length, slot)
 * to the reply buffer, applying the Eclipse slot remap via tweakSlot().
 */
static void variableTableCb (void *cnxt, u2 reg, u4 startAddress,
        u4 endAddress, const char *name, const char *descriptor,
        const char *signature)
{
    DebugCallbackContext *pContext = (DebugCallbackContext *)cnxt;

    /* remap slot 0 / "this" for the debugger's benefit */
    reg = (u2) tweakSlot(reg, name);

    LOGV("    %2d: %d(%d) '%s' '%s' slot=%d\n",
        pContext->numItems, startAddress, endAddress - startAddress,
        name, descriptor, reg);

    expandBufAdd8BE(pContext->pReply, startAddress);
    expandBufAddUtf8String(pContext->pReply, (const u1*)name);
    expandBufAddUtf8String(pContext->pReply, (const u1*)descriptor);
    if (pContext->withGeneric) {
        /* only present in the WithGeneric variant of the reply */
        expandBufAddUtf8String(pContext->pReply, (const u1*) signature);
    }
    expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
    expandBufAdd4BE(pContext->pReply, reg);

    pContext->numItems++;
}
+
+/*
+ * For Method.VariableTable[WithGeneric]: output information about local
+ * variables for the specified method.
+ */
+void dvmDbgOutputVariableTable(RefTypeId refTypeId, MethodId methodId,
+    bool withGeneric, ExpandBuf* pReply)
+{
+    Method* method;
+    DebugCallbackContext context;
+
+    memset (&context, 0, sizeof(DebugCallbackContext));
+    
+    method = methodIdToMethod(refTypeId, methodId);
+
+    expandBufAdd4BE(pReply, method->insSize);
+
+    // Add numLocals later
+    size_t numLocalsOffset = expandBufGetLength(pReply);
+    expandBufAdd4BE(pReply, 0);
+
+    context.pReply = pReply;
+    context.withGeneric = withGeneric;
+    dexDecodeDebugInfo(method->clazz->pDvmDex->pDexFile,
+        dvmGetMethodCode(method),
+        method->clazz->descriptor,
+        method->prototype.protoIdx,
+        method->accessFlags,
+        NULL, variableTableCb, &context);
+
+    set4BE(expandBufGetBuffer(pReply) + numLocalsOffset, context.numItems);
+}
+
+/*
+ * Get the type tag for the field's type.
+ */
+int dvmDbgGetFieldTag(ObjectId objId, FieldId fieldId)
+{
+    Object* obj = objectIdToObject(objId);
+    RefTypeId classId = classObjectToRefTypeId(obj->clazz);
+    Field* field = fieldIdToField(classId, fieldId);
+
+    return dvmDbgGetSignatureTag(field->signature);
+}
+
+/*
+ * Get the type tag for the static field's type.
+ */
+int dvmDbgGetStaticFieldTag(RefTypeId refTypeId, FieldId fieldId)
+{
+    Field* field = fieldIdToField(refTypeId, fieldId);
+    return dvmDbgGetSignatureTag(field->signature);
+}
+
/*
 * Copy the value of an instance field into the specified buffer.
 *
 * The value is written in JDWP big-endian wire format.  "buf" must hold at
 * least "expectedLen" bytes; the width is implied by the first character of
 * the field's type signature.  Object references are converted to ObjectIds.
 */
void dvmDbgGetFieldValue(ObjectId objectId, FieldId fieldId, u1* buf,
    int expectedLen)
{
    Object* obj = objectIdToObject(objectId);
    RefTypeId classId = classObjectToRefTypeId(obj->clazz);
    InstField* field = (InstField*) fieldIdToField(classId, fieldId);
    Object* objVal;
    u4 intVal;
    u8 longVal;

    /* dispatch on the type-signature character (JT_* tag values) */
    switch (field->field.signature[0]) {
    case JT_BOOLEAN:
        assert(expectedLen == 1);
        intVal = dvmGetFieldBoolean(obj, field->byteOffset);
        set1(buf, intVal != 0);
        break;
    case JT_BYTE:
        assert(expectedLen == 1);
        intVal = dvmGetFieldInt(obj, field->byteOffset);
        set1(buf, intVal);
        break;
    case JT_SHORT:
    case JT_CHAR:
        assert(expectedLen == 2);
        intVal = dvmGetFieldInt(obj, field->byteOffset);
        set2BE(buf, intVal);
        break;
    case JT_INT:
    case JT_FLOAT:
        /* float goes out as its raw 32-bit pattern */
        assert(expectedLen == 4);
        intVal = dvmGetFieldInt(obj, field->byteOffset);
        set4BE(buf, intVal);
        break;
    case JT_ARRAY:
    case JT_OBJECT:
        /* references are sent as registered ObjectIds */
        assert(expectedLen == sizeof(ObjectId));
        objVal = dvmGetFieldObject(obj, field->byteOffset);
        dvmSetObjectId(buf, objectToObjectId(objVal));
        break;
    case JT_DOUBLE:
    case JT_LONG:
        assert(expectedLen == 8);
        longVal = dvmGetFieldLong(obj, field->byteOffset);
        set8BE(buf, longVal);
        break;
    default:
        LOGE("ERROR: unhandled class type '%s'\n", field->field.signature);
        assert(false);
        break;
    }
}
+
/*
 * Set the value of the specified instance field.
 *
 * "value" holds the raw bits from the debugger; "width" is the byte width
 * implied by the tag the debugger sent.  Narrow types are truncated by the
 * dvmSetFieldInt store; references arrive as ObjectIds and are converted.
 */
void dvmDbgSetFieldValue(ObjectId objectId, FieldId fieldId, u8 value,
    int width)
{
    Object* obj = objectIdToObject(objectId);
    RefTypeId classId = classObjectToRefTypeId(obj->clazz);
    InstField* field = (InstField*) fieldIdToField(classId, fieldId);

    /* dispatch on the type-signature character (JT_* tag values) */
    switch (field->field.signature[0]) {
    case JT_BOOLEAN:
        assert(width == 1);
        dvmSetFieldBoolean(obj, field->byteOffset, value != 0);
        break;
    case JT_BYTE:
        assert(width == 1);
        dvmSetFieldInt(obj, field->byteOffset, value);
        break;
    case JT_SHORT:
    case JT_CHAR:
        assert(width == 2);
        dvmSetFieldInt(obj, field->byteOffset, value);
        break;
    case JT_INT:
    case JT_FLOAT:
        /* float arrives as its raw 32-bit pattern */
        assert(width == 4);
        dvmSetFieldInt(obj, field->byteOffset, value);
        break;
    case JT_ARRAY:
    case JT_OBJECT:
        assert(width == sizeof(ObjectId));
        dvmSetFieldObject(obj, field->byteOffset, objectIdToObject(value));
        break;
    case JT_DOUBLE:
    case JT_LONG:
        assert(width == 8);
        dvmSetFieldLong(obj, field->byteOffset, value);
        break;
    default:
        LOGE("ERROR: unhandled class type '%s'\n", field->field.signature);
        assert(false);
        break;
    }
}
+
/*
 * Copy the value of a static field into the specified buffer.
 *
 * The value is written in JDWP big-endian wire format.  "buf" must hold at
 * least "expectedLen" bytes.  float/double are routed through a JValue so
 * their raw bit patterns go out, avoiding an FP->int value conversion.
 */
void dvmDbgGetStaticFieldValue(RefTypeId refTypeId, FieldId fieldId, u1* buf,
    int expectedLen)
{
    StaticField* sfield = (StaticField*) fieldIdToField(refTypeId, fieldId);
    Object* objVal;
    JValue value;

    /* dispatch on the type-signature character (JT_* tag values) */
    switch (sfield->field.signature[0]) {
    case JT_BOOLEAN:
        assert(expectedLen == 1);
        set1(buf, dvmGetStaticFieldBoolean(sfield));
        break;
    case JT_BYTE:
        assert(expectedLen == 1);
        set1(buf, dvmGetStaticFieldByte(sfield));
        break;
    case JT_SHORT:
        assert(expectedLen == 2);
        set2BE(buf, dvmGetStaticFieldShort(sfield));
        break;
    case JT_CHAR:
        assert(expectedLen == 2);
        set2BE(buf, dvmGetStaticFieldChar(sfield));
        break;
    case JT_INT:
        assert(expectedLen == 4);
        set4BE(buf, dvmGetStaticFieldInt(sfield));
        break;
    case JT_FLOAT:
        assert(expectedLen == 4);
        /* reinterpret the float's bits as a u4 via the union */
        value.f = dvmGetStaticFieldFloat(sfield);
        set4BE(buf, value.i);
        break;
    case JT_ARRAY:
    case JT_OBJECT:
        /* references are sent as registered ObjectIds */
        assert(expectedLen == sizeof(ObjectId));
        objVal = dvmGetStaticFieldObject(sfield);
        dvmSetObjectId(buf, objectToObjectId(objVal));
        break;
    case JT_LONG:
        assert(expectedLen == 8);
        set8BE(buf, dvmGetStaticFieldLong(sfield));
        break;
    case JT_DOUBLE:
        assert(expectedLen == 8);
        /* reinterpret the double's bits as a u8 via the union */
        value.d = dvmGetStaticFieldDouble(sfield);
        set8BE(buf, value.j);
        break;
    default:
        LOGE("ERROR: unhandled class type '%s'\n", sfield->field.signature);
        assert(false);
        break;
    }
}
+
/*
 * Set the value of a static field.
 *
 * "rawValue" holds the raw bits from the debugger; they are dropped into a
 * JValue union so each case can pull out the correctly-typed view without
 * any value conversion.  "width" is the byte width implied by the tag.
 */
void dvmDbgSetStaticFieldValue(RefTypeId refTypeId, FieldId fieldId,
    u8 rawValue, int width)
{
    StaticField* sfield = (StaticField*) fieldIdToField(refTypeId, fieldId);
    Object* objVal;
    JValue value;

    value.j = rawValue;

    /* dispatch on the type-signature character (JT_* tag values) */
    switch (sfield->field.signature[0]) {
    case JT_BOOLEAN:
        assert(width == 1);
        dvmSetStaticFieldBoolean(sfield, value.z);
        break;
    case JT_BYTE:
        assert(width == 1);
        dvmSetStaticFieldByte(sfield, value.b);
        break;
    case JT_SHORT:
        assert(width == 2);
        dvmSetStaticFieldShort(sfield, value.s);
        break;
    case JT_CHAR:
        assert(width == 2);
        dvmSetStaticFieldChar(sfield, value.c);
        break;
    case JT_INT:
        assert(width == 4);
        dvmSetStaticFieldInt(sfield, value.i);
        break;
    case JT_FLOAT:
        assert(width == 4);
        dvmSetStaticFieldFloat(sfield, value.f);
        break;
    case JT_ARRAY:
    case JT_OBJECT:
        /* references arrive as ObjectIds; convert back to an Object* */
        assert(width == sizeof(ObjectId));
        objVal = objectIdToObject(rawValue);
        dvmSetStaticFieldObject(sfield, objVal);
        break;
    case JT_LONG:
        assert(width == 8);
        dvmSetStaticFieldLong(sfield, value.j);
        break;
    case JT_DOUBLE:
        assert(width == 8);
        dvmSetStaticFieldDouble(sfield, value.d);
        break;
    default:
        LOGE("ERROR: unhandled class type '%s'\n", sfield->field.signature);
        assert(false);
        break;
    }
}
+
+/*
+ * Convert a string object to a UTF-8 string.
+ *
+ * Returns a newly-allocated string.
+ */
+char* dvmDbgStringToUtf8(ObjectId strId)
+{
+    StringObject* strObj = (StringObject*) objectIdToObject(strId);
+
+    return dvmCreateCstrFromString(strObj);
+}
+
+
+/*
+ * ===========================================================================
+ *      Thread and ThreadGroup
+ * ===========================================================================
+ */
+
+/*
+ * Convert a thread object to a Thread ptr.
+ *
+ * This currently requires running through the list of threads and finding
+ * a match.
+ *
+ * IMPORTANT: grab gDvm.threadListLock before calling here.
+ */
+static Thread* threadObjToThread(Object* threadObj)
+{
+    Thread* thread;
+
+    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+        if (thread->threadObj == threadObj)
+            break;
+    }
+
+    return thread;
+}
+
+/*
+ * Get the status and suspend state of a thread.
+ */
+bool dvmDbgGetThreadStatus(ObjectId threadId, u4* pThreadStatus,
+    u4* pSuspendStatus)
+{
+    Object* threadObj;
+    Thread* thread;
+    bool result = false;
+    
+    threadObj = objectIdToObject(threadId);
+    assert(threadObj != NULL);
+
+    /* lock the thread list, so the thread doesn't vanish while we work */
+    dvmLockThreadList(NULL);
+
+    thread = threadObjToThread(threadObj);
+    if (thread == NULL)
+        goto bail;
+
+    switch (thread->status) {
+    case THREAD_ZOMBIE:         *pThreadStatus = TS_ZOMBIE;     break;
+    case THREAD_RUNNING:        *pThreadStatus = TS_RUNNING;    break;
+    case THREAD_TIMED_WAIT:     *pThreadStatus = TS_SLEEPING;   break;
+    case THREAD_MONITOR:        *pThreadStatus = TS_MONITOR;    break;
+    case THREAD_WAIT:           *pThreadStatus = TS_WAIT;       break;
+    case THREAD_INITIALIZING:   *pThreadStatus = TS_ZOMBIE;     break;
+    case THREAD_STARTING:       *pThreadStatus = TS_ZOMBIE;     break;
+    case THREAD_NATIVE:         *pThreadStatus = TS_RUNNING;    break;
+    case THREAD_VMWAIT:         *pThreadStatus = TS_WAIT;       break;
+    default:
+        assert(false);
+        *pThreadStatus = THREAD_ZOMBIE;
+        break;
+    }
+
+    if (dvmIsSuspended(thread))
+        *pSuspendStatus = SUSPEND_STATUS_SUSPENDED;
+    else
+        *pSuspendStatus = 0;
+
+    result = true;
+
+bail:
+    dvmUnlockThreadList();
+    return result;
+}
+
+/*
+ * Get the thread's suspend count.
+ */
+u4 dvmDbgGetThreadSuspendCount(ObjectId threadId)
+{
+    Object* threadObj;
+    Thread* thread;
+    u4 result = 0;
+    
+    threadObj = objectIdToObject(threadId);
+    assert(threadObj != NULL);
+
+    /* lock the thread list, so the thread doesn't vanish while we work */
+    dvmLockThreadList(NULL);
+
+    thread = threadObjToThread(threadObj);
+    if (thread == NULL)
+        goto bail;
+
+    result = thread->suspendCount;
+
+bail:
+    dvmUnlockThreadList();
+    return result;
+}
+
+/*
+ * Determine whether or not a thread exists in the VM's thread list.
+ *
+ * Returns "true" if the thread exists.
+ */
+bool dvmDbgThreadExists(ObjectId threadId)
+{
+    Object* threadObj;
+    Thread* thread;
+    bool result;
+    
+    threadObj = objectIdToObject(threadId);
+    assert(threadObj != NULL);
+
+    /* lock the thread list, so the thread doesn't vanish while we work */
+    dvmLockThreadList(NULL);
+
+    thread = threadObjToThread(threadObj);
+    if (thread == NULL)
+        result = false;
+    else
+        result = true;
+
+    dvmUnlockThreadList();
+    return result;
+}
+
+/*
+ * Determine whether or not a thread is suspended.
+ *
+ * Returns "false" if the thread is running or doesn't exist.
+ */
+bool dvmDbgIsSuspended(ObjectId threadId)
+{
+    Object* threadObj;
+    Thread* thread;
+    bool result = false;
+    
+    threadObj = objectIdToObject(threadId);
+    assert(threadObj != NULL);
+
+    /* lock the thread list, so the thread doesn't vanish while we work */
+    dvmLockThreadList(NULL);
+
+    thread = threadObjToThread(threadObj);
+    if (thread == NULL)
+        goto bail;
+
+    result = dvmIsSuspended(thread);
+
+bail:
+    dvmUnlockThreadList();
+    return result;
+}
+
#if 0   /* compiled out -- no current callers */
/*
 * Wait until a thread suspends.
 *
 * We stray from the usual pattern here, and release the thread list lock
 * before we use the Thread.  This is necessary and should be safe in this
 * circumstance; see comments in dvmWaitForSuspend().
 */
void dvmDbgWaitForSuspend(ObjectId threadId)
{
    Object* threadObj;
    Thread* thread;

    threadObj = objectIdToObject(threadId);
    assert(threadObj != NULL);

    /* only hold the lock long enough to resolve the Thread pointer */
    dvmLockThreadList(NULL);
    thread = threadObjToThread(threadObj);
    dvmUnlockThreadList();

    if (thread != NULL)
        dvmWaitForSuspend(thread);
}
#endif
+
+
+/*
+ * Return the ObjectId for the "system" thread group.
+ */
+ObjectId dvmDbgGetSystemThreadGroupId(void)
+{
+    Object* groupObj = dvmGetSystemThreadGroup();
+    return objectToObjectId(groupObj);
+}
+
/*
 * Return the ObjectId for the "main" thread group.
 */
ObjectId dvmDbgGetMainThreadGroupId(void)
{
    Object* groupObj = dvmGetMainThreadGroup();
    return objectToObjectId(groupObj);
}
+
+/*
+ * Get the name of a thread.
+ *
+ * Returns a newly-allocated string.
+ */
+char* dvmDbgGetThreadName(ObjectId threadId)
+{
+    Object* threadObj;
+    StringObject* nameStr;
+    char* str;
+    char* result;
+
+    threadObj = objectIdToObject(threadId);
+    assert(threadObj != NULL);
+
+    nameStr = (StringObject*) dvmGetFieldObject(threadObj,
+                                                gDvm.offJavaLangThread_name);
+    str = dvmCreateCstrFromString(nameStr);
+    result = (char*) malloc(strlen(str) + 20);
+
+    /* lock the thread list, so the thread doesn't vanish while we work */
+    dvmLockThreadList(NULL);
+    Thread* thread = threadObjToThread(threadObj);
+    if (thread != NULL)
+        sprintf(result, "<%d> %s", thread->threadId, str);
+    else
+        sprintf(result, "%s", str);
+    dvmUnlockThreadList();
+
+    free(str);
+    return result;
+}
+
+/*
+ * Get a thread's group.
+ */
+ObjectId dvmDbgGetThreadGroup(ObjectId threadId)
+{
+    Object* threadObj;
+    Object* group;
+
+    threadObj = objectIdToObject(threadId);
+    assert(threadObj != NULL);
+
+    group = dvmGetFieldObject(threadObj, gDvm.offJavaLangThread_group);
+    return objectToObjectId(group);
+}
+
+
+/*
+ * Get the name of a thread group.
+ *
+ * Returns a newly-allocated string.
+ */
+char* dvmDbgGetThreadGroupName(ObjectId threadGroupId)
+{
+    Object* threadGroup;
+    InstField* nameField;
+    StringObject* nameStr;
+
+    threadGroup = objectIdToObject(threadGroupId);
+    assert(threadGroup != NULL);
+
+    nameField = dvmFindInstanceField(gDvm.classJavaLangThreadGroup,
+                    "name", "Ljava/lang/String;");
+    if (nameField == NULL) {
+        LOGE("unable to find name field in ThreadGroup\n");
+        return NULL;
+    }
+
+    nameStr = (StringObject*) dvmGetFieldObject(threadGroup,
+                                                nameField->byteOffset);
+    return dvmCreateCstrFromString(nameStr);
+}
+
/*
 * Get the parent of a thread group.
 *
 * Returns the ObjectId of the parent ThreadGroup (the ID of a NULL
 * reference if the "parent" field can't be located).
 */
ObjectId dvmDbgGetThreadGroupParent(ObjectId threadGroupId)
{
    Object* threadGroup;
    InstField* parentField;
    Object* parent;

    threadGroup = objectIdToObject(threadGroupId);
    assert(threadGroup != NULL);

    parentField = dvmFindInstanceField(gDvm.classJavaLangThreadGroup,
                    "parent", "Ljava/lang/ThreadGroup;");
    if (parentField == NULL) {
        LOGE("unable to find parent field in ThreadGroup\n");
        parent = NULL;
    } else {
        parent = dvmGetFieldObject(threadGroup, parentField->byteOffset);
    }
    return objectToObjectId(parent);
}
+
+/*
+ * Get the list of threads in the thread group.
+ *
+ * We do this by running through the full list of threads and returning
+ * the ones that have the ThreadGroup object as their owner.
+ *
+ * If threadGroupId is set to "kAllThreads", we ignore the group field and
+ * return all threads.
+ *
+ * The caller must free "*ppThreadIds".
+ */
+void dvmDbgGetThreadGroupThreads(ObjectId threadGroupId,
+    ObjectId** ppThreadIds, u4* pThreadCount)
+{
+    Object* targetThreadGroup = NULL;
+    InstField* groupField = NULL;
+    Thread* thread;
+    int count;
+
+    if (threadGroupId != THREAD_GROUP_ALL) {
+        targetThreadGroup = objectIdToObject(threadGroupId);
+        assert(targetThreadGroup != NULL);
+    }
+
+    groupField = dvmFindInstanceField(gDvm.classJavaLangThread,
+        "group", "Ljava/lang/ThreadGroup;");
+
+    dvmLockThreadList(NULL);
+
+    thread = gDvm.threadList;
+    count = 0;
+    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+        Object* group;
+
+        /* Skip over the JDWP support thread.  Some debuggers
+         * get bent out of shape when they can't suspend and
+         * query all threads, so it's easier if we just don't
+         * tell them about us.
+         */
+        if (thread->handle == dvmJdwpGetDebugThread(gDvm.jdwpState))
+            continue;
+
+        /* This thread is currently being created, and isn't ready
+         * to be seen by the debugger yet.
+         */
+        if (thread->threadObj == NULL)
+            continue;
+
+        group = dvmGetFieldObject(thread->threadObj, groupField->byteOffset);
+        if (threadGroupId == THREAD_GROUP_ALL || group == targetThreadGroup)
+            count++;
+    }
+
+    *pThreadCount = count;
+
+    if (count == 0) {
+        *ppThreadIds = NULL;
+    } else {
+        ObjectId* ptr;
+        ptr = *ppThreadIds = (ObjectId*) malloc(sizeof(ObjectId) * count);
+
+        for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+            Object* group;
+
+            /* Skip over the JDWP support thread.  Some debuggers
+             * get bent out of shape when they can't suspend and
+             * query all threads, so it's easier if we just don't
+             * tell them about us.
+             */
+            if (thread->handle == dvmJdwpGetDebugThread(gDvm.jdwpState))
+                continue;
+
+            /* This thread is currently being created, and isn't ready
+             * to be seen by the debugger yet.
+             */
+            if (thread->threadObj == NULL)
+                continue;
+
+            group = dvmGetFieldObject(thread->threadObj,groupField->byteOffset);
+            if (threadGroupId == THREAD_GROUP_ALL || group == targetThreadGroup)
+            {
+                *ptr++ = objectToObjectId(thread->threadObj);
+                count--;
+            }
+        }
+
+        assert(count == 0);
+    }
+
+    dvmUnlockThreadList();
+}
+
/*
 * Get all threads.
 *
 * Thin wrapper around dvmDbgGetThreadGroupThreads() with the "all
 * threads" sentinel group.  The caller must free "*ppThreadIds".
 */
void dvmDbgGetAllThreads(ObjectId** ppThreadIds, u4* pThreadCount)
{
    dvmDbgGetThreadGroupThreads(THREAD_GROUP_ALL, ppThreadIds, pThreadCount);
}
+
+
+/*
+ * Count up the #of frames on the thread's stack.
+ *
+ * Returns -1 on failure;
+ */
+int dvmDbgGetThreadFrameCount(ObjectId threadId)
+{
+    Object* threadObj;
+    Thread* thread;
+    void* framePtr;
+    u4 count = 0;
+
+    threadObj = objectIdToObject(threadId);
+
+    dvmLockThreadList(NULL);
+
+    thread = threadObjToThread(threadObj);
+    if (thread == NULL)
+        goto bail;
+
+    framePtr = thread->curFrame;
+    while (framePtr != NULL) {
+        if (!dvmIsBreakFrame(framePtr))
+            count++;
+
+        framePtr = SAVEAREA_FROM_FP(framePtr)->prevFrame;
+    }
+
+bail:
+    dvmUnlockThreadList();
+    return count;
+}
+
/*
 * Get info for frame N from the specified thread's stack.
 *
 * "num" is a 0-based index from the top of the stack, counting only
 * non-break frames.  On success, fills in "*pFrameId" and "*pLoc" and
 * returns true; returns false if the thread or frame can't be found.
 */
bool dvmDbgGetThreadFrame(ObjectId threadId, int num, FrameId* pFrameId,
    JdwpLocation* pLoc)
{
    Object* threadObj;
    Thread* thread;
    void* framePtr;
    int count;

    threadObj = objectIdToObject(threadId);

    dvmLockThreadList(NULL);

    thread = threadObjToThread(threadObj);
    if (thread == NULL)
        goto bail;

    framePtr = thread->curFrame;
    count = 0;
    while (framePtr != NULL) {
        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(framePtr);
        const Method* method = saveArea->method;

        if (!dvmIsBreakFrame(framePtr)) {
            if (count == num) {
                *pFrameId = frameToFrameId(framePtr);
                if (dvmIsInterfaceClass(method->clazz))
                    pLoc->typeTag = TT_INTERFACE;
                else
                    pLoc->typeTag = TT_CLASS;
                pLoc->classId = classObjectToRefTypeId(method->clazz);
                pLoc->methodId = methodToMethodId(method);
                /* native frames have no meaningful PC offset */
                if (dvmIsNativeMethod(method))
                    pLoc->idx = (u8)-1;
                else
                    pLoc->idx = saveArea->xtra.currentPc - method->insns;
                /* success path unlocks here rather than falling to bail */
                dvmUnlockThreadList();
                return true;
            }

            count++;
        }

        framePtr = saveArea->prevFrame;
    }

bail:
    dvmUnlockThreadList();
    return false;
}
+
+/*
+ * Get the ThreadId for the current thread.
+ */
+ObjectId dvmDbgGetThreadSelfId(void)
+{
+    Thread* self = dvmThreadSelf();
+    return objectToObjectId(self->threadObj);
+}
+
+/*
+ * Suspend the VM.
+ */
+void dvmDbgSuspendVM(bool isEvent)
+{
+    dvmSuspendAllThreads(isEvent ? SUSPEND_FOR_DEBUG_EVENT : SUSPEND_FOR_DEBUG);
+}
+
+/*
+ * Resume the VM.
+ */
+void dvmDbgResumeVM()
+{
+    dvmResumeAllThreads(SUSPEND_FOR_DEBUG);
+}
+
+/*
+ * Suspend one thread (not ourselves).
+ */
+void dvmDbgSuspendThread(ObjectId threadId)
+{
+    Object* threadObj = objectIdToObject(threadId);
+    Thread* thread;
+
+    dvmLockThreadList(NULL);
+
+    thread = threadObjToThread(threadObj);
+    if (thread == NULL) {
+        /* can happen if our ThreadDeath notify crosses in the mail */
+        LOGW("WARNING: threadid=%llx obj=%p no match\n", threadId, threadObj);
+    } else {
+        dvmSuspendThread(thread);
+    }
+
+    dvmUnlockThreadList();
+}
+
+/*
+ * Resume one thread (not ourselves).
+ */
+void dvmDbgResumeThread(ObjectId threadId)
+{
+    Object* threadObj = objectIdToObject(threadId);
+    Thread* thread;
+
+    dvmLockThreadList(NULL);
+
+    thread = threadObjToThread(threadObj);
+    if (thread == NULL) {
+        LOGW("WARNING: threadid=%llx obj=%p no match\n", threadId, threadObj);
+    } else {
+        dvmResumeThread(thread);
+    }
+
+    dvmUnlockThreadList();
+}
+
/*
 * Suspend ourselves after sending an event to the debugger.
 *
 * The "true" argument tells dvmSuspendSelf this is an event-driven
 * suspend (see that function for the distinction).
 */
void dvmDbgSuspendSelf(void)
{
    dvmSuspendSelf(true);
}
+
+/*
+ * Get the "this" object for the specified frame.
+ */
+static Object* getThisObject(const u4* framePtr)
+{
+    const StackSaveArea* saveArea = SAVEAREA_FROM_FP(framePtr);
+    const Method* method = saveArea->method;
+    int argOffset = method->registersSize - method->insSize;
+    Object* thisObj;
+
+    if (method == NULL) {
+        /* this is a "break" frame? */
+        assert(false);
+        return NULL;
+    }
+
+    LOGVV("  Pulling this object for frame at %p\n", framePtr);
+    LOGVV("    Method='%s' native=%d static=%d this=%p\n",
+        method->name, dvmIsNativeMethod(method),
+        dvmIsStaticMethod(method), (Object*) framePtr[argOffset]);
+
+    /*
+     * No "this" pointer for statics.  No args on the interp stack for
+     * native methods invoked directly from the VM.
+     */
+    if (dvmIsNativeMethod(method) || dvmIsStaticMethod(method))
+        thisObj = NULL;
+    else
+        thisObj = (Object*) framePtr[argOffset];
+
+    return thisObj;
+}
+
+/*
+ * Return the "this" object for the specified frame.  The thread must be
+ * suspended.
+ */
+bool dvmDbgGetThisObject(ObjectId threadId, FrameId frameId, ObjectId* pThisId)
+{
+    const u4* framePtr = frameIdToFrame(frameId);
+    Object* thisObj;
+
+    UNUSED_PARAMETER(threadId);
+
+    thisObj = getThisObject(framePtr);
+
+    *pThisId = objectToObjectId(thisObj);
+    return true;
+}
+
/*
 * Copy the value of a method argument or local variable into the
 * specified buffer.  The value will be preceeded with the tag: buf[0]
 * gets the (possibly refined) tag, the value itself starts at buf+1 in
 * JDWP big-endian wire format.
 */
void dvmDbgGetLocalValue(ObjectId threadId, FrameId frameId, int slot,
    u1 tag, u1* buf, int expectedLen)
{
    const u4* framePtr = frameIdToFrame(frameId);
    Object* objVal;
    u4 intVal;
    u8 longVal;

    UNUSED_PARAMETER(threadId);

    slot = untweakSlot(slot, framePtr);     // Eclipse workaround

    switch (tag) {
    case JT_BOOLEAN:
        assert(expectedLen == 1);
        intVal = framePtr[slot];
        set1(buf+1, intVal != 0);
        break;
    case JT_BYTE:
        assert(expectedLen == 1);
        intVal = framePtr[slot];
        set1(buf+1, intVal);
        break;
    case JT_SHORT:
    case JT_CHAR:
        assert(expectedLen == 2);
        intVal = framePtr[slot];
        set2BE(buf+1, intVal);
        break;
    case JT_INT:
    case JT_FLOAT:
        assert(expectedLen == 4);
        intVal = framePtr[slot];
        set4BE(buf+1, intVal);
        break;
    case JT_ARRAY:
        assert(expectedLen == 8);
        {
            /* convert to "ObjectId" */
            objVal = (Object*)framePtr[slot];
            if (objVal != NULL && !dvmIsValidObject(objVal)) {
                LOGW("JDWP: slot %d expected to hold array, %p invalid\n",
                    slot, objVal);
                dvmAbort();         // DEBUG: make it obvious
                objVal = NULL;
                tag = JT_OBJECT;    // JT_ARRAY not expected for NULL ref
            }
            dvmSetObjectId(buf+1, objectToObjectId(objVal));
        }
        break;
    case JT_OBJECT:
        assert(expectedLen == 8);
        {
            /* convert to "ObjectId"; refine the tag to the actual type */
            objVal = (Object*)framePtr[slot];
            //char* name;

            if (objVal != NULL) {
                if (!dvmIsValidObject(objVal)) {
                    LOGW("JDWP: slot %d expected to hold object, %p invalid\n",
                        slot, objVal);
                    dvmAbort();         // DEBUG: make it obvious
                    objVal = NULL;
                }
                //name = generateJNISignature(objVal->clazz);
                tag = resultTagFromObject(objVal);
                //free(name);
            } else {
                tag = JT_OBJECT;
            }
            dvmSetObjectId(buf+1, objectToObjectId(objVal));
        }
        break;
    case JT_DOUBLE:
    case JT_LONG:
        /* 64-bit values occupy two adjacent registers */
        assert(expectedLen == 8);
        longVal = *(u8*)(&framePtr[slot]);
        set8BE(buf+1, longVal);
        break;
    default:
        LOGE("ERROR: unhandled tag '%c'\n", tag);
        assert(false);
        break;
    }

    /* the tag may have been refined above (e.g. JT_OBJECT -> JT_STRING) */
    set1(buf, tag);
}
+
/*
 * Copy a new value into an argument or local variable.
 *
 * "value" holds the raw bits from the debugger; "width" is the byte width
 * implied by "tag".  32-bit-or-narrower values are truncated into a single
 * register; 64-bit values span two adjacent registers.
 */
void dvmDbgSetLocalValue(ObjectId threadId, FrameId frameId, int slot, u1 tag,
    u8 value, int width)
{
    u4* framePtr = frameIdToFrame(frameId);

    UNUSED_PARAMETER(threadId);

    slot = untweakSlot(slot, framePtr);     // Eclipse workaround

    switch (tag) {
    case JT_BOOLEAN:
        assert(width == 1);
        framePtr[slot] = (u4)value;
        break;
    case JT_BYTE:
        assert(width == 1);
        framePtr[slot] = (u4)value;
        break;
    case JT_SHORT:
    case JT_CHAR:
        assert(width == 2);
        framePtr[slot] = (u4)value;
        break;
    case JT_INT:
    case JT_FLOAT:
        assert(width == 4);
        framePtr[slot] = (u4)value;
        break;
    case JT_STRING:
        /* The debugger calls VirtualMachine.CreateString to create a new
         * string, then uses this to set the object reference, when you
         * edit a String object */
    case JT_ARRAY:
    case JT_OBJECT:
        /* references arrive as ObjectIds; store the raw Object pointer */
        assert(width == sizeof(ObjectId));
        framePtr[slot] = (u4) objectIdToObject(value);
        break;
    case JT_DOUBLE:
    case JT_LONG:
        assert(width == 8);
        *(u8*)(&framePtr[slot]) = value;
        break;
    case JT_VOID:
    case JT_CLASS_OBJECT:
    case JT_THREAD:
    case JT_THREAD_GROUP:
    case JT_CLASS_LOADER:
    default:
        /* tags we never expect the debugger to set locals with */
        LOGE("ERROR: unhandled tag '%c'\n", tag);
        assert(false);
        break;
    }
}
+
+
+/*
+ * ===========================================================================
+ *      Debugger notification
+ * ===========================================================================
+ */
+
/*
 * Tell JDWP that a breakpoint address has been reached.
 *
 * "pcOffset" will be -1 for native methods.
 * "thisPtr" will be NULL for static methods.
 */
void dvmDbgPostLocationEvent(const Method* method, int pcOffset,
    Object* thisPtr, int eventFlags)
{
    JdwpLocation loc;

    if (dvmIsInterfaceClass(method->clazz))
        loc.typeTag = TT_INTERFACE;
    else
        loc.typeTag = TT_CLASS;
    loc.classId = classObjectToRefTypeId(method->clazz);
    loc.methodId = methodToMethodId(method);
    loc.idx = pcOffset;

    /*
     * Note we use "NoReg" so we don't keep track of references that are
     * never actually sent to the debugger.  The "thisPtr" is used to
     * compare against registered events.
     */

    if (dvmJdwpPostLocationEvent(gDvm.jdwpState, &loc,
            objectToObjectIdNoReg(thisPtr), eventFlags))
    {
        /* The post went out, so re-do the conversions with the registering
         * variants -- presumably so the IDs the debugger just received are
         * now tracked (NOTE(review): confirm against objectToObjectId). */
        classObjectToRefTypeId(method->clazz);
        objectToObjectId(thisPtr);
    }
}
+
/*
 * Tell JDWP that an exception has occurred.
 *
 * "throwFp"/"throwRelPc" identify the throw site; "catchFp"/"catchRelPc"
 * identify the handler, with catchRelPc < 0 meaning the exception is
 * uncaught (the catch location is zeroed in that case).
 */
void dvmDbgPostException(void* throwFp, int throwRelPc, void* catchFp,
    int catchRelPc, Object* exception)
{
    JdwpLocation throwLoc, catchLoc;
    const Method* throwMeth;
    const Method* catchMeth;

    /* build the throw location from the throwing frame */
    throwMeth = SAVEAREA_FROM_FP(throwFp)->method;
    if (dvmIsInterfaceClass(throwMeth->clazz))
        throwLoc.typeTag = TT_INTERFACE;
    else
        throwLoc.typeTag = TT_CLASS;
    throwLoc.classId = classObjectToRefTypeId(throwMeth->clazz);
    throwLoc.methodId = methodToMethodId(throwMeth);
    throwLoc.idx = throwRelPc;

    if (catchRelPc < 0) {
        /* uncaught: JDWP wants an all-zero catch location */
        memset(&catchLoc, 0, sizeof(catchLoc));
    } else {
        catchMeth = SAVEAREA_FROM_FP(catchFp)->method;
        if (dvmIsInterfaceClass(catchMeth->clazz))
            catchLoc.typeTag = TT_INTERFACE;
        else
            catchLoc.typeTag = TT_CLASS;
        catchLoc.classId = classObjectToRefTypeId(catchMeth->clazz);
        catchLoc.methodId = methodToMethodId(catchMeth);
        catchLoc.idx = catchRelPc;
    }

    /* need this for InstanceOnly filters */
    Object* thisObj = getThisObject(throwFp);

    dvmJdwpPostException(gDvm.jdwpState, &throwLoc, objectToObjectId(exception),
        classObjectToRefTypeId(exception->clazz), &catchLoc,
        objectToObjectId(thisObj));
}
+
+/*
+ * Tell JDWP and/or DDMS that a thread has started.
+ */
+void dvmDbgPostThreadStart(Thread* thread)
+{
+    if (gDvm.debuggerActive) {
+        dvmJdwpPostThreadChange(gDvm.jdwpState,
+            objectToObjectId(thread->threadObj), true);
+    }
+    if (gDvm.ddmThreadNotification)
+        dvmDdmSendThreadNotification(thread, true);
+}
+
+/*
+ * Tell JDWP and/or DDMS that a thread has gone away.
+ */
+void dvmDbgPostThreadDeath(Thread* thread)
+{
+    if (gDvm.debuggerActive) {
+        dvmJdwpPostThreadChange(gDvm.jdwpState,
+            objectToObjectId(thread->threadObj), false);
+    }
+    if (gDvm.ddmThreadNotification)
+        dvmDdmSendThreadNotification(thread, false);
+}
+
+/*
+ * Tell JDWP that a new class has been prepared.
+ */
+void dvmDbgPostClassPrepare(ClassObject* clazz)
+{
+    int tag;
+    char* signature;
+
+    if (dvmIsInterfaceClass(clazz))
+        tag = TT_INTERFACE;
+    else
+        tag = TT_CLASS;
+
+    // TODO - we currently always send both "verified" and "prepared" since
+    // debuggers seem to like that.  There might be some advantage to honesty,
+    // since the class may not yet be verified.
+    signature = generateJNISignature(clazz);
+    dvmJdwpPostClassPrepare(gDvm.jdwpState, tag, classObjectToRefTypeId(clazz),
+        signature, CS_VERIFIED | CS_PREPARED);
+    free(signature);
+}
+
/*
 * The JDWP event mechanism has registered an event with a LocationOnly
 * mod.  Tell the interpreter to call us if we hit the specified
 * address.
 *
 * Always returns true; dvmAddBreakAddr's result is not checked.
 */
bool dvmDbgWatchLocation(const JdwpLocation* pLoc)
{
    Method* method = methodIdToMethod(pLoc->classId, pLoc->methodId);
    assert(!dvmIsNativeMethod(method));
    dvmAddBreakAddr(method, pLoc->idx);
    return true;        /* assume success */
}
+
/*
 * An event with a LocationOnly mod has been removed.  Clear the
 * corresponding breakpoint address in the interpreter.
 */
void dvmDbgUnwatchLocation(const JdwpLocation* pLoc)
{
    Method* method = methodIdToMethod(pLoc->classId, pLoc->methodId);
    assert(!dvmIsNativeMethod(method));
    dvmClearBreakAddr(method, pLoc->idx);
}
+
+/*
+ * The JDWP event mechanism has registered a single-step event.  Tell
+ * the interpreter about it.
+ *
+ * Returns "true" on success.  The thread list lock is held only for the
+ * duration of the setup.
+ */
+bool dvmDbgConfigureStep(ObjectId threadId, enum JdwpStepSize size,
+    enum JdwpStepDepth depth)
+{
+    Object* threadObj;
+    Thread* thread;
+    bool result = false;
+
+    threadObj = objectIdToObject(threadId);
+    assert(threadObj != NULL);
+
+    /*
+     * Get a pointer to the Thread struct for this ID.  The pointer will
+     * be used strictly for comparisons against the current thread pointer
+     * after the setup is complete, so we can safely release the lock.
+     */
+    dvmLockThreadList(NULL);
+    thread = threadObjToThread(threadObj);
+
+    if (thread == NULL) {
+        /* thread must have exited between the request and now */
+        LOGE("Thread for single-step not found\n");
+        goto bail;
+    }
+    if (!dvmIsSuspended(thread)) {
+        /* a step request is only valid against a suspended thread */
+        LOGE("Thread for single-step not suspended\n");
+        assert(!"non-susp step");      // I want to know if this can happen
+        goto bail;
+    }
+
+    assert(dvmIsSuspended(thread));
+    if (!dvmAddSingleStep(thread, size, depth))
+        goto bail;
+
+    result = true;
+
+bail:
+    dvmUnlockThreadList();
+    return result;
+}
+
+/*
+ * A single-step event has been removed.
+ */
+void dvmDbgUnconfigureStep(ObjectId threadId)
+{
+    /* step control is currently global, so the thread ID is irrelevant */
+    UNUSED_PARAMETER(threadId);
+
+    dvmClearSingleStep(NULL);
+}
+
+/*
+ * Invoke a method in a thread that has been stopped on a breakpoint or
+ * other debugger event.  (This function is called from the JDWP thread.)
+ *
+ * On return, *pResultTag / *pResultValue describe the invocation result
+ * and *pExceptObj identifies any exception the call raised (0 if none).
+ *
+ * Note that access control is not enforced, per spec.
+ */
+JdwpError dvmDbgInvokeMethod(ObjectId threadId, ObjectId objectId,
+    RefTypeId classId, MethodId methodId, u4 numArgs, ObjectId* argArray,
+    u4 options, u1* pResultTag, u8* pResultValue, ObjectId* pExceptObj)
+{
+    Object* threadObj = objectIdToObject(threadId);
+    Thread* targetThread;
+    JdwpError err = ERR_NONE;
+
+    dvmLockThreadList(NULL);
+
+    targetThread = threadObjToThread(threadObj);
+    if (targetThread == NULL) {
+        err = ERR_INVALID_THREAD;       /* thread does not exist */
+        dvmUnlockThreadList();
+        goto bail;
+    }
+    if (!targetThread->invokeReq.ready) {
+        err = ERR_INVALID_THREAD;       /* thread not stopped by event */
+        dvmUnlockThreadList();
+        goto bail;
+    }
+
+    /*
+     * TODO: ought to screen the various IDs, and verify that the argument
+     * list is valid.
+     */
+
+    /*
+     * Fill in the request; dvmDbgExecuteMethod() consumes these fields
+     * in the target thread once it resumes.
+     */
+    targetThread->invokeReq.obj = objectIdToObject(objectId);
+    targetThread->invokeReq.thread = threadObj;
+    targetThread->invokeReq.clazz = refTypeIdToClassObject(classId);
+    targetThread->invokeReq.method = methodIdToMethod(classId, methodId);
+    targetThread->invokeReq.numArgs = numArgs;
+    targetThread->invokeReq.argArray = argArray;
+    targetThread->invokeReq.options = options;
+    targetThread->invokeReq.invokeNeeded = true;
+
+    /*
+     * This is a bit risky -- if the thread goes away we're sitting high
+     * and dry -- but we must release this before the dvmResumeAllThreads
+     * call, and it's unwise to hold it during dvmWaitForSuspend.
+     */
+    dvmUnlockThreadList();
+
+    /*
+     * We change our thread status (which should be THREAD_RUNNING) so the
+     * VM can suspend for a GC if the invoke request causes us to run out
+     * of memory.  It's also a good idea to change it before locking the
+     * invokeReq mutex, although that should never be held for long.
+     */
+    Thread* self = dvmThreadSelf();
+    int oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
+
+    LOGV("    Transferring control to event thread\n");
+    dvmLockMutex(&targetThread->invokeReq.lock);
+
+    if ((options & INVOKE_SINGLE_THREADED) == 0) {
+        LOGV("      Resuming all threads\n");
+        dvmResumeAllThreads(SUSPEND_FOR_DEBUG_EVENT);
+    } else {
+        LOGV("      Resuming event thread only\n");
+        dvmResumeThread(targetThread);
+    }
+
+    /*
+     * Wait for the request to finish executing.
+     */
+    while (targetThread->invokeReq.invokeNeeded) {
+        pthread_cond_wait(&targetThread->invokeReq.cv,
+                          &targetThread->invokeReq.lock);
+    }
+    dvmUnlockMutex(&targetThread->invokeReq.lock);
+    LOGV("    Control has returned from event thread\n");
+
+    /* wait for thread to re-suspend itself */
+    dvmWaitForSuspend(targetThread);
+
+    /*
+     * Done waiting, switch back to RUNNING.
+     */
+    dvmChangeStatus(self, oldStatus);
+
+    /*
+     * Suspend the threads.  We waited for the target thread to suspend
+     * itself, so all we need to do is suspend the others.
+     *
+     * The suspendAllThreads() call will double-suspend the event thread,
+     * so we want to resume the target thread once to keep the books straight.
+     */
+    if ((options & INVOKE_SINGLE_THREADED) == 0) {
+        LOGV("      Suspending all threads\n");
+        dvmSuspendAllThreads(SUSPEND_FOR_DEBUG_EVENT);
+        LOGV("      Resuming event thread to balance the count\n");
+        dvmResumeThread(targetThread);
+    }
+
+    /*
+     * Set up the result.
+     *
+     * NOTE(review): invokeReq is read here without re-taking the thread
+     * list lock -- this assumes targetThread is still valid (see the
+     * "bit risky" comment above); confirm the thread cannot exit first.
+     */
+    *pResultTag = targetThread->invokeReq.resultTag;
+    if (isTagPrimitive(targetThread->invokeReq.resultTag))
+        *pResultValue = targetThread->invokeReq.resultValue.j;
+    else
+        *pResultValue = objectToObjectId(targetThread->invokeReq.resultValue.l);
+    *pExceptObj = targetThread->invokeReq.exceptObj;
+    err = targetThread->invokeReq.err;
+
+bail:
+    return err;
+}
+
+/*
+ * Map the method's declared return type to a JDWP value tag.
+ */
+static u1 resultTagFromSignature(const Method* method)
+{
+    return dvmDbgGetSignatureTag(dexProtoGetReturnType(&method->prototype));
+}
+
+/*
+ * Execute the method described by "*pReq".
+ *
+ * Runs in the event (target) thread, servicing the request set up by
+ * dvmDbgInvokeMethod() in the JDWP thread.  Results are written back
+ * into "*pReq" for the JDWP thread to pick up.
+ */
+void dvmDbgExecuteMethod(DebugInvokeReq* pReq)
+{
+    Thread* self = dvmThreadSelf();
+    const Method* meth;
+    Object* oldExcept;
+
+    /*
+     * We can be called while an exception is pending in the VM.  We need
+     * to preserve that across the method invocation.
+     */
+    oldExcept = dvmGetException(self);
+
+    /*
+     * Translate the method through the vtable, unless we're calling a
+     * static method or the debugger wants to suppress it.
+     */
+    if ((pReq->options & INVOKE_NONVIRTUAL) != 0 || pReq->obj == NULL) {
+        meth = pReq->method;
+    } else {
+        meth = dvmGetVirtualizedMethod(pReq->clazz, pReq->method);
+    }
+    assert(meth != NULL);
+
+    /* argArray is reinterpreted as jvalues below, so the sizes must match */
+    assert(sizeof(jvalue) == sizeof(u8));
+
+    IF_LOGV() {
+        char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
+        LOGV("JDWP invoking method %s.%s %s\n",
+            meth->clazz->descriptor, meth->name, desc);
+        free(desc);
+    }
+
+    dvmCallMethodA(self, meth, pReq->obj, &pReq->resultValue,
+        (jvalue*)pReq->argArray);
+    pReq->exceptObj = objectToObjectId(dvmGetException(self));
+    pReq->resultTag = resultTagFromSignature(meth);
+    if (pReq->exceptObj != 0) {
+        /* the call threw; clear it so it doesn't leak into our caller */
+        LOGD("  JDWP invocation returning with exceptObj=%p\n",
+            dvmGetException(self));
+        dvmClearException(self);
+        /*
+         * Nothing should try to use this, but it looks like something is.
+         * Make it null to be safe.
+         */
+        pReq->resultValue.j = 0; /*0xadadadad;*/
+    } else if (pReq->resultTag == JT_OBJECT) {
+        /* if no exception thrown, examine object result more closely */
+        u1 newTag = resultTagFromObject(pReq->resultValue.l);
+        if (newTag != pReq->resultTag) {
+            LOGVV("  JDWP promoted result from %d to %d\n",
+                pReq->resultTag, newTag);
+            pReq->resultTag = newTag;
+        }
+    }
+
+    /* restore any exception that was pending when we were called */
+    if (oldExcept != NULL)
+        dvmSetException(self, oldExcept);
+}
+
+// for dvmAddressSetForLine: tracks the run of bytecode addresses currently
+// being accumulated for the requested source line
+typedef struct AddressSetContext {
+    bool lastAddressValid;  /* true while inside a run for "lineNum" */
+    u4 lastAddress;         /* first address of the current run */
+    u4 lineNum;             /* source line whose addresses we want */
+    AddressSet *pSet;       /* output bitmap of bytecode addresses */
+} AddressSetContext;
+
+// for dvmAddressSetForLine: position-table callback that collects the
+// address ranges belonging to the target line
+static int addressSetCb (void *cnxt, u4 address, u4 lineNum)
+{
+    AddressSetContext *pCtx = (AddressSetContext *) cnxt;
+
+    if (lineNum == pCtx->lineNum) {
+        /* start a run at this address, unless one is already open
+           (shouldn't really happen) */
+        if (!pCtx->lastAddressValid) {
+            pCtx->lastAddress = address;
+            pCtx->lastAddressValid = true;
+        }
+    } else if (pCtx->lastAddressValid) {
+        /* line changed; mark everything from the run's start up to here */
+        u4 addr;
+        for (addr = pCtx->lastAddress; addr < address; addr++) {
+            dvmAddressSetSet(pCtx->pSet, addr);
+        }
+        pCtx->lastAddressValid = false;
+    }
+
+    /* keep scanning; a line may have multiple entries in the table */
+    return 0;
+}
+/*
+ * Build up a set of bytecode addresses associated with a line number.
+ *
+ * Returns a newly-allocated AddressSet, or NULL on allocation failure.
+ * The caller owns the result.
+ */
+const AddressSet *dvmAddressSetForLine(const Method* method, int line)
+{
+    AddressSet *result;
+    const DexFile *pDexFile = method->clazz->pDvmDex->pDexFile;
+    u4 insnsSize = dvmGetMethodInsnsSize(method);
+    AddressSetContext context;
+
+    /* one bit per bytecode unit; "+1" covers insnsSize not divisible by 8 */
+    result = calloc(1, sizeof(AddressSet) + (insnsSize/8) + 1);
+    if (result == NULL) {
+        /* original code dereferenced this without checking; fail gracefully */
+        LOGE("Alloc failure in dvmAddressSetForLine\n");
+        return NULL;
+    }
+    result->setSize = insnsSize;
+
+    memset(&context, 0, sizeof(context));
+    context.pSet = result;
+    context.lineNum = line;
+    context.lastAddressValid = false;
+
+    dexDecodeDebugInfo(pDexFile, dvmGetMethodCode(method),
+        method->clazz->descriptor,
+        method->prototype.protoIdx,
+        method->accessFlags,
+        addressSetCb, NULL, &context);
+
+    /* If the line number was the last in the position table, the final
+       run is still open; mark it through the end of the method. */
+    if (context.lastAddressValid) {
+        u4 i;
+        for (i = context.lastAddress; i < insnsSize; i++) {
+            dvmAddressSetSet(result, i);
+        }
+    }
+
+    return result;
+}
+
+
+/*
+ * ===========================================================================
+ *      Dalvik Debug Monitor support
+ * ===========================================================================
+ */
+
+/*
+ * A DDM packet arrived over the JDWP connection; forward it to the VM's
+ * DDM dispatcher.
+ */
+bool dvmDbgDdmHandlePacket(const u1* buf, int dataLen, u1** pReplyBuf,
+    int* pReplyLen)
+{
+    return dvmDdmHandlePacket(buf, dataLen, pReplyBuf, pReplyLen);
+}
+
+/*
+ * The first DDM packet has arrived over JDWP; announce the connection.
+ */
+void dvmDbgDdmConnected(void)
+{
+    dvmDdmConnected();
+}
+
+/*
+ * The JDWP connection has dropped; pass the news along.
+ */
+void dvmDbgDdmDisconnected(void)
+{
+    dvmDdmDisconnected();
+}
+
+/*
+ * Send up a JDWP event packet with a DDM chunk in it.  The request is
+ * dropped (with a log message) if JDWP isn't up yet.
+ */
+void dvmDbgDdmSendChunk(int type, int len, const u1* buf)
+{
+    if (gDvm.jdwpState == NULL) {
+        LOGI("Ignoring DDM send req for type=0x%08x len=%d\n", type, len);
+        return;
+    }
+
+    dvmJdwpDdmSendChunk(gDvm.jdwpState, type, len, buf);
+}
+
diff --git a/vm/Debugger.h b/vm/Debugger.h
new file mode 100644
index 0000000..fcf07c7
--- /dev/null
+++ b/vm/Debugger.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik-specific side of debugger support.  (The JDWP code is intended to
+ * be relatively generic.)
+ */
+#ifndef _DALVIK_DEBUGGER
+#define _DALVIK_DEBUGGER
+
+#include "Common.h"
+#include "Misc.h"
+#include "jdwp/Jdwp.h"
+#include <pthread.h>
+
+/* fwd decl */
+struct Object;
+struct ClassObject;
+struct Method;
+struct Thread;
+
+/*
+ * used by StepControl to track a set of addresses associated with
+ * a single line.
+ */
+typedef struct AddressSet {
+    u4 setSize;     /* number of addressable units covered (one bit each) */
+    u1 set[1];      /* bitmap; struct is allocated oversize to hold it */
+} AddressSet;
+
+/* Mark bytecode address "toSet" in the set; out-of-range values are ignored. */
+INLINE void dvmAddressSetSet(AddressSet *pSet, u4 toSet)
+{
+    if (toSet < pSet->setSize)
+        pSet->set[toSet >> 3] |= 1 << (toSet & 0x07);
+}
+
+/* Return true if bytecode address "toGet" is marked in the set. */
+INLINE bool dvmAddressSetGet(const AddressSet *pSet, u4 toGet)
+{
+    if (toGet >= pSet->setSize)
+        return false;
+    return (pSet->set[toGet >> 3] & (1 << (toGet & 0x07))) != 0;
+}
+
+/*
+ * Single-step management.  (Currently a single global step request --
+ * see dvmDbgUnconfigureStep, which clears it without a thread ID.)
+ */
+typedef struct StepControl {
+    /* request, as supplied by the debugger */
+    enum JdwpStepSize   size;
+    enum JdwpStepDepth  depth;
+    struct Thread*      thread;         /* don't deref; for comparison only */
+
+    /* current state */
+    bool                active;
+    const struct Method* method;
+    int                 line;           /* line #; could be -1 */
+    const AddressSet*   pAddressSet;    /* if non-null, address set for line */
+    int                 frameDepth;
+} StepControl;
+
+/*
+ * Invoke-during-breakpoint support.  Filled in by the JDWP thread
+ * (dvmDbgInvokeMethod) and executed by the stopped event thread
+ * (dvmDbgExecuteMethod), which writes the results back here.
+ */
+typedef struct DebugInvokeReq {
+    /* boolean; only set when we're in the tail end of an event handler */
+    bool ready;
+
+    /* boolean; set if the JDWP thread wants this thread to do work;
+     * the JDWP thread waits on "cv" until it clears */
+    bool invokeNeeded;
+
+    /* request */
+    struct Object*      obj;        /* not used for ClassType.InvokeMethod */
+    struct Object*      thread;
+    struct ClassObject* clazz;
+    struct Method*      method;
+    u4                  numArgs;
+    u8*                 argArray;   /* will be NULL if numArgs==0 */
+    u4                  options;
+
+    /* result, filled in by the event thread */
+    JdwpError           err;
+    u1                  resultTag;
+    JValue              resultValue;
+    ObjectId            exceptObj;
+
+    /* condition variable to wait on while the method executes */
+    pthread_mutex_t     lock;       /* guards invokeNeeded */
+    pthread_cond_t      cv;
+} DebugInvokeReq;
+
+/* system init/shutdown */
+bool dvmDebuggerStartup(void);
+void dvmDebuggerShutdown(void);
+
+void dvmDbgInitMutex(pthread_mutex_t* pMutex);
+void dvmDbgLockMutex(pthread_mutex_t* pMutex);
+void dvmDbgUnlockMutex(pthread_mutex_t* pMutex);
+void dvmDbgInitCond(pthread_cond_t* pCond);
+void dvmDbgCondWait(pthread_cond_t* pCond, pthread_mutex_t* pMutex);
+void dvmDbgCondSignal(pthread_cond_t* pCond);
+void dvmDbgCondBroadcast(pthread_cond_t* pCond);
+
+/*
+ * Return the DebugInvokeReq for the current thread.
+ */
+DebugInvokeReq* dvmDbgGetInvokeReq(void);
+
+/*
+ * Enable/disable breakpoints and step modes.  Used to provide a heads-up
+ * when the debugger attaches.
+ */
+void dvmDbgConnected(void);
+void dvmDbgActive(void);
+void dvmDbgDisconnected(void);
+
+/*
+ * Returns "true" if a debugger is connected.  Returns "false" if it's
+ * just DDM.
+ */
+bool dvmDbgIsDebuggerConnected(void);
+
+/*
+ * Time, in milliseconds, since the last debugger activity.  Does not
+ * include DDMS activity.  Returns -1 if there has been no activity.
+ * Returns 0 if we're in the middle of handling a debugger request.
+ */
+s8 dvmDbgLastDebuggerActivity(void);
+
+/*
+ * Block/allow GC depending on what we're doing.  These return the old
+ * status, which can be fed to dvmDbgThreadGoing() to restore the previous
+ * mode.
+ */
+int dvmDbgThreadRunning(void);
+int dvmDbgThreadWaiting(void);
+int dvmDbgThreadContinuing(int status);
+
+/*
+ * The debugger wants the VM to exit.
+ */
+void dvmDbgExit(int status);
+
+/*
+ * Class, Object, Array
+ */
+const char* dvmDbgGetClassDescriptor(RefTypeId id);
+RefTypeId dvmDbgGetSuperclass(RefTypeId id);
+ObjectId dvmDbgGetClassLoader(RefTypeId id);
+u4 dvmDbgGetAccessFlags(RefTypeId id);
+bool dvmDbgIsInterface(RefTypeId id);
+void dvmDbgGetClassList(u4* pNumClasses, RefTypeId** pClassRefBuf);
+void dvmDbgGetVisibleClassList(ObjectId classLoaderId, u4* pNumClasses,
+        RefTypeId** pClassRefBuf);
+void dvmDbgGetClassInfo(RefTypeId classId, u1* pTypeTag, u4* pStatus,
+    char** pSignature);
+bool dvmDbgFindLoadedClassBySignature(const char* classDescriptor,
+        RefTypeId* pRefTypeId);
+void dvmDbgGetObjectType(ObjectId objectId, u1* pRefTypeTag,
+    RefTypeId* pRefTypeId);
+u1 dvmDbgGetClassObjectType(RefTypeId refTypeId);
+char* dvmDbgGetSignature(RefTypeId refTypeId);
+const char* dvmDbgGetSourceFile(RefTypeId refTypeId);
+char* dvmDbgGetObjectTypeName(ObjectId objectId);
+int dvmDbgGetSignatureTag(const char* signature);
+int dvmDbgGetObjectTag(ObjectId objectId, const char* type);
+int dvmDbgGetTagWidth(int tag);
+
+int dvmDbgGetArrayLength(ObjectId arrayId);
+int dvmDbgGetArrayElementTag(ObjectId arrayId);
+bool dvmDbgOutputArray(ObjectId arrayId, int firstIndex, int count,
+    ExpandBuf* pReply);
+bool dvmDbgSetArrayElements(ObjectId arrayId, int firstIndex, int count,
+    const u1* buf);
+
+ObjectId dvmDbgCreateString(const char* str);
+
+bool dvmDbgMatchType(RefTypeId instClassId, RefTypeId classId);
+
+/*
+ * Method and Field
+ */
+const char* dvmDbgGetMethodName(RefTypeId refTypeId, MethodId id);
+void dvmDbgOutputAllFields(RefTypeId refTypeId, bool withGeneric,
+    ExpandBuf* pReply);
+void dvmDbgOutputAllMethods(RefTypeId refTypeId, bool withGeneric,
+    ExpandBuf* pReply);
+void dvmDbgOutputAllInterfaces(RefTypeId refTypeId, ExpandBuf* pReply);
+void dvmDbgOutputLineTable(RefTypeId refTypeId, MethodId methodId,
+    ExpandBuf* pReply);
+void dvmDbgOutputVariableTable(RefTypeId refTypeId, MethodId id,
+    bool withGeneric, ExpandBuf* pReply);
+
+int dvmDbgGetFieldTag(ObjectId objId, FieldId fieldId);
+int dvmDbgGetStaticFieldTag(RefTypeId refTypeId, FieldId fieldId);
+void dvmDbgGetFieldValue(ObjectId objId, FieldId fieldId, u1* ptr, int width);
+void dvmDbgSetFieldValue(ObjectId objectId, FieldId fieldId, u8 value,
+    int width);
+void dvmDbgGetStaticFieldValue(RefTypeId refTypeId, FieldId fieldId, u1* ptr,
+    int width);
+void dvmDbgSetStaticFieldValue(RefTypeId refTypeId, FieldId fieldId,
+    u8 rawValue, int width);
+
+char* dvmDbgStringToUtf8(ObjectId strId);
+
+/*
+ * Thread, ThreadGroup, Frame
+ */
+char* dvmDbgGetThreadName(ObjectId threadId);
+ObjectId dvmDbgGetThreadGroup(ObjectId threadId);
+char* dvmDbgGetThreadGroupName(ObjectId threadGroupId);
+ObjectId dvmDbgGetThreadGroupParent(ObjectId threadGroupId);
+ObjectId dvmDbgGetSystemThreadGroupId(void);
+ObjectId dvmDbgGetMainThreadGroupId(void);
+
+bool dvmDbgGetThreadStatus(ObjectId threadId, u4* threadStatus,
+    u4* suspendStatus);
+u4 dvmDbgGetThreadSuspendCount(ObjectId threadId);
+bool dvmDbgThreadExists(ObjectId threadId);
+bool dvmDbgIsSuspended(ObjectId threadId);
+//void dvmDbgWaitForSuspend(ObjectId threadId);
+void dvmDbgGetThreadGroupThreads(ObjectId threadGroupId,
+    ObjectId** ppThreadIds, u4* pThreadCount);
+void dvmDbgGetAllThreads(ObjectId** ppThreadIds, u4* pThreadCount);
+int dvmDbgGetThreadFrameCount(ObjectId threadId);
+bool dvmDbgGetThreadFrame(ObjectId threadId, int num, FrameId* pFrameId,
+    JdwpLocation* pLoc);
+
+ObjectId dvmDbgGetThreadSelfId(void);
+void dvmDbgSuspendVM(bool isEvent);
+void dvmDbgResumeVM(void);
+void dvmDbgSuspendThread(ObjectId threadId);
+void dvmDbgResumeThread(ObjectId threadId);
+void dvmDbgSuspendSelf(void);
+
+bool dvmDbgGetThisObject(ObjectId threadId, FrameId frameId, ObjectId* pThisId);
+void dvmDbgGetLocalValue(ObjectId threadId, FrameId frameId, int slot,
+    u1 tag, u1* buf, int expectedLen);
+void dvmDbgSetLocalValue(ObjectId threadId, FrameId frameId, int slot,
+    u1 tag, u8 value, int width);
+
+
+/*
+ * Debugger notification
+ */
+void dvmDbgPostLocationEvent(const struct Method* method, int pcOffset,
+    struct Object* thisPtr, int eventFlags);
+void dvmDbgPostException(void* throwFp, int throwRelPc, void* catchFp,
+    int catchRelPc, struct Object* exception);
+void dvmDbgPostThreadStart(struct Thread* thread);
+void dvmDbgPostThreadDeath(struct Thread* thread);
+void dvmDbgPostClassPrepare(struct ClassObject* clazz);
+// FieldAccess, FieldModification
+
+/* for "eventFlags"; values are distinct bits, so several can be OR'd
+ * together in one call */
+enum {
+    DBG_BREAKPOINT      = 0x01,
+    DBG_SINGLE_STEP     = 0x02,
+    DBG_METHOD_ENTRY    = 0x04,
+    DBG_METHOD_EXIT     = 0x08,
+};
+
+bool dvmDbgWatchLocation(const JdwpLocation* pLoc);
+void dvmDbgUnwatchLocation(const JdwpLocation* pLoc);
+bool dvmDbgConfigureStep(ObjectId threadId, enum JdwpStepSize size,
+    enum JdwpStepDepth depth);
+void dvmDbgUnconfigureStep(ObjectId threadId);
+
+JdwpError dvmDbgInvokeMethod(ObjectId threadId, ObjectId objectId,
+    RefTypeId classId, MethodId methodId, u4 numArgs, u8* argArray,
+    u4 options, u1* pResultTag, u8* pResultValue, ObjectId* pExceptObj);
+void dvmDbgExecuteMethod(DebugInvokeReq* pReq);
+
+/* Make an AddressSet for a line, for single stepping */
+const AddressSet *dvmAddressSetForLine(const struct Method* method, int line);
+
+/*
+ * DDM support.
+ */
+bool dvmDbgDdmHandlePacket(const u1* buf, int dataLen, u1** pReplyBuf,
+    int* pReplyLen);
+void dvmDbgDdmConnected(void);
+void dvmDbgDdmDisconnected(void);
+void dvmDbgDdmSendChunk(int type, int len, const u1* buf);
+
+/* pack a 4-character chunk name, e.g. CHUNK_TYPE("HELO"), into a
+ * big-endian 32-bit type code */
+#define CHUNK_TYPE(_name) \
+    ((_name)[0] << 24 | (_name)[1] << 16 | (_name)[2] << 8 | (_name)[3])
+
+#endif /*_DALVIK_DEBUGGER*/
diff --git a/vm/DvmDex.c b/vm/DvmDex.c
new file mode 100644
index 0000000..8085412
--- /dev/null
+++ b/vm/DvmDex.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * VM-specific state associated with a DEX file.
+ */
+#include "Dalvik.h"
+
+/*
+ * Create auxiliary data structures.
+ *
+ * We need a 4-byte pointer for every reference to a class, method, field,
+ * or string constant.  Summed up over all loaded DEX files (including the
+ * whoppers in the bootstrap class path), this adds up to be quite a bit
+ * of native memory.
+ *
+ * For more traditional VMs these values could be stuffed into the loaded
+ * class file constant pool area, but we don't have that luxury since our
+ * classes are memory-mapped read-only.
+ *
+ * The DEX optimizer will remove the need for some of these (e.g. we won't
+ * use the entry for virtual methods that are only called through
+ * invoke-virtual-quick), creating the possibility of some space reduction
+ * at dexopt time.
+ *
+ * Returns a newly-allocated DvmDex, or NULL on allocation failure.
+ */
+static DvmDex* allocateAuxStructures(DexFile* pDexFile)
+{
+    DvmDex* pDvmDex;
+    const DexHeader* pHeader;
+
+    pDvmDex = (DvmDex*) calloc(1, sizeof(DvmDex));
+    if (pDvmDex == NULL)
+        return NULL;
+
+    pDvmDex->pDexFile = pDexFile;
+    pDvmDex->pHeader = pDexFile->pHeader;
+
+    pHeader = pDvmDex->pHeader;
+
+    /* one resolved-entry slot per string/type/method/field ID */
+    pDvmDex->pResStrings = (struct StringObject**)
+        calloc(pHeader->stringIdsSize, sizeof(struct StringObject*));
+
+    pDvmDex->pResClasses = (struct ClassObject**)
+        calloc(pHeader->typeIdsSize, sizeof(struct ClassObject*));
+
+    pDvmDex->pResMethods = (struct Method**)
+        calloc(pHeader->methodIdsSize, sizeof(struct Method*));
+
+    pDvmDex->pResFields = (struct Field**)
+        calloc(pHeader->fieldIdsSize, sizeof(struct Field*));
+
+    LOGV("+++ DEX %p: allocateAux %d+%d+%d+%d * 4 = %d bytes\n",
+        pDvmDex,
+        pHeader->stringIdsSize, pHeader->typeIdsSize,
+        pHeader->methodIdsSize, pHeader->fieldIdsSize,
+        (pHeader->stringIdsSize + pHeader->typeIdsSize +
+         pHeader->methodIdsSize + pHeader->fieldIdsSize) * 4);
+
+    pDvmDex->pInterfaceCache = dvmAllocAtomicCache(DEX_INTERFACE_CACHE_SIZE);
+
+    if (pDvmDex->pResStrings == NULL ||
+        pDvmDex->pResClasses == NULL ||
+        pDvmDex->pResMethods == NULL ||
+        pDvmDex->pResFields == NULL ||
+        pDvmDex->pInterfaceCache == NULL)
+    {
+        LOGE("Alloc failure in allocateAuxStructures\n");
+        /* release everything we did manage to allocate, including the
+         * interface cache (previously leaked on partial failure) */
+        if (pDvmDex->pInterfaceCache != NULL)
+            dvmFreeAtomicCache(pDvmDex->pInterfaceCache);
+        free(pDvmDex->pResStrings);
+        free(pDvmDex->pResClasses);
+        free(pDvmDex->pResMethods);
+        free(pDvmDex->pResFields);
+        free(pDvmDex);
+        return NULL;
+    }
+
+    return pDvmDex;
+}
+
+/*
+ * Given an open optimized DEX file, map it into read-only shared memory
+ * and parse the contents.
+ *
+ * On success, stores the new DvmDex in "*ppDvmDex" and returns 0.
+ * Returns nonzero on error.
+ */
+int dvmDexFileOpenFromFd(int fd, DvmDex** ppDvmDex)
+{
+    DvmDex* pDvmDex;
+    DexFile* pDexFile;
+    MemMapping memMap;
+
+    if (lseek(fd, 0, SEEK_SET) < 0) {
+        LOGE("lseek rewind failed\n");
+        return -1;
+    }
+
+    if (sysMapFileInShmem(fd, &memMap) != 0) {
+        LOGE("Unable to map file\n");
+        return -1;
+    }
+
+    pDexFile = dexFileParse(memMap.addr, memMap.length);
+    if (pDexFile == NULL) {
+        LOGE("DEX parse failed\n");
+        sysReleaseShmem(&memMap);
+        return -1;
+    }
+
+    pDvmDex = allocateAuxStructures(pDexFile);
+    if (pDvmDex == NULL) {
+        dexFileFree(pDexFile);
+        sysReleaseShmem(&memMap);
+        return -1;
+    }
+
+    /* tuck this into the DexFile so it gets released later */
+    sysCopyMap(&pDvmDex->memMap, &memMap);
+    *ppDvmDex = pDvmDex;
+    return 0;
+}
+
+/*
+ * Create a DexFile structure for a "partial" DEX -- one that is in the
+ * process of being optimized.  The optimization header isn't finished
+ * and we won't have any of the auxiliary data tables, so we have to do
+ * the initialization slightly differently.
+ *
+ * On success, stores the new DvmDex in "*ppDvmDex" and returns 0.
+ * Returns nonzero on error.
+ */
+int dvmDexFileOpenPartial(const void* addr, int len, DvmDex** ppDvmDex)
+{
+    DexFile* pDexFile = dexFileParse(addr, len);
+    if (pDexFile == NULL) {
+        LOGE("DEX parse failed\n");
+        return -1;
+    }
+
+    DvmDex* pDvmDex = allocateAuxStructures(pDexFile);
+    if (pDvmDex == NULL) {
+        dexFileFree(pDexFile);
+        return -1;
+    }
+
+    *ppDvmDex = pDvmDex;
+    return 0;
+}
+
+/*
+ * Free up the DexFile and any associated data structures.
+ *
+ * Note we may be called with a partially-initialized DvmDex.
+ */
+void dvmDexFileFree(DvmDex* pDvmDex)
+{
+    if (pDvmDex == NULL)
+        return;
+
+    /* free the parsed DEX structure before releasing the mapping below */
+    dexFileFree(pDvmDex->pDexFile);
+
+    LOGV("+++ DEX %p: freeing aux structs\n", pDvmDex);
+    free(pDvmDex->pResStrings);
+    free(pDvmDex->pResClasses);
+    free(pDvmDex->pResMethods);
+    free(pDvmDex->pResFields);
+    dvmFreeAtomicCache(pDvmDex->pInterfaceCache);
+
+    /* release the shared-memory mapping of the file contents, if any */
+    sysReleaseShmem(&pDvmDex->memMap);
+    free(pDvmDex);
+}
+
diff --git a/vm/DvmDex.h b/vm/DvmDex.h
new file mode 100644
index 0000000..2cd508c
--- /dev/null
+++ b/vm/DvmDex.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * The VM wraps some additional data structures around the DexFile.  These
+ * are defined here.
+ */
+#ifndef _DALVIK_DVMDEX
+#define _DALVIK_DVMDEX
+
+#include "libdex/DexFile.h"
+
+/* extern */
+struct ClassObject;
+struct HashTable;
+struct InstField;
+struct Method;
+struct StringObject;
+
+
+/*
+ * Some additional VM data structures that are associated with the DEX file.
+ * The four "pRes*" tables start out zeroed (see allocateAuxStructures) and
+ * are filled in as items are resolved.
+ */
+typedef struct DvmDex {
+    /* pointer to the DexFile we're associated with */
+    DexFile*            pDexFile;
+
+    /* clone of pDexFile->pHeader (it's used frequently enough) */
+    const DexHeader*    pHeader;
+
+    /* interned strings; parallel to "stringIds" */
+    struct StringObject** pResStrings;
+
+    /* resolved classes; parallel to "typeIds" */
+    struct ClassObject** pResClasses;
+
+    /* resolved methods; parallel to "methodIds" */
+    struct Method**     pResMethods;
+
+    /* resolved instance fields; parallel to "fieldIds" */
+    /* (this holds both InstField and StaticField) */
+    struct Field**      pResFields;
+
+    /* interface method lookup cache */
+    struct AtomicCache* pInterfaceCache;
+
+    /* shared memory region with file contents */
+    MemMapping          memMap;
+} DvmDex;
+
+#if 0
+/*
+ * Retrieve the DvmDex from the DexFile.
+ */
+INLINE DvmDex* dvmDexFile(const DexFile* pDexFile) {
+    return (DvmDex*) pDexFile->auxData;
+}
+#endif
+
+/*
+ * Given a file descriptor for an open "optimized" DEX file, map it into
+ * memory and parse the contents.
+ *
+ * On success, returns 0 and sets "*ppDvmDex" to a newly-allocated DvmDex.
+ * On failure, returns a meaningful error code [currently just -1].
+ */
+int dvmDexFileOpenFromFd(int fd, DvmDex** ppDvmDex);
+
+/*
+ * Open a partial DEX file.  Only useful as part of the optimization process.
+ */
+int dvmDexFileOpenPartial(const void* addr, int len, DvmDex** ppDvmDex);
+
+/*
+ * Free a DvmDex structure, along with any associated structures.
+ */
+void dvmDexFileFree(DvmDex* pDvmDex);
+
+
+
+/*
+ * Return the requested item if it has been resolved, or NULL if it hasn't.
+ */
+INLINE struct StringObject* dvmDexGetResolvedString(const DvmDex* pDvmDex,
+    u4 stringIdx)
+{
+    struct StringObject** table = pDvmDex->pResStrings;
+
+    assert(stringIdx < pDvmDex->pHeader->stringIdsSize);
+    return table[stringIdx];
+}
+/* Return the resolved class for "classIdx", or NULL if not yet resolved. */
+INLINE struct ClassObject* dvmDexGetResolvedClass(const DvmDex* pDvmDex,
+    u4 classIdx)
+{
+    struct ClassObject** table = pDvmDex->pResClasses;
+
+    assert(classIdx < pDvmDex->pHeader->typeIdsSize);
+    return table[classIdx];
+}
+/* Return the resolved method for "methodIdx", or NULL if not yet resolved. */
+INLINE struct Method* dvmDexGetResolvedMethod(const DvmDex* pDvmDex,
+    u4 methodIdx)
+{
+    struct Method** table = pDvmDex->pResMethods;
+
+    assert(methodIdx < pDvmDex->pHeader->methodIdsSize);
+    return table[methodIdx];
+}
+/* Return the resolved field for "fieldIdx", or NULL if not yet resolved. */
+INLINE struct Field* dvmDexGetResolvedField(const DvmDex* pDvmDex,
+    u4 fieldIdx)
+{
+    struct Field** table = pDvmDex->pResFields;
+
+    assert(fieldIdx < pDvmDex->pHeader->fieldIdsSize);
+    return table[fieldIdx];
+}
+
+/*
+ * Update the resolved item table.  Resolution always produces the same
+ * result, so we're not worried about atomicity here.
+ */
+INLINE void dvmDexSetResolvedString(DvmDex* pDvmDex, u4 stringIdx,
+    struct StringObject* str)
+{
+    struct StringObject** table = pDvmDex->pResStrings;
+
+    assert(stringIdx < pDvmDex->pHeader->stringIdsSize);
+    table[stringIdx] = str;
+}
+/* Record the resolved class for "classIdx". */
+INLINE void dvmDexSetResolvedClass(DvmDex* pDvmDex, u4 classIdx,
+    struct ClassObject* clazz)
+{
+    struct ClassObject** table = pDvmDex->pResClasses;
+
+    assert(classIdx < pDvmDex->pHeader->typeIdsSize);
+    table[classIdx] = clazz;
+}
+/* Record the resolved method for "methodIdx". */
+INLINE void dvmDexSetResolvedMethod(DvmDex* pDvmDex, u4 methodIdx,
+    struct Method* method)
+{
+    struct Method** table = pDvmDex->pResMethods;
+
+    assert(methodIdx < pDvmDex->pHeader->methodIdsSize);
+    table[methodIdx] = method;
+}
+/* Record the resolved field for "fieldIdx". */
+INLINE void dvmDexSetResolvedField(DvmDex* pDvmDex, u4 fieldIdx,
+    struct Field* field)
+{
+    struct Field** table = pDvmDex->pResFields;
+
+    assert(fieldIdx < pDvmDex->pHeader->fieldIdsSize);
+    table[fieldIdx] = field;
+}
+
+
+#endif /*_DALVIK_DVMDEX*/
diff --git a/vm/Exception.c b/vm/Exception.c
new file mode 100644
index 0000000..9345ed4
--- /dev/null
+++ b/vm/Exception.c
@@ -0,0 +1,1134 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Exception handling.
+ */
+#include "Dalvik.h"
+#include "libdex/DexCatch.h"
+
+#include <stdlib.h>
+
+/*
+Notes on Exception Handling
+
+We have one fairly sticky issue to deal with: creating the exception stack
+trace.  The trouble is that we need the current value of the program
+counter for the method now being executed, but that's only held in a local
+variable or hardware register in the main interpreter loop.
+
+The exception mechanism requires that the current stack trace be associated
+with a Throwable at the time the Throwable is constructed.  The construction
+may or may not be associated with a throw.  We have three situations to
+consider:
+
+ (1) A Throwable is created with a "new Throwable" statement in the
+     application code, for immediate or deferred use with a "throw" statement.
+ (2) The VM throws an exception from within the interpreter core, e.g.
+     after an integer divide-by-zero.
+ (3) The VM throws an exception from somewhere deeper down, e.g. while
+     trying to link a class.
+
+We need to have the current value for the PC, which means that for
+situation (3) the interpreter loop must copy it to an externally-accessible
+location before handling any opcode that could cause the VM to throw
+an exception.  We can't store it globally, because the various threads
+would trample each other.  We can't store it in the Thread structure,
+because it'll get overwritten as soon as the Throwable constructor starts
+executing.  It needs to go on the stack, but our stack frames hold the
+caller's *saved* PC, not the current PC.
+
+Situation #1 doesn't require special handling.  Situation #2 could be dealt
+with by passing the PC into the exception creation function.  The trick
+is to solve situation #3 in a way that adds minimal overhead to common
+operations.  Making it more costly to throw an exception is acceptable.
+
+There are a few ways to deal with this:
+
+ (a) Change "savedPc" to "currentPc" in the stack frame.  All of the
+     stack logic gets offset by one frame.  The current PC is written
+     to the current stack frame when necessary.
+ (b) Write the current PC into the current stack frame, but without
+     replacing "savedPc".  The JNI local refs pointer, which is only
+     used for native code, can be overloaded to save space.
+ (c) In dvmThrowException(), push an extra stack frame on, with the
+     current PC in it.  The current PC is written into the Thread struct
+     when necessary, and copied out when the VM throws.
+ (d) Before doing something that might throw an exception, push a
+     temporary frame on with the saved PC in it.
+
+Solution (a) is the simplest, but breaks Dalvik's goal of mingling native
+and interpreted stacks.
+
+Solution (b) retains the simplicity of (a) without rearranging the stack,
+but now in some cases we're storing the PC twice, which feels wrong.
+
+Solution (c) usually works, because we push the saved PC onto the stack
+before the Throwable construction can overwrite the copy in Thread.  One
+way solution (c) could break is:
+ - Interpreter saves the PC
+ - Execute some bytecode, which runs successfully (and alters the saved PC)
+ - Throw an exception before re-saving the PC (i.e. in the same opcode)
+This is a risk for anything that could cause <clinit> to execute, e.g.
+executing a static method or accessing a static field.  Attempting to access
+a field that doesn't exist in a class that does exist might cause this.
+It may be possible to simply bracket the dvmCallMethod*() functions to
+save/restore it.
+
+Solution (d) incurs additional overhead, but may have other benefits (e.g.
+it's easy to find the stack frames that should be removed before storage
+in the Throwable).
+
+Current plan is option (b), because it's simple, fast, and doesn't change
+the way the stack works.
+*/
+
+/* fwd */
+static bool initException(Object* exception, const char* msg, Object* cause,
+    Thread* self);
+
+
+/*
+ * One-time init: cache the exception-related classes, the
+ * StackTraceElement constructor, and a few Throwable field offsets
+ * in gDvm.  Returns false if any essential item is missing.
+ */
+bool dvmExceptionStartup(void)
+{
+    gDvm.classJavaLangThrowable =
+        dvmFindSystemClassNoInit("Ljava/lang/Throwable;");
+    gDvm.classJavaLangStackTraceElement =
+        dvmFindSystemClassNoInit("Ljava/lang/StackTraceElement;");
+    gDvm.classJavaLangStackTraceElementArray =
+        dvmFindArrayClass("[Ljava/lang/StackTraceElement;", NULL);
+    if (gDvm.classJavaLangThrowable == NULL ||
+        gDvm.classJavaLangStackTraceElement == NULL ||
+        gDvm.classJavaLangStackTraceElementArray == NULL)
+    {
+        LOGE("Could not find one or more essential exception classes\n");
+        return false;
+    }
+
+    /*
+     * Cache the StackTraceElement constructor.  We keep a Method* here
+     * rather than a vtable offset, because constructors don't have
+     * vtable offsets.  (Also, since we're creating the object in
+     * question, it's impossible for anyone to sub-class it.)
+     */
+    Method* ctor = dvmFindDirectMethodByDescriptor(
+        gDvm.classJavaLangStackTraceElement,
+        "<init>", "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V");
+    if (ctor == NULL) {
+        LOGE("Unable to find constructor for StackTraceElement\n");
+        return false;
+    }
+    gDvm.methJavaLangStackTraceElement_init = ctor;
+
+    /* offset of the raw stack-data field */
+    gDvm.offJavaLangThrowable_stackState =
+        dvmFindFieldOffset(gDvm.classJavaLangThrowable,
+            "stackState", "Ljava/lang/Object;");
+    if (gDvm.offJavaLangThrowable_stackState < 0) {
+        LOGE("Unable to find Throwable.stackState\n");
+        return false;
+    }
+
+    /* offset of the message field, in case we want to show it */
+    gDvm.offJavaLangThrowable_message =
+        dvmFindFieldOffset(gDvm.classJavaLangThrowable,
+            "detailMessage", "Ljava/lang/String;");
+    if (gDvm.offJavaLangThrowable_message < 0) {
+        LOGE("Unable to find Throwable.detailMessage\n");
+        return false;
+    }
+
+    /* offset of the cause field, for exception chaining */
+    gDvm.offJavaLangThrowable_cause =
+        dvmFindFieldOffset(gDvm.classJavaLangThrowable,
+            "cause", "Ljava/lang/Throwable;");
+    if (gDvm.offJavaLangThrowable_cause < 0) {
+        LOGE("Unable to find Throwable.cause\n");
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Tear down the exception subsystem (currently holds no state that
+ * needs releasing).
+ */
+void dvmExceptionShutdown(void)
+{
+    /* no state to tear down */
+}
+
+
+/*
+ * Create a Throwable and throw an exception in the current thread (where
+ * "throwing" just means "set the thread's exception pointer").
+ *
+ * "msg" and/or "cause" may be NULL.
+ *
+ * If we have a bad exception hierarchy -- something in Throwable.<init>
+ * is missing -- then every attempt to throw an exception will result
+ * in another exception.  Exceptions are generally allowed to "chain"
+ * to other exceptions, so it's hard to auto-detect this problem.  It can
+ * only happen if the system classes are broken, so it's probably not
+ * worth spending cycles to detect it.
+ *
+ * We do have one case to worry about: if the classpath is completely
+ * wrong, we'll go into a death spin during startup because we can't find
+ * the initial class and then we can't find NoClassDefFoundError.  We have
+ * to handle this case.
+ *
+ * [Do we want to cache pointers to common exception classes?]
+ */
+void dvmThrowChainedException(const char* exceptionDescriptor, const char* msg,
+    Object* cause)
+{
+    LOGV("THROW '%s' msg='%s' cause=%s\n",
+        exceptionDescriptor, msg,
+        (cause != NULL) ? cause->clazz->descriptor : "(none)");
+
+    /* while initializing, repeated throws mean the classpath is hosed */
+    if (gDvm.initializing && ++gDvm.initExceptionCount >= 2) {
+        LOGE("Too many exceptions during init (failed on '%s' '%s')\n",
+            exceptionDescriptor, msg);
+        dvmAbort();
+    }
+
+    ClassObject* clazz = dvmFindSystemClass(exceptionDescriptor);
+    if (clazz == NULL) {
+        /*
+         * We couldn't find the exception class.  The attempt to find a
+         * nonexistent class should have raised an exception.  If no
+         * exception is currently raised, then we're pretty clearly unable
+         * to throw ANY sort of exception, and we need to pack it in.
+         *
+         * If we were able to throw the "class load failed" exception,
+         * stick with that.  Ideally we'd stuff the original exception
+         * into the "cause" field, but since we can't find it we can't
+         * do that.  The exception class name should be in the "message"
+         * field.
+         */
+        if (!dvmCheckException(dvmThreadSelf())) {
+            LOGE("FATAL: unable to throw exception (failed on '%s' '%s')\n",
+                exceptionDescriptor, msg);
+            dvmAbort();
+        }
+        return;
+    }
+
+    dvmThrowChainedExceptionByClass(clazz, msg, cause);
+}
+
+/*
+ * Start/continue throwing process now that we have a class reference.
+ *
+ * "excepClass" is the exception class to instantiate; "msg" and "cause"
+ * may each be NULL.  On success the new Throwable is stored in the
+ * current thread's exception slot; on failure some other exception is
+ * left pending instead.
+ */
+void dvmThrowChainedExceptionByClass(ClassObject* excepClass, const char* msg,
+    Object* cause)
+{
+    Thread* self = dvmThreadSelf();
+    Object* exception;
+
+    /* make sure the exception is initialized */
+    if (!dvmIsClassInitialized(excepClass) && !dvmInitClass(excepClass)) {
+        LOGE("ERROR: unable to initialize exception class '%s'\n",
+            excepClass->descriptor);
+        /* if InternalError itself is broken there is nothing to fall
+         * back on; otherwise re-throw as InternalError */
+        if (strcmp(excepClass->descriptor, "Ljava/lang/InternalError;") == 0)
+            dvmAbort();
+        dvmThrowChainedException("Ljava/lang/InternalError;",
+            "failed to init original exception class", cause);
+        return;
+    }
+
+    exception = dvmAllocObject(excepClass, ALLOC_DEFAULT);
+    if (exception == NULL) {
+        /*
+         * We're in a lot of trouble.  We might be in the process of
+         * throwing an out-of-memory exception, in which case the
+         * pre-allocated object will have been thrown when our object alloc
+         * failed.  So long as there's an exception raised, return and
+         * allow the system to try to recover.  If not, something is broken
+         * and we need to bail out.
+         */
+        if (dvmCheckException(self))
+            goto bail;
+        LOGE("FATAL: unable to allocate exception '%s' '%s'\n",
+            excepClass->descriptor, msg != NULL ? msg : "(no msg)");
+        dvmAbort();
+    }
+
+    /*
+     * Init the exception.
+     */
+    if (gDvm.optimizing) {
+        /* need the exception object, but can't invoke interpreted code */
+        LOGV("Skipping init of exception %s '%s'\n",
+            excepClass->descriptor, msg);
+    } else {
+        assert(excepClass == exception->clazz);
+        if (!initException(exception, msg, cause, self)) {
+            /*
+             * Whoops.  If we can't initialize the exception, we can't use
+             * it.  If there's an exception already set, the constructor
+             * probably threw an OutOfMemoryError.
+             */
+            if (!dvmCheckException(self)) {
+                /*
+                 * We're required to throw something, so we just
+                 * throw the pre-constructed internal error.
+                 */
+                self->exception = gDvm.internalErrorObj;
+            }
+            goto bail;
+        }
+    }
+
+    self->exception = exception;
+
+bail:
+    /* drop the tracked-alloc reference; passing NULL here is harmless */
+    dvmReleaseTrackedAlloc(exception, self);
+}
+
+/*
+ * Throw the named exception, using the dotted form of "messageDescriptor"
+ * as the exception message, with the specified cause.
+ */
+void dvmThrowChainedExceptionWithClassMessage(const char* exceptionDescriptor,
+    const char* messageDescriptor, Object* cause)
+{
+    char* dotName = dvmDescriptorToDot(messageDescriptor);
+    dvmThrowChainedException(exceptionDescriptor, dotName, cause);
+    free(dotName);
+}
+
+/*
+ * Throw the given exception class, using a class name derived from
+ * "messageDescriptor" as the exception message.  Like
+ * dvmThrowChainedExceptionWithClassMessage above, but takes a class
+ * object instead of a descriptor string.
+ *
+ * NOTE(review): this converts with dvmDescriptorToName while the
+ * descriptor-based variant above uses dvmDescriptorToDot -- confirm
+ * the differing message formats are intentional.
+ */
+void dvmThrowExceptionByClassWithClassMessage(ClassObject* exceptionClass,
+    const char* messageDescriptor)
+{
+    char* message = dvmDescriptorToName(messageDescriptor);
+
+    dvmThrowExceptionByClass(exceptionClass, message);
+    free(message);
+}
+
+/*
+ * Initialize an exception with an appropriate constructor.
+ *
+ * "exception" is the exception object to initialize.
+ * Either or both of "msg" and "cause" may be null.
+ * "self" is dvmThreadSelf(), passed in so we don't have to look it up again.
+ *
+ * Returns true on success.  If the process of initializing the exception
+ * causes another exception (e.g., OutOfMemoryError) to be thrown, return
+ * an error and leave self->exception intact.
+ */
+static bool initException(Object* exception, const char* msg, Object* cause,
+    Thread* self)
+{
+    /* which constructor signature we ended up selecting */
+    enum {
+        kInitUnknown,
+        kInitNoarg,
+        kInitMsg,
+        kInitMsgThrow,
+        kInitThrow
+    } initKind = kInitUnknown;
+    Method* initMethod = NULL;
+    ClassObject* excepClass = exception->clazz;
+    StringObject* msgStr = NULL;
+    bool result = false;
+    /* true if we must call initCause() separately after construction */
+    bool needInitCause = false;
+
+    /* caller must have cleared any pending exception first */
+    assert(self != NULL);
+    assert(self->exception == NULL);
+
+    /* if we have a message, create a String */
+    if (msg == NULL)
+        msgStr = NULL;
+    else {
+        msgStr = dvmCreateStringFromCstr(msg, ALLOC_DEFAULT);
+        if (msgStr == NULL) {
+            LOGW("Could not allocate message string \"%s\" while "
+                    "throwing internal exception (%s)\n",
+                    msg, excepClass->descriptor);
+            goto bail;
+        }
+    }
+
+    /*
+     * The Throwable class has four public constructors:
+     *  (1) Throwable()
+     *  (2) Throwable(String message)
+     *  (3) Throwable(String message, Throwable cause)  (added in 1.4)
+     *  (4) Throwable(Throwable cause)                  (added in 1.4)
+     *
+     * The first two are part of the original design, and all exception
+     * classes should support them.  The third prototype was used by
+     * individual exceptions. e.g. ClassNotFoundException added it in 1.2.
+     * The general "cause" mechanism was added in 1.4.  Some classes,
+     * such as IllegalArgumentException, initially supported the first
+     * two, but added the second two in a later release.
+     *
+     * Exceptions may be picky about how their "cause" field is initialized.
+     * If you call ClassNotFoundException(String), it may choose to
+     * initialize its "cause" field to null.  Doing so prevents future
+     * calls to Throwable.initCause().
+     *
+     * So, if "cause" is not NULL, we need to look for a constructor that
+     * takes a throwable.  If we can't find one, we fall back on calling
+     * #1/#2 and making a separate call to initCause().  Passing a null ref
+     * for "message" into Throwable(String, Throwable) is allowed, but we
+     * prefer to use the Throwable-only version because it has different
+     * behavior.
+     */
+    if (cause == NULL) {
+        if (msgStr == NULL) {
+            initMethod = dvmFindDirectMethodByDescriptor(excepClass, "<init>", "()V");
+            initKind = kInitNoarg;
+        } else {
+            initMethod = dvmFindDirectMethodByDescriptor(excepClass, "<init>",
+                            "(Ljava/lang/String;)V");
+            initKind = kInitMsg;
+        }
+    } else {
+        if (msgStr == NULL) {
+            initMethod = dvmFindDirectMethodByDescriptor(excepClass, "<init>",
+                            "(Ljava/lang/Throwable;)V");
+            if (initMethod != NULL) {
+                initKind = kInitThrow;
+            } else {
+                initMethod = dvmFindDirectMethodByDescriptor(excepClass, "<init>", "()V");
+                initKind = kInitNoarg;
+                needInitCause = true;
+            }
+        } else {
+            initMethod = dvmFindDirectMethodByDescriptor(excepClass, "<init>",
+                            "(Ljava/lang/String;Ljava/lang/Throwable;)V");
+            if (initMethod != NULL) {
+                initKind = kInitMsgThrow;
+            } else {
+                initMethod = dvmFindDirectMethodByDescriptor(excepClass, "<init>",
+                                "(Ljava/lang/String;)V");
+                initKind = kInitMsg;
+                needInitCause = true;
+            }
+        }
+    }
+
+    if (initMethod == NULL) {
+        /*
+         * We can't find the desired constructor.  This can happen if a
+         * subclass of java/lang/Throwable doesn't define an expected
+         * constructor, e.g. it doesn't provide one that takes a string
+         * when a message has been provided.
+         */
+        LOGW("WARNING: exception class '%s' missing constructor "
+            "(msg='%s' kind=%d)\n",
+            excepClass->descriptor, msg, initKind);
+        assert(strcmp(excepClass->descriptor,
+                      "Ljava/lang/RuntimeException;") != 0);
+        dvmThrowChainedException("Ljava/lang/RuntimeException;", 
+            "re-throw on exception class missing constructor", NULL);
+        goto bail;
+    }
+
+    /*
+     * Call the constructor with the appropriate arguments.
+     */
+    JValue unused;
+    switch (initKind) {
+    case kInitNoarg:
+        LOGVV("+++ exc noarg (ic=%d)\n", needInitCause);
+        dvmCallMethod(self, initMethod, exception, &unused);
+        break;
+    case kInitMsg:
+        LOGVV("+++ exc msg (ic=%d)\n", needInitCause);
+        dvmCallMethod(self, initMethod, exception, &unused, msgStr);
+        break;
+    case kInitThrow:
+        LOGVV("+++ exc throw");
+        assert(!needInitCause);
+        dvmCallMethod(self, initMethod, exception, &unused, cause);
+        break;
+    case kInitMsgThrow:
+        LOGVV("+++ exc msg+throw");
+        assert(cause != NULL && !needInitCause);
+        dvmCallMethod(self, initMethod, exception, &unused, msgStr, cause);
+        break;
+    default:
+        assert(false);
+        goto bail;
+    }
+
+    /*
+     * It's possible the constructor has thrown an exception.  If so, we
+     * return an error and let our caller deal with it.
+     */
+    if (self->exception != NULL) {
+        LOGW("Exception thrown (%s) while throwing internal exception (%s)\n",
+            self->exception->clazz->descriptor, exception->clazz->descriptor);
+        goto bail;
+    }
+
+    /*
+     * If this exception was caused by another exception, and we weren't
+     * able to find a cause-setting constructor, set the "cause" field
+     * with an explicit call.
+     */
+    if (needInitCause) {
+        Method* initCause;
+        initCause = dvmFindVirtualMethodHierByDescriptor(excepClass, "initCause",
+            "(Ljava/lang/Throwable;)Ljava/lang/Throwable;");
+        if (initCause != NULL) {
+            dvmCallMethod(self, initCause, exception, &unused, cause);
+            if (self->exception != NULL) {
+                /* initCause() threw an exception; return an error and
+                 * let the caller deal with it.
+                 */
+                LOGW("Exception thrown (%s) during initCause() "
+                        "of internal exception (%s)\n",
+                        self->exception->clazz->descriptor,
+                        exception->clazz->descriptor);
+                goto bail;
+            }
+        } else {
+            /* best-effort: leave the cause unset rather than failing */
+            LOGW("WARNING: couldn't find initCause in '%s'\n",
+                excepClass->descriptor);
+        }
+    }
+
+
+    result = true;
+
+bail:
+    dvmReleaseTrackedAlloc((Object*) msgStr, self);     // NULL is ok
+    return result;
+}
+
+
+/*
+ * Clear the pending exception and the "initExceptionCount" counter.  This
+ * is used by the optimization and verification code, which has to run with
+ * "initializing" set to avoid going into a death-spin if the "class not
+ * found" exception can't be found.
+ *
+ * This can also be called when the VM is in a "normal" state, e.g. when
+ * verifying classes that couldn't be verified at optimization time.  The
+ * reset of initExceptionCount should be harmless in that case.
+ */
+void dvmClearOptException(Thread* self)
+{
+    gDvm.initExceptionCount = 0;
+    self->exception = NULL;
+}
+
+/*
+ * Print the stack trace of the current exception on stderr.  This is called
+ * from the JNI ExceptionDescribe call.
+ *
+ * For consistency we just invoke the Throwable printStackTrace method,
+ * which might be overridden in the exception object.
+ *
+ * Exceptions thrown during the course of printing the stack trace are
+ * ignored.
+ */
+void dvmPrintExceptionStackTrace(void)
+{
+    Thread* self = dvmThreadSelf();
+    Object* saved = self->exception;
+
+    if (saved == NULL)
+        return;
+
+    /* clear the slot so the method invocation runs with no pending
+     * exception; restored below */
+    self->exception = NULL;
+
+    Method* meth = dvmFindVirtualMethodHierByDescriptor(saved->clazz,
+                    "printStackTrace", "()V");
+    if (meth == NULL) {
+        LOGW("WARNING: could not find printStackTrace in %s\n",
+            saved->clazz->descriptor);
+    } else {
+        JValue unused;
+        dvmCallMethod(self, meth, saved, &unused);
+    }
+
+    if (self->exception != NULL) {
+        LOGI("NOTE: exception thrown while printing stack trace: %s\n",
+            self->exception->clazz->descriptor);
+    }
+
+    self->exception = saved;
+}
+
+/*
+ * Search the method's list of exceptions for a match.
+ *
+ * "relPc" is the offset from the start of the method's code, in 16-bit
+ * code units.  "excepClass" is the class of the exception being thrown.
+ *
+ * Returns the offset of the catch block on success, or -1 on failure.
+ */
+static int findCatchInMethod(Thread* self, const Method* method, int relPc,
+    ClassObject* excepClass)
+{
+    /*
+     * Need to clear the exception before entry.  Otherwise, dvmResolveClass
+     * might think somebody threw an exception while it was loading a class.
+     */
+    assert(!dvmCheckException(self));
+    assert(!dvmIsNativeMethod(method));
+
+    LOGVV("findCatchInMethod %s.%s excep=%s depth=%d\n",
+        method->clazz->descriptor, method->name, excepClass->descriptor,
+        dvmComputeExactFrameDepth(self->curFrame));
+
+    DvmDex* pDvmDex = method->clazz->pDvmDex;
+    const DexCode* pCode = dvmGetMethodCode(method);
+    DexCatchIterator iterator;
+
+    /* walk every handler covering this address, in order */
+    if (dexFindCatchHandler(&iterator, pCode, relPc)) {
+        for (;;) {
+            DexCatchHandler* handler = dexCatchIteratorNext(&iterator);
+
+            if (handler == NULL) {
+                break;
+            }
+
+            if (handler->typeIdx == kDexNoIndex) {
+                /* catch-all */
+                LOGV("Match on catch-all block at 0x%02x in %s.%s for %s\n",
+                        relPc, method->clazz->descriptor,
+                        method->name, excepClass->descriptor);
+                return handler->address;
+            }
+
+            /* check the resolved-class cache before resolving */
+            ClassObject* throwable =
+                dvmDexGetResolvedClass(pDvmDex, handler->typeIdx);
+            if (throwable == NULL) {
+                /*
+                 * TODO: this behaves badly if we run off the stack
+                 * while trying to throw an exception.  The problem is
+                 * that, if we're in a class loaded by a class loader,
+                 * the call to dvmResolveClass has to ask the class
+                 * loader for help resolving any previously-unresolved
+                 * classes.  If this particular class loader hasn't
+                 * resolved StackOverflowError, it will call into
+                 * interpreted code, and blow up.
+                 *
+                 * We currently replace the previous exception with
+                 * the StackOverflowError, which means they won't be
+                 * catching it *unless* they explicitly catch
+                 * StackOverflowError, in which case we'll be unable
+                 * to resolve the class referred to by the "catch"
+                 * block.
+                 *
+                 * We end up getting a huge pile of warnings if we do
+                 * a simple synthetic test, because this method gets
+                 * called on every stack frame up the tree, and it
+                 * fails every time.
+                 *
+                 * This eventually bails out, effectively becoming an
+                 * uncatchable exception, so other than the flurry of
+                 * warnings it's not really a problem.  Still, we could
+                 * probably handle this better.
+                 */
+                throwable = dvmResolveClass(method->clazz, handler->typeIdx,
+                    true);
+                if (throwable == NULL) {
+                    /*
+                     * We couldn't find the exception they wanted in
+                     * our class files (or, perhaps, the stack blew up
+                     * while we were querying a class loader). Cough
+                     * up a warning, then move on to the next entry.
+                     * Keep the exception status clear.
+                     */
+                    LOGW("Could not resolve class ref'ed in exception "
+                            "catch list (class index %d, exception %s)\n",
+                            handler->typeIdx,
+                            (self->exception != NULL) ?
+                            self->exception->clazz->descriptor : "(none)");
+                    dvmClearException(self);
+                    continue;
+                }
+            }
+
+            //LOGD("ADDR MATCH, check %s instanceof %s\n",
+            //    excepClass->descriptor, pEntry->excepClass->descriptor);
+
+            /* handler matches if the thrown class is the caught class
+             * or a subclass of it */
+            if (dvmInstanceof(excepClass, throwable)) {
+                LOGV("Match on catch block at 0x%02x in %s.%s for %s\n",
+                        relPc, method->clazz->descriptor,
+                        method->name, excepClass->descriptor);
+                return handler->address;
+            }
+        }
+    }
+
+    LOGV("No matching catch block at 0x%02x in %s for %s\n",
+        relPc, method->name, excepClass->descriptor);
+    return -1;
+}
+
+/*
+ * Find a matching "catch" block.  "pc" is the relative PC within the
+ * current method, indicating the offset from the start in 16-bit units.
+ *
+ * Returns the offset to the catch block, or -1 if we run up against a
+ * break frame without finding anything.
+ *
+ * The class resolution stuff we have to do while evaluating the "catch"
+ * blocks could cause an exception.  The caller should clear the exception
+ * before calling here and restore it after.
+ *
+ * Sets *newFrame to the frame pointer of the frame with the catch block.
+ * If "scanOnly" is false, self->curFrame is also set to this value.
+ */
+int dvmFindCatchBlock(Thread* self, int relPc, Object* exception,
+    bool scanOnly, void** newFrame)
+{
+    void* fp = self->curFrame;
+    int catchAddr = -1;
+
+    assert(!dvmCheckException(self));
+
+    /* walk up the interpreted stack, one frame per iteration */
+    while (true) {
+        StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+        catchAddr = findCatchInMethod(self, saveArea->method, relPc,
+                        exception->clazz);
+        if (catchAddr >= 0)
+            break;
+
+        /*
+         * Normally we'd check for ACC_SYNCHRONIZED methods and unlock
+         * them as we unroll.  Dalvik uses what amount to generated
+         * "finally" blocks to take care of this for us.
+         */
+        // if (!scanOnly) ...
+
+        TRACE_METHOD_UNROLL(self, SAVEAREA_FROM_FP(fp)->method);
+
+        /*
+         * Move up one frame.  If the next thing up is a break frame,
+         * break out now so we're left unrolled to the last method frame.
+         * We need to point there so we can roll up the JNI local refs
+         * if this was a native method.
+         */
+        assert(saveArea->prevFrame != NULL);
+        if (dvmIsBreakFrame(saveArea->prevFrame)) {
+            if (!scanOnly)
+                break;      // bail with catchAddr == -1
+
+            /*
+             * We're scanning for the debugger.  It needs to know if this
+             * exception is going to be caught or not, and we need to figure
+             * out if it will be caught *ever* not just between the current
+             * position and the next break frame.  We can't tell what native
+             * code is going to do, so we assume it never catches exceptions.
+             *
+             * Start by finding an interpreted code frame.
+             */
+            fp = saveArea->prevFrame;           // this is the break frame
+            saveArea = SAVEAREA_FROM_FP(fp);
+            fp = saveArea->prevFrame;           // this may be a good one
+            while (fp != NULL) {
+                if (!dvmIsBreakFrame(fp)) {
+                    saveArea = SAVEAREA_FROM_FP(fp);
+                    if (!dvmIsNativeMethod(saveArea->method))
+                        break;
+                }
+
+                fp = SAVEAREA_FROM_FP(fp)->prevFrame;
+            }
+            if (fp == NULL)
+                break;      // bail with catchAddr == -1
+
+            /*
+             * Now fp points to the "good" frame.  When the interp code
+             * invoked the native code, it saved a copy of its current PC
+             * into xtra.currentPc.  Pull it out of there.
+             */
+            relPc =
+                saveArea->xtra.currentPc - SAVEAREA_FROM_FP(fp)->method->insns;
+        } else {
+            fp = saveArea->prevFrame;
+
+            /* savedPc in was-current frame goes with method in now-current */
+            relPc = saveArea->savedPc - SAVEAREA_FROM_FP(fp)->method->insns;
+        }
+    }
+
+    if (!scanOnly)
+        self->curFrame = fp;
+
+    /*
+     * The class resolution in findCatchInMethod() could cause an exception.
+     * Clear it to be safe.
+     */
+    self->exception = NULL;
+
+    *newFrame = fp;
+    return catchAddr;
+}
+
+/*
+ * We have to carry the exception's stack trace around, but in many cases
+ * it will never be examined.  It makes sense to keep it in a compact,
+ * VM-specific object, rather than an array of Objects with strings.
+ *
+ * Pass in the thread whose stack we're interested in.  If "thread" is
+ * not self, the thread must be suspended.  This implies that the thread
+ * list lock is held, which means we can't allocate objects or we risk
+ * jamming the GC.  So, we allow this function to return different formats.
+ * (This shouldn't be called directly -- see the inline functions in the
+ * header file.)
+ *
+ * If "wantObject" is true, this returns a newly-allocated Object, which is
+ * presently an array of integers, but could become something else in the
+ * future.  If "wantObject" is false, return plain malloc data.
+ *
+ * NOTE: if we support class unloading, we will need to scan the class
+ * object references out of these arrays.
+ */
+void* dvmFillInStackTraceInternal(Thread* thread, bool wantObject, int* pCount)
+{
+    ArrayObject* stackData = NULL;
+    int* simpleData = NULL;
+    void* fp;
+    void* startFp;
+    int stackDepth;
+    int* intPtr;
+
+    if (pCount != NULL)
+        *pCount = 0;
+    fp = thread->curFrame;
+
+    assert(thread == dvmThreadSelf() || dvmIsSuspended(thread));
+
+    /*
+     * We're looking at a stack frame for code running below a Throwable
+     * constructor.  We want to remove the Throwable methods and the
+     * superclass initializations so the user doesn't see them when they
+     * read the stack dump.
+     *
+     * TODO: this just scrapes off the top layers of Throwable.  Might not do
+     * the right thing if we create an exception object or cause a VM
+     * exception while in a Throwable method.
+     */
+    while (fp != NULL) {
+        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+        const Method* method = saveArea->method;
+
+        if (dvmIsBreakFrame(fp))
+            break;
+        if (!dvmInstanceof(method->clazz, gDvm.classJavaLangThrowable))
+            break;
+        //LOGD("EXCEP: ignoring %s.%s\n",
+        //         method->clazz->descriptor, method->name);
+        fp = saveArea->prevFrame;
+    }
+    /* this is now the first frame the user should see in the trace */
+    startFp = fp;
+
+    /*
+     * Compute the stack depth.
+     */
+    stackDepth = 0;
+    while (fp != NULL) {
+        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+
+        if (!dvmIsBreakFrame(fp))
+            stackDepth++;
+
+        assert(fp != saveArea->prevFrame);
+        fp = saveArea->prevFrame;
+    }
+    //LOGD("EXCEP: stack depth is %d\n", stackDepth);
+
+    if (!stackDepth)
+        goto bail;
+
+    /*
+     * We need to store a pointer to the Method and the program counter.
+     * We have 4-byte pointers, so we use '[I'.
+     */
+    if (wantObject) {
+        assert(sizeof(Method*) == 4);
+        stackData = dvmAllocPrimitiveArray('I', stackDepth*2, ALLOC_DEFAULT);
+        if (stackData == NULL) {
+            assert(dvmCheckException(dvmThreadSelf()));
+            goto bail;
+        }
+        intPtr = (int*) stackData->contents;
+    } else {
+        /* array of ints; first entry is stack depth */
+        assert(sizeof(Method*) == sizeof(int));
+        simpleData = (int*) malloc(sizeof(int) * stackDepth*2);
+        if (simpleData == NULL)
+            goto bail;
+
+        assert(pCount != NULL);
+        intPtr = simpleData;
+    }
+    if (pCount != NULL)
+        *pCount = stackDepth;
+
+    /* second pass: record a {Method*, pc-offset} pair for each frame */
+    fp = startFp;
+    while (fp != NULL) {
+        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+        const Method* method = saveArea->method;
+
+        if (!dvmIsBreakFrame(fp)) {
+            //LOGD("EXCEP keeping %s.%s\n", method->clazz->descriptor,
+            //         method->name);
+
+            *intPtr++ = (int) method;
+            if (dvmIsNativeMethod(method)) {
+                *intPtr++ = 0;      /* no saved PC for native methods */
+            } else {
+                assert(saveArea->xtra.currentPc >= method->insns &&
+                        saveArea->xtra.currentPc < 
+                        method->insns + dvmGetMethodInsnsSize(method));
+                *intPtr++ = (int) (saveArea->xtra.currentPc - method->insns);
+            }
+
+            stackDepth--;       // for verification
+        }
+
+        assert(fp != saveArea->prevFrame);
+        fp = saveArea->prevFrame;
+    }
+    assert(stackDepth == 0);
+
+bail:
+    /* NOTE(review): stackData may still be NULL here if we bailed early --
+     * assuming the release call tolerates a NULL object; confirm */
+    if (wantObject) {
+        dvmReleaseTrackedAlloc((Object*) stackData, dvmThreadSelf());
+        return stackData;
+    } else {
+        return simpleData;
+    }
+}
+
+
+/*
+ * Given an Object previously created by dvmFillInStackTrace(), use the
+ * contents of the saved stack trace to generate an array of
+ * java/lang/StackTraceElement objects.
+ *
+ * The returned array is not added to the "local refs" list.
+ */
+ArrayObject* dvmGetStackTrace(const Object* ostackData)
+{
+    const ArrayObject* stackData = (const ArrayObject*) ostackData;
+    const int* intVals;
+    int stackSize;
+
+    /* the stored trace is a flat int array of {Method*, pc-offset} pairs,
+     * so the frame count is half the array length */
+    stackSize = stackData->length / 2;
+    intVals = (const int*) stackData->contents;
+    return dvmGetStackTraceRaw(intVals, stackSize);
+}
+
+/*
+ * Generate an array of StackTraceElement objects from the raw integer
+ * data encoded by dvmFillInStackTrace().
+ *
+ * "intVals" points to the first {method,pc} pair.
+ *
+ * The returned array is not added to the "local refs" list.
+ */
+ArrayObject* dvmGetStackTraceRaw(const int* intVals, int stackDepth)
+{
+    ArrayObject* steArray = NULL;
+    Object** stePtr;
+    int i;
+
+    /* init this if we haven't yet */
+    if (!dvmIsClassInitialized(gDvm.classJavaLangStackTraceElement))
+        dvmInitClass(gDvm.classJavaLangStackTraceElement);
+
+    /* allocate a StackTraceElement array */
+    steArray = dvmAllocArray(gDvm.classJavaLangStackTraceElementArray,
+                    stackDepth, kObjectArrayRefWidth, ALLOC_DEFAULT);
+    if (steArray == NULL)
+        goto bail;
+    stePtr = (Object**) steArray->contents;
+
+    /*
+     * Allocate and initialize a StackTraceElement for each stack frame.
+     * We use the standard constructor to configure the object.
+     */
+    for (i = 0; i < stackDepth; i++) {
+        Object* ste;
+        Method* meth;
+        StringObject* className;
+        StringObject* methodName;
+        StringObject* fileName;
+        int lineNumber, pc;
+        const char* sourceFile;
+        char* dotName;
+
+        ste = dvmAllocObject(gDvm.classJavaLangStackTraceElement,ALLOC_DEFAULT);
+        if (ste == NULL)
+            goto bail;
+
+        /* each frame is stored as a {Method*, pc-offset} pair */
+        meth = (Method*) *intVals++;
+        pc = *intVals++;
+
+        if (pc == -1)      // broken top frame?
+            lineNumber = 0;
+        else
+            lineNumber = dvmLineNumFromPC(meth, pc);
+
+        /* convert the class descriptor to dotted form for display */
+        dotName = dvmDescriptorToDot(meth->clazz->descriptor);
+        className = dvmCreateStringFromCstr(dotName, ALLOC_DEFAULT);
+        free(dotName);
+
+        methodName = dvmCreateStringFromCstr(meth->name, ALLOC_DEFAULT);
+        sourceFile = dvmGetMethodSourceFile(meth);
+        if (sourceFile != NULL)
+            fileName = dvmCreateStringFromCstr(sourceFile, ALLOC_DEFAULT);
+        else
+            fileName = NULL;
+
+        /*
+         * Invoke:
+         *  public StackTraceElement(String declaringClass, String methodName,
+         *      String fileName, int lineNumber)
+         * (where lineNumber==-2 means "native")
+         */
+        JValue unused;
+        dvmCallMethod(dvmThreadSelf(), gDvm.methJavaLangStackTraceElement_init,
+            ste, &unused, className, methodName, fileName, lineNumber);
+
+        dvmReleaseTrackedAlloc(ste, NULL);
+        dvmReleaseTrackedAlloc((Object*) className, NULL);
+        dvmReleaseTrackedAlloc((Object*) methodName, NULL);
+        dvmReleaseTrackedAlloc((Object*) fileName, NULL);
+
+        if (dvmCheckException(dvmThreadSelf()))
+            goto bail;
+
+        *stePtr++ = ste;
+    }
+
+bail:
+    /* NOTE(review): if we bailed out mid-loop, a partially-filled array is
+     * returned with an exception pending -- callers must check for that */
+    dvmReleaseTrackedAlloc((Object*) steArray, NULL);
+    return steArray;
+}
+
+/*
+ * Dump the contents of a raw stack trace to the log.
+ */
+void dvmLogRawStackTrace(const int* intVals, int stackDepth)
+{
+    int i;
+
+    /*
+     * Run through the array of stack frame data, one {Method*, pc-offset}
+     * pair per frame.
+     */
+    for (i = 0; i < stackDepth; i++) {
+        Method* meth;
+        int lineNumber, pc;
+        char* dotName;
+
+        meth = (Method*) *intVals++;
+        pc = *intVals++;
+
+        if (pc == -1)      // broken top frame?
+            lineNumber = 0;
+        else
+            lineNumber = dvmLineNumFromPC(meth, pc);
+
+        // probably don't need to do this, but it looks nicer
+        dotName = dvmDescriptorToDot(meth->clazz->descriptor);
+
+        if (dvmIsNativeMethod(meth)) {
+            LOGI("\tat %s.%s(Native Method)\n", dotName, meth->name);
+        } else {
+            /* use the guarded lineNumber computed above; re-invoking
+             * dvmLineNumFromPC here would bypass the pc == -1 check */
+            LOGI("\tat %s.%s(%s:%d)\n",
+                dotName, meth->name, dvmGetMethodSourceFile(meth),
+                lineNumber);
+        }
+
+        free(dotName);
+    }
+}
+
+/*
+ * Print the direct stack trace of the given exception to the log.
+ */
+static void logStackTraceOf(Object* exception)
+{
+    StringObject* msgObj;
+    const ArrayObject* rawTrace;
+
+    /* header line: "descriptor: message", or just the descriptor */
+    msgObj = (StringObject*) dvmGetFieldObject(exception,
+                    gDvm.offJavaLangThrowable_message);
+    if (msgObj == NULL) {
+        LOGI("%s:\n", exception->clazz->descriptor);
+    } else {
+        char* msg = dvmCreateCstrFromString(msgObj);
+        LOGI("%s: %s\n", exception->clazz->descriptor, msg);
+        free(msg);
+    }
+
+    rawTrace = (const ArrayObject*) dvmGetFieldObject(exception,
+                    gDvm.offJavaLangThrowable_stackState);
+    if (rawTrace == NULL) {
+        LOGI("  (no stack trace data found)\n");
+        return;
+    }
+
+    /* the saved state is a flat int array of {Method*, pc-offset} pairs */
+    dvmLogRawStackTrace((const int*) rawTrace->contents,
+        rawTrace->length / 2);
+}
+
+/*
+ * Print the stack trace of the current thread's exception, as well as
+ * the stack traces of any chained exceptions, to the log. We extract
+ * the stored stack trace and process it internally instead of calling
+ * interpreted code.
+ */
+void dvmLogExceptionStackTrace(void)
+{
+    Object* current = dvmThreadSelf()->exception;
+
+    if (current == NULL) {
+        LOGW("tried to log a null exception?\n");
+        return;
+    }
+
+    /* log the exception, then walk and log the "cause" chain */
+    while (true) {
+        Object* cause;
+
+        logStackTraceOf(current);
+
+        cause = (Object*) dvmGetFieldObject(current,
+                    gDvm.offJavaLangThrowable_cause);
+        /* a self-referential cause marks the end of the chain */
+        if (cause == NULL || cause == current)
+            break;
+        LOGI("Caused by:\n");
+        current = cause;
+    }
+}
+
diff --git a/vm/Exception.h b/vm/Exception.h
new file mode 100644
index 0000000..c402343
--- /dev/null
+++ b/vm/Exception.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Exception handling.
+ */
+#ifndef _DALVIK_EXCEPTION
+#define _DALVIK_EXCEPTION
+
+/* initialization */
+bool dvmExceptionStartup(void);
+void dvmExceptionShutdown(void);
+
+/*
+ * Throw an exception in the current thread, by class descriptor.
+ */
+void dvmThrowChainedException(const char* exceptionDescriptor, const char* msg,
+    Object* cause);
+INLINE void dvmThrowException(const char* exceptionDescriptor,
+    const char* msg)
+{
+    /* convenience wrapper: throw with no "cause" exception */
+    dvmThrowChainedException(exceptionDescriptor, msg, NULL);
+}
+
+/*
+ * Throw an exception in the current thread, by class object.
+ */
+void dvmThrowChainedExceptionByClass(ClassObject* exceptionClass,
+    const char* msg, Object* cause);
+INLINE void dvmThrowExceptionByClass(ClassObject* exceptionClass,
+    const char* msg)
+{
+    /* convenience wrapper: throw with no "cause" exception */
+    dvmThrowChainedExceptionByClass(exceptionClass, msg, NULL);
+}
+
+/*
+ * Throw the named exception using the name of a class as the exception
+ * message.
+ */
+void dvmThrowChainedExceptionWithClassMessage(const char* exceptionDescriptor,
+    const char* messageDescriptor, Object* cause);
+INLINE void dvmThrowExceptionWithClassMessage(const char* exceptionDescriptor,
+    const char* messageDescriptor)
+{
+    /* convenience wrapper: throw with no "cause" exception */
+    dvmThrowChainedExceptionWithClassMessage(exceptionDescriptor,
+        messageDescriptor, NULL);
+}
+
+/*
+ * Like dvmThrowExceptionWithMessageFromDescriptor, but take a
+ * class object instead of a name.
+ */
+void dvmThrowExceptionByClassWithClassMessage(ClassObject* exceptionClass,
+    const char* messageDescriptor);
+
+/*
+ * Return the exception being thrown in the current thread, or NULL if
+ * no exception is pending.
+ */
+INLINE Object* dvmGetException(Thread* self) {
+    return self->exception;     /* NULL when nothing is pending */
+}
+
+/*
+ * Set the exception being thrown in the current thread.
+ */
+INLINE void dvmSetException(Thread* self, Object* exception)
+{
+    /* use dvmClearException() to clear, never set(NULL) */
+    assert(exception != NULL);
+    self->exception = exception;
+}
+
+/*
+ * Clear the pending exception.
+ *
+ * (We use this rather than "set(null)" because we may need to have special
+ * fixups here for StackOverflowError stuff.  Calling "clear" in the code
+ * makes it obvious.)
+ */
+INLINE void dvmClearException(Thread* self) {
+    /* see the comment above re: possible StackOverflowError fixups */
+    self->exception = NULL;
+}
+
+/*
+ * Clear the pending exception.  Used by the optimization and verification
+ * code, which has to run with "initializing" set to avoid going into a
+ * death-spin if the "class not found" exception can't be found.
+ */
+void dvmClearOptException(Thread* self);
+
+/*
+ * Returns "true" if an exception is pending.  Use this if you have a
+ * "self" pointer.
+ */
+INLINE bool dvmCheckException(Thread* self) {
+    /* true if an exception is pending on this thread */
+    return (self->exception != NULL);
+}
+
+/*
+ * Print the exception stack trace on stderr.  Calls the exception's
+ * print function.
+ */
+void dvmPrintExceptionStackTrace(void);
+
+/*
+ * Print the exception stack trace to the log file.  The exception stack
+ * trace is computed within the VM.
+ */
+void dvmLogExceptionStackTrace(void);
+
+/*
+ * Search for a catch block that matches "exception".
+ *
+ * "*newFrame" gets a copy of the new frame pointer.
+ *
+ * If "doUnroll" is set, we unroll "thread"s stack as we go (and update
+ * self->curFrame with the same value as in *newFrame).
+ *
+ * Returns the offset to the catch code on success, or -1 if we couldn't
+ * find a catcher.
+ */
+int dvmFindCatchBlock(Thread* self, int relPc, Object* exception,
+    bool doUnroll, void** newFrame);
+
+/*
+ * Support for saving exception stack traces and converting them to
+ * usable form.  Use the "FillIn" function to generate a compact array
+ * that represents the stack frames, then "GetStackTrace" to convert it
+ * to an array of StackTraceElement objects.
+ *
+ * Don't call the "Internal" form of the function directly.
+ */
+void* dvmFillInStackTraceInternal(Thread* thread, bool wantObject, int* pCount);
+/* return an [I for use by interpreted code */
+INLINE Object* dvmFillInStackTrace(Thread* thread) {
+    /* object form: result is a GC-managed int array ('[I') */
+    return (Object*) dvmFillInStackTraceInternal(thread, true, NULL);
+}
+ArrayObject* dvmGetStackTrace(const Object* stackState);
+/* return an int* and array count; caller must free() the return value */
+INLINE int* dvmFillInStackTraceRaw(Thread* thread, int* pCount) {
+    /* raw form: result is malloc()ed; caller must free() it */
+    return (int*) dvmFillInStackTraceInternal(thread, false, pCount);
+}
+ArrayObject* dvmGetStackTraceRaw(const int* intVals, int stackDepth);
+
+/*
+ * Print a formatted version of a raw stack trace to the log file.
+ */
+void dvmLogRawStackTrace(const int* intVals, int stackDepth);
+
+#endif /*_DALVIK_EXCEPTION*/
diff --git a/vm/Globals.h b/vm/Globals.h
new file mode 100644
index 0000000..b0a4633
--- /dev/null
+++ b/vm/Globals.h
@@ -0,0 +1,580 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Variables with library scope.
+ *
+ * Prefer this over scattered static and global variables -- it's easier to
+ * view the state in a debugger, it makes clean shutdown simpler, we can
+ * trivially dump the state into a crash log, and it dodges most naming
+ * collisions that will arise when we are embedded in a larger program.
+ *
+ * If we want multiple VMs per process, this can get stuffed into TLS (or
+ * accessed through a Thread field).  May need to pass it around for some
+ * of the early initialization functions.
+ */
+#ifndef _DALVIK_GLOBALS
+#define _DALVIK_GLOBALS
+
+#include <stdarg.h>
+#include <pthread.h>
+
+#define MAX_BREAKPOINTS 20      /* used for a debugger optimization */
+
+// fwd
+typedef struct GcHeap GcHeap;   /* heap internal structure */
+
+/*
+ * One of these for each -ea/-da/-esa/-dsa on the command line.
+ */
+typedef struct AssertionControl {
+    char*   pkgOrClass;         /* package/class string, or NULL for esa/dsa */
+    int     pkgOrClassLen;      /* string length, for quick compare */
+    bool    enable;             /* enable or disable */
+    bool    isPackage;          /* string ended with "..."? (package prefix) */
+} AssertionControl;
+
+/*
+ * Execution mode, e.g. interpreter vs. JIT.
+ */
+typedef enum ExecutionMode {
+    kExecutionModeUnknown = 0,
+    kExecutionModeInterpPortable,   /* portable C interpreter */
+    kExecutionModeInterpFast,       /* platform-optimized interpreter */
+} ExecutionMode;
+
+/*
+ * All fields are initialized to zero.
+ *
+ * Storage allocated here must be freed by a subsystem shutdown function or
+ * from within freeGlobals().
+ */
+struct DvmGlobals {
+    /*
+     * Some options from the command line or environment.
+     */
+    char*       bootClassPathStr;
+    char*       classPathStr;
+
+    /* heap and thread-stack sizing, from the command line */
+    unsigned int    heapSizeStart;
+    unsigned int    heapSizeMax;
+    unsigned int    stackSize;
+
+    /* verbose logging categories */
+    bool        verboseGc;
+    bool        verboseJni;
+    bool        verboseClass;
+
+    bool        jdwpAllowed;        // debugging allowed for this process?
+    bool        jdwpConfigured;     // has debugging info been provided?
+    int         jdwpTransport;
+    bool        jdwpServer;
+    char*       jdwpHost;
+    int         jdwpPort;
+    bool        jdwpSuspend;
+
+    /* optional replacements for vfprintf/exit/abort, for embedders */
+    int         (*vfprintfHook)(FILE*, const char*, va_list);
+    void        (*exitHook)(int);
+    void        (*abortHook)(void);
+
+    int         jniGrefLimit;       // 0 means no limit
+    bool        jniWarnError;       // treat JNI warnings as fatal errors?
+    bool        reduceSignals;
+    bool        noQuitHandler;
+    char*       stackTraceFile;     // for SIGQUIT-inspired output
+
+    bool        logStdio;
+
+    DexOptimizerMode    dexOptMode;
+    DexClassVerifyMode  classVerifyMode;
+
+    int         assertionCtrlCount;
+    AssertionControl*   assertionCtrl;
+
+    ExecutionMode   executionMode;
+
+    /*
+     * VM init management.
+     */
+    bool        initializing;
+    int         initExceptionCount;
+    bool        optimizing;
+
+    /*
+     * java.lang.System properties set from the command line.
+     */
+    int         numProps;
+    int         maxProps;
+    char**      propList;
+
+    /*
+     * Where the VM goes to find system classes.
+     */
+    ClassPathEntry* bootClassPath;
+    /* used by the DEX optimizer to load classes from an unfinished DEX */
+    DvmDex*     bootClassPathOptExtra;
+    bool        optimizingBootstrapClass;
+
+    /*
+     * Loaded classes, hashed by class name.  Each entry is a ClassObject*,
+     * allocated in GC space.
+     */
+    HashTable*  loadedClasses;
+
+    /*
+     * Interned strings.
+     */
+    HashTable*  internedStrings;
+
+    /*
+     * Quick lookups for popular classes used internally.
+     */
+    ClassObject* unlinkedJavaLangClass;    // see unlinkedJavaLangClassObject
+    ClassObject* classJavaLangClass;
+    ClassObject* classJavaLangClassArray;
+    ClassObject* classJavaLangObject;
+    ClassObject* classJavaLangObjectArray;
+    ClassObject* classJavaLangString;
+    ClassObject* classJavaLangThread;
+    ClassObject* classJavaLangVMThread;
+    ClassObject* classJavaLangThreadGroup;
+    ClassObject* classJavaLangThrowable;
+    ClassObject* classJavaLangStackTraceElement;
+    ClassObject* classJavaLangStackTraceElementArray;
+    ClassObject* classJavaLangAnnotationAnnotationArray;
+    ClassObject* classJavaLangAnnotationAnnotationArrayArray;
+    ClassObject* classJavaLangReflectAccessibleObject;
+    ClassObject* classJavaLangReflectConstructor;
+    ClassObject* classJavaLangReflectConstructorArray;
+    ClassObject* classJavaLangReflectField;
+    ClassObject* classJavaLangReflectFieldArray;
+    ClassObject* classJavaLangReflectMethod;
+    ClassObject* classJavaLangReflectMethodArray;
+    ClassObject* classJavaLangReflectProxy;
+    ClassObject* classJavaLangExceptionInInitializerError;
+    ClassObject* classJavaLangRefReference;
+    ClassObject* classJavaSecurityAccessController;
+    ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationFactory;
+    ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationMember;
+    ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationMemberArray;
+
+    /* synthetic classes for arrays of primitives */
+    ClassObject* classArrayBoolean;
+    ClassObject* classArrayChar;
+    ClassObject* classArrayFloat;
+    ClassObject* classArrayDouble;
+    ClassObject* classArrayByte;
+    ClassObject* classArrayShort;
+    ClassObject* classArrayInt;
+    ClassObject* classArrayLong;
+
+    /* method offsets - Object */
+    int         voffJavaLangObject_equals;
+    int         voffJavaLangObject_hashCode;
+    int         voffJavaLangObject_toString;
+    int         voffJavaLangObject_finalize;
+
+    /* field offsets - Class */
+    int         offJavaLangClass_pd;
+
+    /* field offsets - String */
+    volatile int javaLangStringReady;   /* 0=not init, 1=ready, -1=initing */
+    int         offJavaLangString_value;
+    int         offJavaLangString_count;
+    int         offJavaLangString_offset;
+    int         offJavaLangString_hashCode;
+
+    /* field offsets - Thread */
+    int         offJavaLangThread_vmThread;
+    int         offJavaLangThread_group;
+    int         offJavaLangThread_daemon;
+    int         offJavaLangThread_name;
+    int         offJavaLangThread_priority;
+
+    /* method offsets - Thread */
+    int         voffJavaLangThread_run;
+
+    /* field offsets - VMThread */
+    int         offJavaLangVMThread_thread;
+    int         offJavaLangVMThread_vmData;
+
+    /* method offsets - ThreadGroup */
+    int         voffJavaLangThreadGroup_removeThread;
+
+    /* field offsets - Throwable */
+    int         offJavaLangThrowable_stackState;
+    int         offJavaLangThrowable_message;
+    int         offJavaLangThrowable_cause;
+
+    /* field offsets - java.lang.reflect.* */
+    int         offJavaLangReflectAccessibleObject_flag;
+    int         offJavaLangReflectConstructor_slot;
+    int         offJavaLangReflectConstructor_declClass;
+    int         offJavaLangReflectField_slot;
+    int         offJavaLangReflectField_declClass;
+    int         offJavaLangReflectMethod_slot;
+    int         offJavaLangReflectMethod_declClass;
+
+    /* field offsets - java.lang.ref.Reference */
+    int         offJavaLangRefReference_referent;
+    int         offJavaLangRefReference_queue;
+    int         offJavaLangRefReference_queueNext;
+    int         offJavaLangRefReference_vmData;
+
+#if FANCY_REFERENCE_SUBCLASS
+    /* method offsets - java.lang.ref.Reference */
+    int         voffJavaLangRefReference_clear;
+    int         voffJavaLangRefReference_enqueue;
+#else
+    /* method pointers - java.lang.ref.Reference */
+    Method*     methJavaLangRefReference_enqueueInternal;
+#endif
+
+    /* field offsets - java.nio.Buffer and java.nio.DirectByteBufferImpl */
+    //int         offJavaNioBuffer_capacity;
+    //int         offJavaNioDirectByteBufferImpl_pointer;
+
+    /* method pointers - java.security.AccessController */
+    volatile bool javaSecurityAccessControllerReady;
+    Method*     methJavaSecurityAccessController_doPrivileged[4];
+
+    /* constructor method pointers; no vtable involved, so use Method* */
+    Method*     methJavaLangStackTraceElement_init;
+    Method*     methJavaLangExceptionInInitializerError_init;
+    Method*     methJavaLangReflectConstructor_init;
+    Method*     methJavaLangReflectField_init;
+    Method*     methJavaLangReflectMethod_init;
+    Method*     methOrgApacheHarmonyLangAnnotationAnnotationMember_init;
+
+    /* static method pointers - android.lang.annotation.* */
+    Method*
+        methOrgApacheHarmonyLangAnnotationAnnotationFactory_createAnnotation;
+
+    /* direct method pointers - java.lang.reflect.Proxy */
+    Method*     methJavaLangReflectProxy_constructorPrototype;
+
+    /* fake native entry point method */
+    Method*     methFakeNativeEntry;
+
+    /*
+     * VM-synthesized primitive classes, for arrays.
+     */
+    ClassObject* volatile primitiveClass[PRIM_MAX];
+
+    /*
+     * A placeholder ClassObject used during ClassObject
+     * construction.
+     */
+    ClassObject  unlinkedJavaLangClassObject;
+
+    /*
+     * Thread list.  This always has at least one element in it (main),
+     * and main is always the first entry.
+     *
+     * The threadListLock is used for several things, including the thread
+     * start condition variable.  Generally speaking, you must hold the
+     * threadListLock when:
+     *  - adding/removing items from the list
+     *  - waiting on or signaling threadStartCond
+     *  - examining the Thread struct for another thread (this is to avoid
+     *    one thread freeing the Thread struct while another thread is
+     *    perusing it)
+     */
+    Thread*     threadList;
+    pthread_mutex_t threadListLock;
+
+    pthread_cond_t threadStartCond;
+
+    /*
+     * The thread code grabs this before suspending all threads.  There
+     * are four things that can cause a "suspend all":
+     *  (1) the GC is starting;
+     *  (2) the debugger has sent a "suspend all" request;
+     *  (3) a thread has hit a breakpoint or exception that the debugger
+     *      has marked as a "suspend all" event;
+     *  (4) the SignalCatcher caught a signal that requires suspension.
+     *
+     * Because we use "safe point" self-suspension, it is never safe to
+     * do a blocking "lock" call on this mutex -- if it has been acquired,
+     * somebody is probably trying to put you to sleep.  The leading '_' is
+     * intended as a reminder that this lock is special.
+     *
+     * This lock is also held while attaching an externally-created thread
+     * through JNI.  That way we can correctly set the initial suspend state.
+     */
+    pthread_mutex_t _threadSuspendLock;
+
+    /*
+     * Guards Thread->suspendCount for all threads, and provides the lock
+     * for the condition variable that all suspended threads sleep on
+     * (threadSuspendCountCond).
+     *
+     * This has to be separate from threadListLock because of the way
+     * threads put themselves to sleep.
+     */
+    pthread_mutex_t threadSuspendCountLock;
+
+    /*
+     * Suspended threads sleep on this.  They should sleep on the condition
+     * variable until their "suspend count" is zero.
+     *
+     * Paired with "threadSuspendCountLock".
+     */
+    pthread_cond_t  threadSuspendCountCond;
+
+    /*
+     * MUTEX ORDERING: when locking multiple mutexes, always grab them in
+     * this order to avoid deadlock:
+     *
+     *  (1) _threadSuspendLock      (use lockThreadSuspend())
+     *  (2) threadListLock          (use dvmLockThreadList())
+     *  (3) threadSuspendCountLock  (use lockThreadSuspendCount())
+     */
+
+
+    /*
+     * Thread ID bitmap.  We want threads to have small integer IDs so
+     * we can use them in "thin locks".
+     */
+    BitVector*  threadIdMap;
+
+    /*
+     * Manage exit conditions.  The VM exits when all non-daemon threads
+     * have exited.  If the main thread returns early, we need to sleep
+     * on a condition variable.
+     */
+    int         nonDaemonThreadCount;   /* must hold threadListLock to access */
+    //pthread_mutex_t vmExitLock;
+    pthread_cond_t  vmExitCond;
+
+    /*
+     * The set of DEX files loaded by custom class loaders.
+     */
+    HashTable*  userDexFiles;
+
+    /*
+     * JNI global reference table.
+     */
+    ReferenceTable  jniGlobalRefTable;
+    pthread_mutex_t jniGlobalRefLock;
+    int         jniGlobalRefHiMark;
+    int         jniGlobalRefLoMark;
+
+    /*
+     * Native shared library table.
+     */
+    HashTable*  nativeLibs;
+
+    /*
+     * GC heap lock.  Functions like gcMalloc() acquire this before making
+     * any changes to the heap.  It is held throughout garbage collection.
+     */
+    pthread_mutex_t gcHeapLock;
+
+    /* Opaque pointer representing the heap. */
+    GcHeap*     gcHeap;
+
+    /*
+     * Pre-allocated object for out-of-memory errors.
+     */
+    Object*     outOfMemoryObj;
+
+    /* pre-allocated general failure exception */
+    Object*     internalErrorObj;
+
+    /* Monitor list, so we can free them */
+    /*volatile*/ Monitor* monitorList;
+
+    /* Monitor for Thread.sleep() implementation */
+    Monitor*    threadSleepMon;
+
+    /* set when we create a second heap inside the zygote */
+    bool        newZygoteHeapAllocated;
+
+    /*
+     * TLS keys.
+     */
+    pthread_key_t pthreadKeySelf;       /* Thread*, for dvmThreadSelf */
+
+    /*
+     * JNI allows you to have multiple VMs, but we limit ourselves to 1,
+     * so "vmList" is really just a pointer to the one and only VM.
+     */
+    JavaVM*     vmList;
+
+    /*
+     * Cache results of "A instanceof B".
+     */
+    AtomicCache* instanceofCache;
+
+    /* instruction width table, used for optimization and verification */
+    InstructionWidth*   instrWidth;
+    /* instruction flags table, used for verification */
+    InstructionFlags*   instrFlags;
+    /* instruction format table, used for verification */
+    InstructionFormat*  instrFormat;
+
+    /*
+     * Bootstrap class loader linear allocator.
+     */
+    LinearAllocHdr* pBootLoaderAlloc;
+
+
+    /*
+     * Heap worker thread.
+     */
+    bool            heapWorkerInitialized;
+    bool            heapWorkerReady;
+    bool            haltHeapWorker;
+    pthread_t       heapWorkerHandle;
+    pthread_mutex_t heapWorkerLock;
+    pthread_cond_t  heapWorkerCond;
+    pthread_cond_t  heapWorkerIdleCond;
+    pthread_mutex_t heapWorkerListLock;
+
+    /*
+     * Compute some stats on loaded classes.
+     */
+    int             numLoadedClasses;
+    int             numDeclaredMethods;
+    int             numDeclaredInstFields;
+    int             numDeclaredStaticFields;
+
+    /*
+     * JDWP debugger support.
+     */
+    bool        debuggerConnected;      /* debugger or DDMS is connected */
+    bool        debuggerActive;         /* debugger is making requests */
+    JdwpState*  jdwpState;
+
+    /*
+     * Registry of objects known to the debugger.
+     */
+    HashTable*  dbgRegistry;
+
+    /*
+     * Breakpoint optimization table.  This is global and NOT explicitly
+     * synchronized, but all operations that modify the table are made
+     * from relatively-synchronized functions.  False-positives are
+     * possible, false-negatives (i.e. missing a breakpoint) should not be.
+     */
+    const u2*   debugBreakAddr[MAX_BREAKPOINTS];
+
+    /*
+     * Single-step control struct.  We currently only allow one thread to
+     * be single-stepping at a time, which is all that really makes sense,
+     * but it's possible we may need to expand this to be per-thread.
+     */
+    StepControl stepControl;
+
+    /*
+     * DDM features embedded in the VM.
+     */
+    bool        ddmThreadNotification;
+
+    /*
+     * Zygote (partially-started process) support
+     */
+    bool        zygote;
+
+    /*
+     * Used for tracking allocations that we report to DDMS.  When the feature
+     * is enabled (through a DDMS request) the "allocRecords" pointer becomes
+     * non-NULL.
+     */
+    pthread_mutex_t allocTrackerLock;
+    AllocRecord*    allocRecords;
+    int             allocRecordHead;        /* most-recently-added entry */
+    int             allocRecordCount;       /* #of valid entries */
+
+#ifdef WITH_ALLOC_LIMITS
+    /* set on first use of an alloc limit, never cleared */
+    bool        checkAllocLimits;
+    /* allocation limit, for setGlobalAllocationLimit() regression testing */
+    int         allocationLimit;
+#endif
+
+#ifdef WITH_DEADLOCK_PREDICTION
+    /* global lock on history tree accesses */
+    pthread_mutex_t deadlockHistoryLock;
+
+    enum { kDPOff=0, kDPWarn, kDPErr, kDPAbort } deadlockPredictMode;
+#endif
+
+#ifdef WITH_PROFILER
+    /*
+     * When a profiler is enabled, this is incremented.  Distinct profilers
+     * include "dmtrace" method tracing, emulator method tracing, and
+     * possibly instruction counting.
+     *
+     * The purpose of this is to have a single value that the interpreter
+     * can check to see if any profiling activity is enabled.
+     */
+    volatile int activeProfilers;
+
+    /*
+     * State for method-trace profiling.
+     */
+    MethodTraceState methodTrace;
+
+    /*
+     * State for emulator tracing.
+     */
+    void*       emulatorTracePage;
+    int         emulatorTraceEnableCount;
+
+    /*
+     * Global state for memory allocation profiling.
+     */
+    AllocProfState allocProf;
+
+    /*
+     * Pointers to the original methods for things that have been inlined.
+     * This makes it easy for us to output method entry/exit records for
+     * the method calls we're not actually making.
+     */
+    Method**    inlinedMethods;
+
+    /*
+     * Dalvik instruction counts (256 entries).
+     */
+    int*        executedInstrCounts;
+    bool        instructionCountEnableCount;
+#endif
+
+    /*
+     * Signal catcher thread (for SIGQUIT).
+     */
+    pthread_t   signalCatcherHandle;
+    bool        haltSignalCatcher;
+
+    /*
+     * Stdout/stderr conversion thread.
+     */
+    bool            haltStdioConverter;
+    bool            stdioConverterReady;
+    pthread_t       stdioConverterHandle;
+    pthread_mutex_t stdioConverterLock;
+    pthread_cond_t  stdioConverterCond;
+
+    /*
+     * pid of the system_server process. We track it so that when system server
+     * crashes the Zygote process will be killed and restarted.
+     */
+    pid_t systemServerPid;
+};
+
+extern struct DvmGlobals gDvm;
+
+#endif /*_DALVIK_GLOBALS*/
diff --git a/vm/Hash.c b/vm/Hash.c
new file mode 100644
index 0000000..67a25a1
--- /dev/null
+++ b/vm/Hash.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Hash table.  The dominant calls are add and lookup, with removals
+ * happening very infrequently.  We use probing, and don't worry much
+ * about tombstone removal.
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+
+/* table load factor, i.e. how full can it get before we resize */
+//#define LOAD_NUMER  3       // 75%
+//#define LOAD_DENOM  4
+#define LOAD_NUMER  5       // 62.5%
+#define LOAD_DENOM  8
+//#define LOAD_NUMER  1       // 50%
+//#define LOAD_DENOM  2
+
+/*
+ * Compute the capacity needed for a table to hold "size" elements, by
+ * scaling the element count up by the inverse of the load factor.
+ */
+size_t dvmHashSize(size_t size) {
+    size_t scaled = size * LOAD_DENOM;
+    return scaled / LOAD_NUMER + 1;
+}
+
+
+/*
+ * Create and initialize a hash table.
+ *
+ * "initialSize" is the number of slots to start with (rounded up to a
+ * power of 2); pass the result of dvmHashSize() if you know the final
+ * element count.  "freeFunc" may be NULL if entry data needs no cleanup.
+ *
+ * Returns NULL if either allocation fails.
+ */
+HashTable* dvmHashTableCreate(size_t initialSize, HashFreeFunc freeFunc)
+{
+    HashTable* pHashTable;
+
+    assert(initialSize > 0);
+
+    pHashTable = (HashTable*) malloc(sizeof(*pHashTable));
+    if (pHashTable == NULL)
+        return NULL;
+
+    pHashTable->tableSize = dexRoundUpPower2(initialSize);
+    pHashTable->numEntries = pHashTable->numDeadEntries = 0;
+    pHashTable->freeFunc = freeFunc;
+
+    /*
+     * calloc() both zero-fills the slots (NULL "data" == empty slot) and
+     * checks the count*size multiplication for overflow, unlike the
+     * malloc+memset pair it replaces.
+     */
+    pHashTable->pEntries =
+        (HashEntry*) calloc(pHashTable->tableSize, sizeof(HashEntry));
+    if (pHashTable->pEntries == NULL) {
+        free(pHashTable);
+        return NULL;
+    }
+
+    /* init the lock last, so the failure path has nothing to tear down */
+    dvmInitMutex(&pHashTable->lock);
+
+    return pHashTable;
+}
+
+/*
+ * Clear out all entries, live and dead alike, invoking the table's
+ * "free" function (if any) on each live entry's data.
+ */
+void dvmHashTableClear(HashTable* pHashTable)
+{
+    int idx;
+
+    for (idx = 0; idx < pHashTable->tableSize; idx++) {
+        HashEntry* pEnt = &pHashTable->pEntries[idx];
+        void* data = pEnt->data;
+
+        /* only live entries get the free callback; tombstones are
+         * just markers and hold no data */
+        if (data != NULL && data != HASH_TOMBSTONE &&
+            pHashTable->freeFunc != NULL)
+        {
+            (*pHashTable->freeFunc)(data);
+        }
+        pEnt->data = NULL;
+    }
+
+    pHashTable->numEntries = 0;
+    pHashTable->numDeadEntries = 0;
+}
+
+/*
+ * Free the table.  Runs a "clear" first so entry data is released
+ * through the table's free function.  NULL is tolerated.
+ */
+void dvmHashTableFree(HashTable* pHashTable)
+{
+    if (pHashTable != NULL) {
+        dvmHashTableClear(pHashTable);
+        free(pHashTable->pEntries);
+        free(pHashTable);
+    }
+}
+
+#ifndef NDEBUG
+/*
+ * Count up the number of tombstone entries in the hash table.  Debug
+ * builds only; used to cross-check "numDeadEntries" in assertions.
+ */
+static int countTombStones(HashTable* pHashTable)
+{
+    int idx;
+    int numDead = 0;
+
+    for (idx = 0; idx < pHashTable->tableSize; idx++) {
+        if (pHashTable->pEntries[idx].data == HASH_TOMBSTONE)
+            numDead++;
+    }
+    return numDead;
+}
+#endif
+
+/*
+ * Resize a hash table.  We do this when adding an entry increased the
+ * size of the table beyond its comfy limit.
+ *
+ * This essentially requires re-inserting all elements into the new storage.
+ * Tombstones are NOT copied over, so a resize also compacts the table.
+ *
+ * "newSize" must be a power of 2 (the probe logic masks with newSize-1).
+ *
+ * If multiple threads can access the hash table, the table's lock should
+ * have been grabbed before issuing the "lookup+add" call that led to the
+ * resize, so we don't have a synchronization problem here.
+ *
+ * Returns "false" if the new storage could not be allocated (the old
+ * table is left untouched in that case).
+ */
+static bool resizeHash(HashTable* pHashTable, int newSize)
+{
+    HashEntry* pNewEntries;
+    int i;
+
+    assert(countTombStones(pHashTable) == pHashTable->numDeadEntries);
+    //LOGI("before: dead=%d\n", pHashTable->numDeadEntries);
+
+    /* calloc gives us all-NULL "data" fields, i.e. all slots empty */
+    pNewEntries = (HashEntry*) calloc(newSize, sizeof(HashEntry));
+    if (pNewEntries == NULL)
+        return false;
+
+    for (i = 0; i < pHashTable->tableSize; i++) {
+        void* data = pHashTable->pEntries[i].data;
+        if (data != NULL && data != HASH_TOMBSTONE) {
+            /* cached hash saves a recompute on every re-insert */
+            int hashValue = pHashTable->pEntries[i].hashValue;
+            int newIdx;
+
+            /* probe for new spot, wrapping around */
+            newIdx = hashValue & (newSize-1);
+            while (pNewEntries[newIdx].data != NULL)
+                newIdx = (newIdx + 1) & (newSize-1);
+
+            pNewEntries[newIdx].hashValue = hashValue;
+            pNewEntries[newIdx].data = data;
+        }
+    }
+
+    free(pHashTable->pEntries);
+    pHashTable->pEntries = pNewEntries;
+    pHashTable->tableSize = newSize;
+    /* no tombstones were carried across */
+    pHashTable->numDeadEntries = 0;
+
+    assert(countTombStones(pHashTable) == 0);
+    return true;
+}
+
+/*
+ * Look up an entry, optionally adding "item" if it isn't already present.
+ *
+ * We probe on collisions, wrapping around the table.  "itemHash" is the
+ * caller-computed hash of "item"; "cmpFunc" breaks ties between entries
+ * whose cached hash values match.
+ *
+ * Returns the matching entry's data, "item" itself if it was just added,
+ * or NULL when not found and "doAdd" is false.  An add may trigger a
+ * table resize.  Callers sharing the table across threads must hold the
+ * table lock around this call.
+ */
+void* dvmHashTableLookup(HashTable* pHashTable, u4 itemHash, void* item,
+    HashCompareFunc cmpFunc, bool doAdd)
+{
+    HashEntry* pEntry;
+    HashEntry* pEnd;
+    void* result = NULL;
+
+    assert(pHashTable->tableSize > 0);
+    assert(item != HASH_TOMBSTONE);
+    assert(item != NULL);
+
+    /* jump to the first entry and probe for a match */
+    pEntry = &pHashTable->pEntries[itemHash & (pHashTable->tableSize-1)];
+    pEnd = &pHashTable->pEntries[pHashTable->tableSize];
+    while (pEntry->data != NULL) {
+        /* tombstones are stepped over; they don't end the probe chain */
+        if (pEntry->data != HASH_TOMBSTONE &&
+            pEntry->hashValue == itemHash &&
+            (*cmpFunc)(pEntry->data, item) == 0)
+        {
+            /* match */
+            //LOGD("+++ match on entry %d\n", pEntry - pHashTable->pEntries);
+            break;
+        }
+
+        pEntry++;
+        if (pEntry == pEnd) {     /* wrap around to start */
+            if (pHashTable->tableSize == 1)
+                break;      /* edge case - single-entry table */
+            pEntry = pHashTable->pEntries;
+        }
+
+        //LOGI("+++ look probing %d...\n", pEntry - pHashTable->pEntries);
+    }
+
+    if (pEntry->data == NULL) {
+        if (doAdd) {
+            pEntry->hashValue = itemHash;
+            pEntry->data = item;
+            pHashTable->numEntries++;
+
+            /*
+             * We've added an entry.  See if this brings us too close to full.
+             * Tombstones count toward the load factor here because they
+             * still lengthen probe chains.
+             */
+            if ((pHashTable->numEntries+pHashTable->numDeadEntries) * LOAD_DENOM
+                > pHashTable->tableSize * LOAD_NUMER)
+            {
+                if (!resizeHash(pHashTable, pHashTable->tableSize * 2)) {
+                    /* don't really have a way to indicate failure */
+                    LOGE("Dalvik hash resize failure\n");
+                    dvmAbort();
+                }
+                /* note "pEntry" is now invalid */
+            } else {
+                //LOGW("okay %d/%d/%d\n",
+                //    pHashTable->numEntries, pHashTable->tableSize,
+                //    (pHashTable->tableSize * LOAD_NUMER) / LOAD_DENOM);
+            }
+
+            /* full table is bad -- search for nonexistent never halts */
+            assert(pHashTable->numEntries < pHashTable->tableSize);
+            result = item;
+        } else {
+            assert(result == NULL);
+        }
+    } else {
+        result = pEntry->data;
+    }
+
+    return result;
+}
+
+/*
+ * Remove an entry from the table.  Matching is by pointer identity:
+ * "item" must be the exact data pointer stored in the table ("itemHash"
+ * only picks the probe chain's starting slot).
+ *
+ * Does NOT invoke the "free" function on the item.
+ *
+ * Returns "true" if the entry was found and removed.  The slot is left
+ * as a tombstone so other entries' probe chains stay intact.
+ */
+bool dvmHashTableRemove(HashTable* pHashTable, u4 itemHash, void* item)
+{
+    HashEntry* pEntry;
+    HashEntry* pEnd;
+
+    assert(pHashTable->tableSize > 0);
+
+    /* jump to the first entry and probe for a match */
+    pEntry = &pHashTable->pEntries[itemHash & (pHashTable->tableSize-1)];
+    pEnd = &pHashTable->pEntries[pHashTable->tableSize];
+    while (pEntry->data != NULL) {
+        if (pEntry->data == item) {
+            //LOGI("+++ stepping on entry %d\n", pEntry - pHashTable->pEntries);
+            pEntry->data = HASH_TOMBSTONE;
+            pHashTable->numEntries--;
+            pHashTable->numDeadEntries++;
+            return true;
+        }
+
+        pEntry++;
+        if (pEntry == pEnd) {     /* wrap around to start */
+            if (pHashTable->tableSize == 1)
+                break;      /* edge case - single-entry table */
+            pEntry = pHashTable->pEntries;
+        }
+
+        //LOGI("+++ del probing %d...\n", pEntry - pHashTable->pEntries);
+    }
+
+    return false;
+}
+
+/*
+ * Scan every live entry in the hash table and evaluate it with "func".
+ * If the function returns 1, the entry is detached from the table (a
+ * tombstone is left behind so probe chains keep working).
+ *
+ * Does NOT invoke the "free" function on the item.
+ *
+ * A return value other than 0 or 1 from "func" aborts the scan, and is
+ * passed back to the caller; otherwise 0 is returned.
+ */
+int dvmHashForeachRemove(HashTable* pHashTable, HashForeachRemoveFunc func)
+{
+    int idx;
+
+    for (idx = 0; idx < pHashTable->tableSize; idx++) {
+        HashEntry* pEntry = &pHashTable->pEntries[idx];
+        int decision;
+
+        /* skip empty slots and previously-deleted entries */
+        if (pEntry->data == NULL || pEntry->data == HASH_TOMBSTONE)
+            continue;
+
+        decision = (*func)(pEntry->data);
+        if (decision == 1) {
+            pEntry->data = HASH_TOMBSTONE;
+            pHashTable->numEntries--;
+            pHashTable->numDeadEntries++;
+        } else if (decision != 0) {
+            return decision;
+        }
+    }
+    return 0;
+}
+
+
+/*
+ * Execute a function on every live entry in the hash table.
+ *
+ * If "func" returns a nonzero value, terminate early and return the value.
+ */
+int dvmHashForeach(HashTable* pHashTable, HashForeachFunc func, void* arg)
+{
+    int idx;
+
+    for (idx = 0; idx < pHashTable->tableSize; idx++) {
+        void* data = pHashTable->pEntries[idx].data;
+        int result;
+
+        /* empty slots and tombstones are not handed to the callback */
+        if (data == NULL || data == HASH_TOMBSTONE)
+            continue;
+
+        result = (*func)(data, arg);
+        if (result != 0)
+            return result;
+    }
+
+    return 0;
+}
+
+
+/*
+ * Look up an entry, counting the number of times we have to probe.
+ * This mirrors the probe loop in dvmHashTableLookup(); keep the two
+ * in sync.
+ *
+ * Returns -1 if the entry wasn't found.
+ */
+static int countProbes(HashTable* pHashTable, u4 itemHash, const void* item,
+    HashCompareFunc cmpFunc)
+{
+    HashEntry* pEntry;
+    HashEntry* pEnd;
+    int count = 0;
+
+    assert(pHashTable->tableSize > 0);
+    assert(item != HASH_TOMBSTONE);
+    assert(item != NULL);
+
+    /* jump to the first entry and probe for a match */
+    pEntry = &pHashTable->pEntries[itemHash & (pHashTable->tableSize-1)];
+    pEnd = &pHashTable->pEntries[pHashTable->tableSize];
+    while (pEntry->data != NULL) {
+        /* tombstones don't match but do cost a probe */
+        if (pEntry->data != HASH_TOMBSTONE &&
+            pEntry->hashValue == itemHash &&
+            (*cmpFunc)(pEntry->data, item) == 0)
+        {
+            /* match */
+            break;
+        }
+
+        pEntry++;
+        if (pEntry == pEnd) {     /* wrap around to start */
+            if (pHashTable->tableSize == 1)
+                break;      /* edge case - single-entry table */
+            pEntry = pHashTable->pEntries;
+        }
+
+        count++;
+    }
+    if (pEntry->data == NULL)
+        return -1;
+
+    return count;
+}
+
+/*
+ * Evaluate the amount of probing required for the specified hash table.
+ *
+ * We do this by running through all entries in the hash table, computing
+ * the hash value with "calcFunc" and then doing a lookup, counting the
+ * probes each lookup needs.  Stats go to the log.
+ *
+ * The caller should lock the table before calling here.
+ */
+void dvmHashTableProbeCount(HashTable* pHashTable, HashCalcFunc calcFunc,
+    HashCompareFunc cmpFunc)
+{
+    int numEntries, minProbe, maxProbe, totalProbe;
+    HashIter iter;
+
+    numEntries = maxProbe = totalProbe = 0;
+    minProbe = 65536*32767;
+
+    for (dvmHashIterBegin(pHashTable, &iter); !dvmHashIterDone(&iter);
+        dvmHashIterNext(&iter))
+    {
+        const void* data = (const void*)dvmHashIterData(&iter);
+        int count;
+
+        count = countProbes(pHashTable, (*calcFunc)(data), data, cmpFunc);
+
+        numEntries++;
+
+        if (count < minProbe)
+            minProbe = count;
+        if (count > maxProbe)
+            maxProbe = count;
+        totalProbe += count;
+    }
+
+    /*
+     * An empty table has no meaningful stats, and the average below
+     * would divide zero by zero; report and bail instead.
+     */
+    if (numEntries == 0) {
+        LOGI("Probe: empty table (size=%d)\n", pHashTable->tableSize);
+        return;
+    }
+
+    LOGI("Probe: min=%d max=%d, total=%d in %d (%d), avg=%.3f\n",
+        minProbe, maxProbe, totalProbe, numEntries, pHashTable->tableSize,
+        (float) totalProbe / (float) numEntries);
+}
diff --git a/vm/Hash.h b/vm/Hash.h
new file mode 100644
index 0000000..8dcf6be
--- /dev/null
+++ b/vm/Hash.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * General purpose hash table, used for finding classes, methods, etc.
+ *
+ * When the number of elements reaches a certain percentage of the table's
+ * capacity, the table will be resized.
+ */
+#ifndef _DALVIK_HASH
+#define _DALVIK_HASH
+
+/* compute the hash of an item with a specific type */
+typedef u4 (*HashCompute)(const void* item);
+
+/*
+ * Compare a hash entry with a "loose" item after their hash values match.
+ * Returns { <0, 0, >0 } depending on ordering of items (same semantics
+ * as strcmp()).
+ */
+typedef int (*HashCompareFunc)(const void* tableItem, const void* looseItem);
+
+/*
+ * This function will be used to free entries in the table.  This can be
+ * NULL if no free is required, free(), or a custom function.
+ */
+typedef void (*HashFreeFunc)(void* ptr);
+
+/*
+ * Used by dvmHashForeach().
+ */
+typedef int (*HashForeachFunc)(void* data, void* arg);
+
+/*
+ * Used by dvmHashForeachRemove().
+ */
+typedef int (*HashForeachRemoveFunc)(void* data);
+
+/*
+ * One entry in the hash table.  "data" values are expected to be (or have
+ * the same characteristics as) valid pointers.  In particular, a NULL
+ * value for "data" indicates an empty slot, and HASH_TOMBSTONE indicates
+ * a no-longer-used slot that must be stepped over during probing.
+ *
+ * Attempting to add a NULL or tombstone value is an error.
+ *
+ * When an entry is released, we will call (HashFreeFunc)(entry->data).
+ */
+typedef struct HashEntry {
+    u4 hashValue;       /* cached hash; saves recompute on probe/resize */
+    void* data;         /* NULL = empty, HASH_TOMBSTONE = deleted */
+} HashEntry;
+
+#define HASH_TOMBSTONE ((void*) 0xcbcacccd)     // invalid ptr value
+
+/*
+ * Expandable hash table.
+ *
+ * This structure should be considered opaque.
+ */
+typedef struct HashTable {
+    int         tableSize;          /* must be power of 2 */
+    int         numEntries;         /* current #of "live" entries */
+    int         numDeadEntries;     /* current #of tombstone entries */
+    HashEntry*  pEntries;           /* array on heap */
+    HashFreeFunc freeFunc;          /* may be NULL; called on entry data */
+    pthread_mutex_t lock;           /* take via dvmHashTableLock() */
+} HashTable;
+
+/*
+ * Create and initialize a HashTable structure, using "initialSize" as
+ * a basis for the initial capacity of the table.  (The actual initial
+ * table size may be adjusted upward.)  If you know exactly how many
+ * elements the table will hold, pass the result from dvmHashSize() in.)
+ *
+ * Returns "false" if unable to allocate the table.
+ */
+HashTable* dvmHashTableCreate(size_t initialSize, HashFreeFunc freeFunc);
+
+/*
+ * Compute the capacity needed for a table to hold "size" elements.  Use
+ * this when you know ahead of time how many elements the table will hold.
+ * Pass this value into dvmHashTableCreate() to ensure that you can add
+ * all elements without needing to reallocate the table.
+ */
+size_t dvmHashSize(size_t size);
+
+/*
+ * Clear out a hash table, freeing the contents of any used entries.
+ */
+void dvmHashTableClear(HashTable* pHashTable);
+
+/*
+ * Free a hash table.  Performs a "clear" first.
+ */
+void dvmHashTableFree(HashTable* pHashTable);
+
+/*
+ * Exclusive access.  Important when adding items to a table, or when
+ * doing any operations on a table that could be added to by another thread.
+ */
+INLINE void dvmHashTableLock(HashTable* pHashTable) {
+    dvmLockMutex(&pHashTable->lock);
+}
+INLINE void dvmHashTableUnlock(HashTable* pHashTable) {
+    dvmUnlockMutex(&pHashTable->lock);
+}
+
+/*
+ * Get #of entries in hash table.  Counts live entries only; tombstones
+ * are excluded.
+ */
+INLINE int dvmHashTableNumEntries(HashTable* pHashTable) {
+    return pHashTable->numEntries;
+}
+
+/*
+ * Get total size of hash table (for memory usage calculations).  Covers
+ * the table struct and slot array only, not the entry data itself.
+ */
+INLINE int dvmHashTableMemUsage(HashTable* pHashTable) {
+    return sizeof(HashTable) + pHashTable->tableSize * sizeof(HashEntry);
+}
+
+/*
+ * Look up an entry in the table, possibly adding it if it's not there.
+ *
+ * If "item" is not found, and "doAdd" is false, NULL is returned.
+ * Otherwise, a pointer to the found or added item is returned.  (You can
+ * tell the difference by seeing if return value == item.)
+ *
+ * An "add" operation may cause the entire table to be reallocated.
+ */
+void* dvmHashTableLookup(HashTable* pHashTable, u4 itemHash, void* item,
+    HashCompareFunc cmpFunc, bool doAdd);
+
+/*
+ * Remove an item from the hash table, given its "data" pointer.  Does not
+ * invoke the "free" function; just detaches it from the table.
+ */
+bool dvmHashTableRemove(HashTable* pHashTable, u4 hash, void* item);
+
+/*
+ * Execute "func" on every entry in the hash table.
+ *
+ * If "func" returns a nonzero value, terminate early and return the value.
+ */
+int dvmHashForeach(HashTable* pHashTable, HashForeachFunc func, void* arg);
+
+/*
+ * Execute "func" on every entry in the hash table.
+ *
+ * If "func" returns 1 detach the entry from the hash table. Does not invoke
+ * the "free" function.
+ *
+ * Returning values other than 0 or 1 from "func" will abort the routine.
+ */
+int dvmHashForeachRemove(HashTable* pHashTable, HashForeachRemoveFunc func);
+
+/*
+ * An alternative to dvmHashForeach(), using an iterator.  Visits live
+ * entries only (empty slots and tombstones are skipped).
+ *
+ * Use like this:
+ *   HashIter iter;
+ *   for (dvmHashIterBegin(hashTable, &iter); !dvmHashIterDone(&iter);
+ *       dvmHashIterNext(&iter))
+ *   {
+ *       MyData* data = (MyData*)dvmHashIterData(&iter);
+ *   }
+ */
+typedef struct HashIter {
+    void*       data;       /* NOTE(review): unused by the inlines below */
+    HashTable*  pHashTable; /* table being walked */
+    int         idx;        /* current slot; == tableSize when done */
+} HashIter;
+/* advance to the next live entry (or past the end) */
+INLINE void dvmHashIterNext(HashIter* pIter) {
+    int i = pIter->idx +1;
+    int lim = pIter->pHashTable->tableSize;
+    for ( ; i < lim; i++) {
+        void* data = pIter->pHashTable->pEntries[i].data;
+        if (data != NULL && data != HASH_TOMBSTONE)
+            break;
+    }
+    pIter->idx = i;
+}
+/* position the iterator on the first live entry */
+INLINE void dvmHashIterBegin(HashTable* pHashTable, HashIter* pIter) {
+    pIter->pHashTable = pHashTable;
+    pIter->idx = -1;
+    dvmHashIterNext(pIter);
+}
+/* true once the iterator has walked off the end of the table */
+INLINE bool dvmHashIterDone(HashIter* pIter) {
+    return (pIter->idx >= pIter->pHashTable->tableSize);
+}
+/* fetch the current entry's data; only valid while !dvmHashIterDone() */
+INLINE void* dvmHashIterData(HashIter* pIter) {
+    assert(pIter->idx >= 0 && pIter->idx < pIter->pHashTable->tableSize);
+    return pIter->pHashTable->pEntries[pIter->idx].data;
+}
+
+
+/*
+ * Evaluate hash table performance by examining the number of times we
+ * have to probe for an entry.
+ *
+ * The caller should lock the table beforehand.
+ */
+typedef u4 (*HashCalcFunc)(const void* item);
+void dvmHashTableProbeCount(HashTable* pHashTable, HashCalcFunc calcFunc,
+    HashCompareFunc cmpFunc);
+
+#endif /*_DALVIK_HASH*/
diff --git a/vm/Init.c b/vm/Init.c
new file mode 100644
index 0000000..240744c
--- /dev/null
+++ b/vm/Init.c
@@ -0,0 +1,1397 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik initialization, shutdown, and command-line argument processing.
+ */
+#include "Dalvik.h"
+#include "test/Test.h"
+#include "mterp/Mterp.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <limits.h>
+#include <ctype.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#define kMinHeapStartSize   (1*1024*1024)
+#define kMinHeapSize        (2*1024*1024)
+#define kMaxHeapSize        (1*1024*1024*1024)
+
+/*
+ * Register VM-agnostic native methods for system classes.
+ *
+ * Currently defined in ../include/nativehelper/AndroidSystemNatives.h
+ */
+extern int jniRegisterSystemMethods(JNIEnv* env);
+
+/* fwd */
+static bool registerSystemNatives(JNIEnv* pEnv);
+static bool dvmInitJDWP(void);
+static bool dvmInitZygote(void);
+
+
+/* global state */
+struct DvmGlobals gDvm;
+
+/*
+ * Show usage, writing everything through dvmFprintf() on stderr.
+ *
+ * We follow the tradition of unhyphenated compound words.
+ */
+static void dvmUsage(const char* progName)
+{
+    dvmFprintf(stderr, "%s: [options] class [argument ...]\n", progName);
+    dvmFprintf(stderr, "%s: [options] -jar file.jar [argument ...]\n",progName);
+    dvmFprintf(stderr, "\n");
+    dvmFprintf(stderr, "The following standard options are recognized:\n");
+    dvmFprintf(stderr, "  -classpath classpath\n");
+    dvmFprintf(stderr, "  -Dproperty=value\n");
+    dvmFprintf(stderr, "  -verbose:tag  ('gc', 'jni', or 'class')\n");
+    dvmFprintf(stderr, "  -ea[:<package name>... |:<class name>]\n");
+    dvmFprintf(stderr, "  -da[:<package name>... |:<class name>]\n");
+    dvmFprintf(stderr, "   (-enableassertions, -disableassertions)\n");
+    dvmFprintf(stderr, "  -esa\n");
+    dvmFprintf(stderr, "  -dsa\n");
+    dvmFprintf(stderr,
+                "   (-enablesystemassertions, -disablesystemassertions)\n");
+    dvmFprintf(stderr, "  -showversion\n");
+    dvmFprintf(stderr, "  -help\n");
+    dvmFprintf(stderr, "\n");
+    dvmFprintf(stderr, "The following extended options are recognized:\n");
+    dvmFprintf(stderr, "  -Xrunjdwp:<options>\n");
+    dvmFprintf(stderr, "  -Xbootclasspath:bootclasspath\n");
+    dvmFprintf(stderr, "  -Xcheck:tag  (e.g. 'jni', 'jni-warnonly')\n");
+    dvmFprintf(stderr, "  -XmsN  (min heap, must be multiple of 1K, >= 1MB)\n");
+    dvmFprintf(stderr, "  -XmxN  (max heap, must be multiple of 1K, >= 2MB)\n");
+    dvmFprintf(stderr, "  -XssN  (stack size, >= %dKB, <= %dKB)\n",
+        kMinStackSize / 1024, kMaxStackSize / 1024);
+    dvmFprintf(stderr, "  -Xverify:{none,remote,all}\n");
+    dvmFprintf(stderr, "  -Xrs\n");
+    dvmFprintf(stderr,
+                "  -Xint  (extended to accept ':portable' and ':fast')\n");
+    dvmFprintf(stderr, "\n");
+    dvmFprintf(stderr, "These are unique to Dalvik:\n");
+    dvmFprintf(stderr, "  -Xzygote\n");
+    dvmFprintf(stderr, "  -Xdexopt:{none,verified,all}\n");
+    dvmFprintf(stderr, "  -Xnoquithandler\n");
+    dvmFprintf(stderr,
+                "  -Xjnigreflimit:N  (must be multiple of 100, >= 200)\n");
+    dvmFprintf(stderr, "  -Xdeadlockpredict:{off,warn,err,abort}\n");
+    dvmFprintf(stderr, "  -Xstacktracefile:<file name>");
+    dvmFprintf(stderr, "\n\n");
+    /*
+     * One call: the compile-time feature list below is assembled by the
+     * preprocessor from the WITH_* / debug defines.
+     */
+    dvmFprintf(stderr, "Configured with:"
+#ifdef WITH_DEBUGGER
+        " debugger"
+#endif
+#ifdef WITH_PROFILER
+        " profiler"
+#endif
+#ifdef WITH_MONITOR_TRACKING
+        " monitor_tracking"
+#endif
+#ifdef WITH_DEADLOCK_PREDICTION
+        " deadlock_prediction"
+#endif
+#ifdef WITH_HPROF
+        " hprof"
+#endif
+#ifdef WITH_HPROF_STACK
+        " hprof_stack"
+#endif
+#ifdef WITH_HPROF_STACK_UNREACHABLE
+        " hprof_stack_unreachable"
+#endif
+#ifdef WITH_ALLOC_LIMITS
+        " alloc_limits"
+#endif
+#ifdef WITH_TRACKREF_CHECKS
+        " trackref_checks"
+#endif
+#ifdef WITH_INSTR_CHECKS
+        " instr_checks"
+#endif
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+        " extra_object_validation"
+#endif
+#ifdef WITH_DALVIK_ASSERT
+        " dalvik_assert"
+#endif
+#ifdef WITH_JNI_STACK_CHECK
+        " jni_stack_check"
+#endif
+#ifdef EASY_GDB
+        " easy_gdb"
+#endif
+#ifdef CHECK_MUTEX
+        " check_mutex"
+#endif
+#ifdef PROFILE_FIELD_ACCESS
+        " profile_field_access"
+#endif
+#ifdef DVM_TRACK_HEAP_MARKING
+        " track_heap_marking"
+#endif
+    );
+#ifdef DVM_SHOW_EXCEPTION
+    dvmFprintf(stderr, " show_exception=%d", DVM_SHOW_EXCEPTION);
+#endif
+    dvmFprintf(stderr, "\n\n");
+}
+
+/*
+ * Show helpful information on JDWP options: one sample command line for
+ * listening (server=y) and one for attaching out (server=n).
+ */
+static void showJdwpHelp(void)
+{
+    static const char* const kExamples[] = {
+        "Example: -Xrunjdwp:transport=dt_socket,address=8000,server=y\n",
+        "Example: -Xrunjdwp:transport=dt_socket,address=localhost:6500,server=n\n",
+    };
+    size_t idx;
+
+    for (idx = 0; idx < sizeof(kExamples) / sizeof(kExamples[0]); idx++)
+        dvmFprintf(stderr, "%s", kExamples[idx]);
+}
+
+/*
+ * Show version and copyright info.  Written to stdout (not stderr),
+ * since -showversion output is the requested result, not a diagnostic.
+ */
+static void showVersion(void)
+{
+    dvmFprintf(stdout, "DalvikVM version %d.%d.%d\n",
+        DALVIK_MAJOR_VERSION, DALVIK_MINOR_VERSION, DALVIK_BUG_VERSION);
+    dvmFprintf(stdout, 
+        "Copyright (C) 2007 The Android Open Source Project\n\n"
+        "This software is built from source code licensed under the "
+        "Apache License,\n"
+        "Version 2.0 (the \"License\"). You may obtain a copy of the "
+        "License at\n\n"
+        "     http://www.apache.org/licenses/LICENSE-2.0\n\n"
+        "See the associated NOTICE file for this software for further "
+        "details.\n");
+}
+
+/*
+ * Parse a string of the form /[0-9]+[kKmMgG]?/, which is used to specify
+ * memory sizes.  [kK] indicates kilobytes, [mM] megabytes, and
+ * [gG] gigabytes.
+ *
+ * "s" should point just past the "-Xm?" part of the string.
+ * "div" specifies a divisor, e.g. 1024 if the value must be a multiple
+ * of 1024.
+ *
+ * The spec says the -Xmx and -Xms options must be multiples of 1024.  It
+ * doesn't say anything about -Xss.
+ *
+ * Returns 0 (a useless size) if "s" is malformed or specifies a low or
+ * non-evenly-divisible value.
+ */
+static unsigned int dvmParseMemOption(const char *s, unsigned int div)
+{
+    const char *s2;
+    unsigned int val;
+
+    /*
+     * strtoul accepts a leading [+-], which we don't want, so make sure
+     * our string starts with a decimal digit.  (Cast to unsigned char:
+     * passing a negative plain char to isdigit() is undefined behavior.)
+     */
+    if (!isdigit((unsigned char) *s))
+        return 0;
+
+    val = (unsigned int) strtoul(s, (char **) &s2, 10);
+    if (s2 == s)
+        return 0;           /* no digits consumed */
+
+    /*
+     * "s2" points just past the number.  If this is the end of the
+     * string, the user specified bytes; otherwise exactly one more
+     * character is allowed, and it must be a multiplier.  (The original
+     * code also handled a '\0' multiplier here, but that branch was
+     * unreachable: we only fetch the character when *s2 != '\0'.)
+     */
+    if (*s2 != '\0') {
+        unsigned int mul;
+
+        switch (*s2++) {
+        case 'k': case 'K':
+            mul = 1024;
+            break;
+        case 'm': case 'M':
+            mul = 1024 * 1024;
+            break;
+        case 'g': case 'G':
+            mul = 1024 * 1024 * 1024;
+            break;
+        default:
+            return 0;       /* unknown multiplier character */
+        }
+
+        if (*s2 != '\0')
+            return 0;       /* junk after the multiplier */
+
+        if (val <= UINT_MAX / mul) {
+            val *= mul;
+        } else {
+            /* overflow; clamp to a multiple of 1024 */
+            val = UINT_MAX & ~(1024-1);
+        }
+    }
+
+    /*
+     * The man page says that a -Xm value must be a multiple of 1024.
+     */
+    if (val % div != 0)
+        return 0;
+
+    return val;
+}
+
+/*
+ * Handle one of the JDWP name/value pairs.
+ *
+ * JDWP options are:
+ *  help: if specified, show help message and bail
+ *  transport: may be dt_socket or dt_shmem
+ *  address: for dt_socket, "host:port", or just "port" when listening
+ *  server: if "y", wait for debugger to attach; if "n", attach to debugger
+ *  timeout: how long to wait for debugger to connect / listen
+ *
+ * Useful with server=n (these aren't supported yet):
+ *  onthrow=<exception-name>: connect to debugger when exception thrown
+ *  onuncaught=y|n: connect to debugger when uncaught exception thrown
+ *  launch=<command-line>: launch the debugger itself
+ *
+ * The "transport" option is required, as is "address" if server=n.
+ *
+ * Recognized values land in gDvm.jdwp*.  Returns "false" on malformed
+ * values; unrecognized option names are logged and ignored.
+ */
+static bool handleJdwpOption(const char* name, const char* value)
+{
+    if (strcmp(name, "transport") == 0) {
+        if (strcmp(value, "dt_socket") == 0) {
+            gDvm.jdwpTransport = kJdwpTransportSocket;
+        } else if (strcmp(value, "dt_android_adb") == 0) {
+            gDvm.jdwpTransport = kJdwpTransportAndroidAdb;
+        } else {
+            LOGE("JDWP transport '%s' not supported\n", value);
+            return false;
+        }
+    } else if (strcmp(name, "server") == 0) {
+        if (*value == 'n')
+            gDvm.jdwpServer = false;
+        else if (*value == 'y')
+            gDvm.jdwpServer = true;
+        else {
+            LOGE("JDWP option 'server' must be 'y' or 'n'\n");
+            return false;
+        }
+    } else if (strcmp(name, "suspend") == 0) {
+        if (*value == 'n')
+            gDvm.jdwpSuspend = false;
+        else if (*value == 'y')
+            gDvm.jdwpSuspend = true;
+        else {
+            LOGE("JDWP option 'suspend' must be 'y' or 'n'\n");
+            return false;
+        }
+    } else if (strcmp(name, "address") == 0) {
+        /* this is either <port> or <host>:<port> */
+        const char* colon = strchr(value, ':');
+        char* end;
+        long port;
+
+        if (colon != NULL) {
+            /*
+             * Copy out the host portion.  Allocate and verify the new
+             * buffer BEFORE releasing the old one: the original code
+             * passed an unchecked malloc() result straight to strncpy(),
+             * dereferencing NULL on allocation failure.
+             */
+            char* newHost = (char*) malloc(colon - value +1);
+            if (newHost == NULL) {
+                LOGE("JDWP option 'address': out of memory\n");
+                return false;
+            }
+            memcpy(newHost, value, colon - value);
+            newHost[colon - value] = '\0';
+            free(gDvm.jdwpHost);
+            gDvm.jdwpHost = newHost;
+            value = colon + 1;
+        }
+        if (*value == '\0') {
+            LOGE("JDWP address missing port\n");
+            return false;
+        }
+        port = strtol(value, &end, 10);
+        if (*end != '\0') {
+            LOGE("JDWP address has junk in port field '%s'\n", value);
+            return false;
+        }
+        gDvm.jdwpPort = port;
+    } else if (strcmp(name, "launch") == 0 ||
+               strcmp(name, "onthrow") == 0 ||
+               strcmp(name, "oncaught") == 0 ||
+               strcmp(name, "timeout") == 0)
+    {
+        /* valid but unsupported */
+        LOGI("Ignoring JDWP option '%s'='%s'\n", name, value);
+    } else {
+        LOGI("Ignoring unrecognized JDWP option '%s'='%s'\n", name, value);
+    }
+
+    return true;
+}
+
+/*
+ * Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
+ * "transport=dt_socket,address=8000,server=y,suspend=n"
+ *
+ * Individual values land in gDvm.jdwp* via handleJdwpOption().  Returns
+ * "true" only if every pair parsed and the combination is usable.
+ */
+static bool parseJdwpOptions(const char* str)
+{
+    char* mangle = strdup(str);
+    char* name = mangle;
+    bool result = false;
+
+    /*
+     * strdup() can fail, and everything below scribbles on the copy, so
+     * bail out before touching it.  (The original code never checked.)
+     */
+    if (mangle == NULL) {
+        LOGE("JDWP opts: out of memory\n");
+        return false;
+    }
+
+    /*
+     * Process all of the name=value pairs.
+     */
+    while (true) {
+        char* value;
+        char* comma;
+
+        value = strchr(name, '=');
+        if (value == NULL) {
+            LOGE("JDWP opts: garbage at '%s'\n", name);
+            goto bail;
+        }
+
+        comma = strchr(name, ',');      // use name, not value, for safety
+        if (comma != NULL) {
+            if (comma < value) {
+                LOGE("JDWP opts: found comma before '=' in '%s'\n", mangle);
+                goto bail;
+            }
+            *comma = '\0';
+        }
+
+        *value++ = '\0';        // stomp the '='
+
+        if (!handleJdwpOption(name, value))
+            goto bail;
+
+        if (comma == NULL) {
+            /* out of options */
+            break;
+        }
+        name = comma+1;
+    }
+
+    /*
+     * Make sure the combination of arguments makes sense.
+     */
+    if (gDvm.jdwpTransport == kJdwpTransportUnknown) {
+        LOGE("JDWP opts: must specify transport\n");
+        goto bail;
+    }
+    if (!gDvm.jdwpServer && (gDvm.jdwpHost == NULL || gDvm.jdwpPort == 0)) {
+        LOGE("JDWP opts: when server=n, must specify host and port\n");
+        goto bail;
+    }
+    // transport mandatory
+    // outbound server address
+
+    gDvm.jdwpConfigured = true;
+    result = true;
+
+bail:
+    free(mangle);
+    return result;
+}
+
+/*
+ * Handle one of the four kinds of assertion arguments.
+ *
+ * "pkgOrClass" is the last part of an enable/disable line.  For a package
+ * the arg looks like "-ea:com.google.fubar...", for a class it looks
+ * like "-ea:com.google.fubar.Wahoo".  The string we get starts at the ':'.
+ *
+ * For system assertions (-esa/-dsa), "pkgOrClass" is NULL.
+ *
+ * Multiple instances of these arguments can be specified, e.g. you can
+ * enable assertions for a package and then disable them for one class in
+ * the package.
+ */
+static bool enableAssertions(const char* pkgOrClass, bool enable)
+{
+    /*
+     * Fill out the next AssertionControl slot, but don't "commit" it by
+     * advancing assertionCtrlCount until the entry is fully initialized.
+     * Incrementing up front meant a malformed argument left a live entry
+     * with uninitialized isPackage/pkgOrClassLen fields (the caller
+     * ignores our return value).
+     */
+    AssertionControl* pCtrl = &gDvm.assertionCtrl[gDvm.assertionCtrlCount];
+    pCtrl->enable = enable;
+
+    if (pkgOrClass == NULL) {
+        /* enable or disable for all system classes */
+        pCtrl->isPackage = false;
+        pCtrl->pkgOrClass = NULL;
+        pCtrl->pkgOrClassLen = 0;
+    } else {
+        if (*pkgOrClass == '\0') {
+            /* global enable/disable for all but system */
+            pCtrl->isPackage = false;
+            pCtrl->pkgOrClass = strdup("");
+            pCtrl->pkgOrClassLen = 0;
+        } else {
+            pCtrl->pkgOrClass = dvmDotToSlash(pkgOrClass+1);    // skip ':'
+            if (pCtrl->pkgOrClass == NULL) {
+                /* can happen if class name includes an illegal '/' */
+                LOGW("Unable to process assertion arg '%s'\n", pkgOrClass);
+                return false;
+            }
+
+            int len = strlen(pCtrl->pkgOrClass);
+            if (len >= 3 && strcmp(pCtrl->pkgOrClass + len-3, "///") == 0) {
+                /* mark as package, truncate two of the three slashes */
+                pCtrl->isPackage = true;
+                *(pCtrl->pkgOrClass + len-2) = '\0';
+                pCtrl->pkgOrClassLen = len - 2;
+            } else {
+                /* just a class */
+                pCtrl->isPackage = false;
+                pCtrl->pkgOrClassLen = len;
+            }
+        }
+    }
+
+    /* entry is complete; make it visible */
+    gDvm.assertionCtrlCount++;
+    return true;
+}
+
+/*
+ * Release memory associated with the AssertionCtrl array.
+ */
+static void freeAssertionCtrl(void)
+{
+    int idx;
+
+    /* release each converted package/class name, then the array itself */
+    for (idx = gDvm.assertionCtrlCount - 1; idx >= 0; idx--)
+        free(gDvm.assertionCtrl[idx].pkgOrClass);
+    free(gDvm.assertionCtrl);
+}
+
+
+/*
+ * Process an argument vector full of options.  Unlike standard C programs,
+ * argv[0] does not contain the name of the program.
+ *
+ * If "ignoreUnrecognized" is set, we ignore options starting with "-X" or "_"
+ * that we don't recognize.  Otherwise, we return with an error as soon as
+ * we see anything we can't identify.
+ *
+ * Returns 0 on success, -1 on failure, and 1 for the special case of
+ * "-version" where we want to stop without showing an error message.
+ *
+ * NOTE: the order of the strncmp() prefix tests below is load-bearing;
+ * e.g. "-enableassertions" must be tested before the shorter "-ea"
+ * prefix would match it.
+ */
+static int dvmProcessOptions(int argc, const char* const argv[],
+    bool ignoreUnrecognized)
+{
+    int i;
+
+    LOGV("VM options (%d):\n", argc);
+    for (i = 0; i < argc; i++)
+        LOGV("  %d: '%s'\n", i, argv[i]);
+
+    /* over-allocate AssertionControl array for convenience */
+    /* (one slot per argument is an upper bound on -ea/-da entries) */
+    assert(gDvm.assertionCtrl == NULL);
+    if (argc > 0) {
+        gDvm.assertionCtrl =
+            (AssertionControl*) malloc(sizeof(AssertionControl) * argc);
+        if (gDvm.assertionCtrl == NULL)
+            return -1;
+        assert(gDvm.assertionCtrlCount == 0);
+    }
+
+    for (i = 0; i < argc; i++) {
+        if (strcmp(argv[i], "-help") == 0) {
+            /* show usage and stop */
+            return -1;
+
+        } else if (strcmp(argv[i], "-version") == 0) {
+            /* show version and stop */
+            showVersion();
+            return 1;
+        } else if (strcmp(argv[i], "-showversion") == 0) {
+            /* show version and continue */
+            showVersion();
+
+        } else if (strcmp(argv[i], "-classpath") == 0 ||
+                   strcmp(argv[i], "-cp") == 0)
+        {
+            /* set classpath */
+            if (i == argc-1) {
+                dvmFprintf(stderr, "Missing classpath path list\n");
+                return -1;
+            }
+            free(gDvm.classPathStr); /* in case we have compiled-in default */
+            gDvm.classPathStr = strdup(argv[++i]);
+
+        } else if (strncmp(argv[i], "-Xbootclasspath:",
+                sizeof("-Xbootclasspath:")-1) == 0)
+        {
+            /* set bootclasspath */
+            const char* path = argv[i] + sizeof("-Xbootclasspath:")-1;
+
+            if (*path == '\0') {
+                dvmFprintf(stderr, "Missing bootclasspath path list\n");
+                return -1;
+            }
+            free(gDvm.bootClassPathStr);
+            gDvm.bootClassPathStr = strdup(path);
+
+            /*
+             * TODO: support -Xbootclasspath/a and /p, which append or
+             * prepend to the default bootclasspath.  We set the default
+             * path earlier.
+             */
+
+        } else if (strncmp(argv[i], "-D", 2) == 0) {
+            /* set property */
+            dvmAddCommandLineProperty(argv[i] + 2);
+
+        } else if (strcmp(argv[i], "-jar") == 0) {
+            // TODO: handle this; name of jar should be in argv[i+1]
+            dvmFprintf(stderr, "-jar not yet handled\n");
+            assert(false);
+
+        /* heap and stack sizing: -Xms/-Xmx in KB units, -Xss in bytes */
+        } else if (strncmp(argv[i], "-Xms", 4) == 0) {
+            unsigned int val = dvmParseMemOption(argv[i]+4, 1024);
+            if (val != 0) {
+                if (val >= kMinHeapStartSize && val <= kMaxHeapSize) {
+                    gDvm.heapSizeStart = val;
+                } else {
+                    dvmFprintf(stderr,
+                        "Invalid -Xms '%s', range is %dKB to %dKB\n",
+                        argv[i], kMinHeapStartSize/1024, kMaxHeapSize/1024);
+                    return -1;
+                }
+            } else {
+                dvmFprintf(stderr, "Invalid -Xms option '%s'\n", argv[i]);
+                return -1;
+            }
+        } else if (strncmp(argv[i], "-Xmx", 4) == 0) {
+            unsigned int val = dvmParseMemOption(argv[i]+4, 1024);
+            if (val != 0) {
+                if (val >= kMinHeapSize && val <= kMaxHeapSize) {
+                    gDvm.heapSizeMax = val;
+                } else {
+                    dvmFprintf(stderr,
+                        "Invalid -Xmx '%s', range is %dKB to %dKB\n",
+                        argv[i], kMinHeapSize/1024, kMaxHeapSize/1024);
+                    return -1;
+                }
+            } else {
+                dvmFprintf(stderr, "Invalid -Xmx option '%s'\n", argv[i]);
+                return -1;
+            }
+        } else if (strncmp(argv[i], "-Xss", 4) == 0) {
+            unsigned int val = dvmParseMemOption(argv[i]+4, 1);
+            if (val != 0) {
+                if (val >= kMinStackSize && val <= kMaxStackSize) {
+                    gDvm.stackSize = val;
+                } else {
+                    dvmFprintf(stderr, "Invalid -Xss '%s', range is %d to %d\n",
+                        argv[i], kMinStackSize, kMaxStackSize);
+                    return -1;
+                }
+            } else {
+                dvmFprintf(stderr, "Invalid -Xss option '%s'\n", argv[i]);
+                return -1;
+            }
+
+        } else if (strcmp(argv[i], "-verbose") == 0 ||
+            strcmp(argv[i], "-verbose:class") == 0)
+        {
+            // JNI spec says "-verbose:gc,class" is valid, but cmd line
+            // doesn't work that way; may want to support.
+            gDvm.verboseClass = true;
+        } else if (strcmp(argv[i], "-verbose:jni") == 0) {
+            gDvm.verboseJni = true;
+        } else if (strcmp(argv[i], "-verbose:gc") == 0) {
+            gDvm.verboseGc = true;
+
+        /* assertion args; long forms must be matched before short ones */
+        } else if (strncmp(argv[i], "-enableassertions", 17) == 0) {
+            enableAssertions(argv[i] + 17, true);
+        } else if (strncmp(argv[i], "-ea", 3) == 0) {
+            enableAssertions(argv[i] + 3, true);
+        } else if (strncmp(argv[i], "-disableassertions", 18) == 0) {
+            enableAssertions(argv[i] + 18, false);
+        } else if (strncmp(argv[i], "-da", 3) == 0) {
+            enableAssertions(argv[i] + 3, false);
+        } else if (strcmp(argv[i], "-enablesystemassertions") == 0 ||
+                   strcmp(argv[i], "-esa") == 0)
+        {
+            enableAssertions(NULL, true);
+        } else if (strcmp(argv[i], "-disablesystemassertions") == 0 ||
+                   strcmp(argv[i], "-dsa") == 0)
+        {
+            enableAssertions(NULL, false);
+
+        } else if (strncmp(argv[i], "-Xcheck:jni", 11) == 0) {
+            /* nothing to do now -- was handled during JNI init */
+
+        } else if (strcmp(argv[i], "-Xdebug") == 0) {
+            /* accept but ignore */
+
+        } else if (strncmp(argv[i], "-Xrunjdwp:", 10) == 0 ||
+            strncmp(argv[i], "-agentlib:jdwp=", 15) == 0)
+        {
+            const char* tail;
+            bool result = false;
+
+            /* "tail" points just past the ':' or '=' separator */
+            if (argv[i][1] == 'X')
+                tail = argv[i] + 10;
+            else
+                tail = argv[i] + 15;
+
+            if (strncmp(tail, "help", 4) == 0 || !parseJdwpOptions(tail)) {
+                showJdwpHelp();
+                return 1;
+            }
+        } else if (strcmp(argv[i], "-Xrs") == 0) {
+            gDvm.reduceSignals = true;
+        } else if (strcmp(argv[i], "-Xnoquithandler") == 0) {
+            /* disables SIGQUIT handler thread while still blocking SIGQUIT */
+            /* (useful if we don't want thread but system still signals us) */
+            gDvm.noQuitHandler = true;
+        } else if (strcmp(argv[i], "-Xzygote") == 0) {
+            gDvm.zygote = true;
+        } else if (strncmp(argv[i], "-Xdexopt:", 9) == 0) {
+            if (strcmp(argv[i] + 9, "none") == 0)
+                gDvm.dexOptMode = OPTIMIZE_MODE_NONE;
+            else if (strcmp(argv[i] + 9, "verified") == 0)
+                gDvm.dexOptMode = OPTIMIZE_MODE_VERIFIED;
+            else if (strcmp(argv[i] + 9, "all") == 0)
+                gDvm.dexOptMode = OPTIMIZE_MODE_ALL;
+            else {
+                dvmFprintf(stderr, "Unrecognized dexopt option '%s'\n",argv[i]);
+                return -1;
+            }
+        } else if (strncmp(argv[i], "-Xverify:", 9) == 0) {
+            if (strcmp(argv[i] + 9, "none") == 0)
+                gDvm.classVerifyMode = VERIFY_MODE_NONE;
+            else if (strcmp(argv[i] + 9, "remote") == 0)
+                gDvm.classVerifyMode = VERIFY_MODE_REMOTE;
+            else if (strcmp(argv[i] + 9, "all") == 0)
+                gDvm.classVerifyMode = VERIFY_MODE_ALL;
+            else {
+                dvmFprintf(stderr, "Unrecognized verify option '%s'\n",argv[i]);
+                return -1;
+            }
+        } else if (strncmp(argv[i], "-Xjnigreflimit:", 15) == 0) {
+            int lim = atoi(argv[i] + 15);
+            /* limit must be a positive multiple of 100, at least 200 */
+            if (lim < 200 || (lim % 100) != 0) {
+                dvmFprintf(stderr, "Bad value for -Xjnigreflimit: '%s'\n",
+                    argv[i]+15);
+                return -1;
+            }
+            gDvm.jniGrefLimit = lim;
+
+        } else if (strcmp(argv[i], "-Xlog-stdio") == 0) {
+            gDvm.logStdio = true;
+
+        } else if (strncmp(argv[i], "-Xint", 5) == 0) {
+            if (argv[i][5] == ':') {
+                if (strcmp(argv[i] + 6, "portable") == 0)
+                    gDvm.executionMode = kExecutionModeInterpPortable;
+                else if (strcmp(argv[i] + 6, "fast") == 0)
+                    gDvm.executionMode = kExecutionModeInterpFast;
+                else {
+                    dvmFprintf(stderr,
+                        "Warning: Unrecognized interpreter mode %s\n",argv[i]);
+                    /* keep going */
+                }
+            } else {
+                /* disable JIT -- nothing to do here for now */
+            }
+
+        } else if (strncmp(argv[i], "-Xdeadlockpredict:", 18) == 0) {
+#ifdef WITH_DEADLOCK_PREDICTION
+            if (strcmp(argv[i] + 18, "off") == 0)
+                gDvm.deadlockPredictMode = kDPOff;
+            else if (strcmp(argv[i] + 18, "warn") == 0)
+                gDvm.deadlockPredictMode = kDPWarn;
+            else if (strcmp(argv[i] + 18, "err") == 0)
+                gDvm.deadlockPredictMode = kDPErr;
+            else if (strcmp(argv[i] + 18, "abort") == 0)
+                gDvm.deadlockPredictMode = kDPAbort;
+            else {
+                dvmFprintf(stderr, "Bad value for -Xdeadlockpredict");
+                return -1;
+            }
+            if (gDvm.deadlockPredictMode != kDPOff)
+                LOGD("Deadlock prediction enabled (%s)\n", argv[i]+18);
+#endif
+            /* silently accepted when WITH_DEADLOCK_PREDICTION is off */
+
+        } else if (strncmp(argv[i], "-Xstacktracefile:", 17) == 0) {
+            gDvm.stackTraceFile = strdup(argv[i]+17);
+
+        } else {
+            if (!ignoreUnrecognized) {
+                dvmFprintf(stderr, "Unrecognized option '%s'\n", argv[i]);
+                return -1;
+            }
+        }
+    }
+
+    /* sanity-check the combined heap settings */
+    if (gDvm.heapSizeStart > gDvm.heapSizeMax) {
+        dvmFprintf(stderr, "Heap start size must be <= heap max size\n");
+        return -1;
+    }
+
+    return 0;
+}
+
+/*
+ * Set defaults for fields altered or modified by arguments.
+ */
+static void setCommandLineDefaults()
+{
+    const char* env;
+
+    /* class paths come from $CLASSPATH / $BOOTCLASSPATH, falling back
+     * to the current directory */
+    env = getenv("CLASSPATH");
+    gDvm.classPathStr = strdup(env != NULL ? env : ".");
+    env = getenv("BOOTCLASSPATH");
+    gDvm.bootClassPathStr = strdup(env != NULL ? env : ".");
+
+    /* Defaults overridden by -Xms and -Xmx.
+     * TODO: base these on a system or application-specific default
+     */
+    gDvm.heapSizeStart = 2 * 1024 * 1024;   // Spec says 16MB; too big for us.
+    gDvm.heapSizeMax = 16 * 1024 * 1024;    // Spec says 75% physical mem
+    gDvm.stackSize = kDefaultStackSize;
+
+    /* gDvm.jdwpSuspend = true; */
+
+    /* allowed unless zygote config doesn't allow it */
+    gDvm.jdwpAllowed = true;
+
+    /* default to standard behavior */
+    gDvm.jniWarnError = true;
+
+    /* default verification and optimization modes */
+    gDvm.classVerifyMode = VERIFY_MODE_ALL;
+    gDvm.dexOptMode = OPTIMIZE_MODE_VERIFIED;
+
+    /*
+     * Default execution mode.
+     * TODO: this should be controlled by the generated code, maybe a flag
+     * in the mterp config indicating whether or not an accelerated mterp
+     * implementation exists.
+     */
+#if defined(__arm__)
+    gDvm.executionMode = kExecutionModeInterpFast;
+#else
+    gDvm.executionMode = kExecutionModeInterpPortable;
+#endif
+}
+
+
+/*
+ * Handle a SIGBUS, which frequently occurs because somebody replaced an
+ * optimized DEX file out from under us.
+ */
+static void busCatcher(int signum, siginfo_t* info, void* context)
+{
+    LOGE("Caught a SIGBUS (%d), addr=%p\n", signum, info->si_addr);
+
+    /*
+     * Returning from here would just retrigger the SIGBUS, so instead we
+     * revert to the default disposition and let the signal kill us.
+     * TODO: restore the original handler (which points to a debuggerd
+     * stub); without that, debuggerd won't be notified.
+     */
+    signal(SIGBUS, SIG_DFL);
+}
+
+/*
+ * Configure signals.  We need to block SIGQUIT so that the signal only
+ * reaches the dump-stack-trace thread.
+ *
+ * This can be disabled with the "-Xrs" flag.
+ */
+static void blockSignals()
+{
+    sigset_t blockSet;
+    int ret;
+
+    /* block SIGQUIT (stack dump) and SIGUSR1 (heap dump) in this thread;
+     * they will be handled by dedicated threads instead */
+    sigemptyset(&blockSet);
+    sigaddset(&blockSet, SIGQUIT);
+    sigaddset(&blockSet, SIGUSR1);      // used to initiate heap dump
+    //sigaddset(&blockSet, SIGPIPE);
+    ret = sigprocmask(SIG_BLOCK, &blockSet, NULL);
+    assert(ret == 0);
+
+    /* disabled: experimental SIGBUS catcher for replaced DEX files */
+    if (false) {
+        /* TODO: save the old sigaction in a global */
+        struct sigaction sa;
+        memset(&sa, 0, sizeof(sa));
+        sa.sa_sigaction = busCatcher;
+        sa.sa_flags = SA_SIGINFO;
+        ret = sigaction(SIGBUS, &sa, NULL);
+        assert(ret == 0);
+    }
+}
+
+/*
+ * VM initialization.  Pass in any options provided on the command line.
+ * Do not pass in the class name or the options for the class.
+ *
+ * The initialization sequence below is order-dependent: option parsing
+ * must precede signal/config setup, component startup runs before any
+ * class loading, and JNI/thread prep comes before anything that can
+ * execute interpreted code.  On any failure we call dvmShutdown(), which
+ * tolerates partially-initialized state.
+ *
+ * Returns 0 on success.
+ */
+int dvmStartup(int argc, const char* const argv[], bool ignoreUnrecognized,
+    JNIEnv* pEnv)
+{
+    int i, cc;
+
+    assert(gDvm.initializing);
+
+    LOGV("VM init args (%d):\n", argc);
+    for (i = 0; i < argc; i++)
+        LOGV("  %d: '%s'\n", i, argv[i]);
+
+    setCommandLineDefaults();
+
+    /* prep properties storage */
+    if (!dvmPropertiesStartup(argc))
+        goto fail;
+
+    /*
+     * Process the option flags (if any).
+     */
+    cc = dvmProcessOptions(argc, argv, ignoreUnrecognized);
+    if (cc != 0) {
+        /* cc < 0 means a genuine error; cc > 0 (e.g. -version) means
+         * "stop silently" */
+        if (cc < 0) {
+            dvmFprintf(stderr, "\n");
+            dvmUsage("dalvikvm");
+        }
+        goto fail;
+    }
+
+    /* configure signal handling */
+    if (!gDvm.reduceSignals)
+        blockSignals();
+
+    /* mterp setup */
+    LOGV("Using executionMode %d\n", gDvm.executionMode);
+    dvmCheckAsmConstants();
+
+    /*
+     * Initialize components.  Order matters; e.g. classes can't load
+     * until the GC and thread subsystems are up.
+     */
+    if (!dvmAllocTrackerStartup())
+        goto fail;
+    if (!dvmGcStartup())
+        goto fail;
+    if (!dvmThreadStartup())
+        goto fail;
+    if (!dvmInlineNativeStartup())
+        goto fail;
+    if (!dvmVerificationStartup())
+        goto fail;
+    if (!dvmInstanceofStartup())
+        goto fail;
+    if (!dvmClassStartup())
+        goto fail;
+    if (!dvmThreadObjStartup())
+        goto fail;
+    if (!dvmExceptionStartup())
+        goto fail;
+    if (!dvmStringInternStartup())
+        goto fail;
+    if (!dvmNativeStartup())
+        goto fail;
+    if (!dvmInternalNativeStartup())
+        goto fail;
+    if (!dvmJniStartup())
+        goto fail;
+    if (!dvmReflectStartup())
+        goto fail;
+#ifdef WITH_PROFILER
+    if (!dvmProfilingStartup())
+        goto fail;
+#endif
+
+    /* make sure we got these [can this go away?] */
+    assert(gDvm.classJavaLangClass != NULL);
+    assert(gDvm.classJavaLangObject != NULL);
+    //assert(gDvm.classJavaLangString != NULL);
+    assert(gDvm.classJavaLangThread != NULL);
+    assert(gDvm.classJavaLangVMThread != NULL);
+    assert(gDvm.classJavaLangThreadGroup != NULL);
+
+    /*
+     * Make sure these exist.  If they don't, we can return a failure out
+     * of main and nip the whole thing in the bud.
+     */
+    static const char* earlyClasses[] = {
+        "Ljava/lang/InternalError;",
+        "Ljava/lang/StackOverflowError;",
+        "Ljava/lang/UnsatisfiedLinkError;",
+        "Ljava/lang/NoClassDefFoundError;",
+        NULL
+    };
+    const char** pClassName;
+    for (pClassName = earlyClasses; *pClassName != NULL; pClassName++) {
+        if (dvmFindSystemClassNoInit(*pClassName) == NULL)
+            goto fail;
+    }
+
+    /*
+     * Miscellaneous class library validation.
+     */
+    if (!dvmValidateBoxClasses())
+        goto fail;
+
+    /*
+     * Do the last bits of Thread struct initialization we need to allow
+     * JNI calls to work.
+     */
+    if (!dvmPrepMainForJni(pEnv))
+        goto fail;
+
+    /*
+     * Register the system native methods, which are registered through JNI.
+     */
+    if (!registerSystemNatives(pEnv))
+        goto fail;
+
+    /*
+     * Do some "late" initialization for the memory allocator.  This may
+     * allocate storage and initialize classes.
+     */
+    if (!dvmGcLateInit())
+        goto fail;
+
+    /*
+     * At this point, the VM is in a pretty good state.  Finish prep on
+     * the main thread (specifically, create a java.lang.Thread object to go
+     * along with our Thread struct).  Note we will probably be executing
+     * some interpreted class initializer code in here.
+     */
+    if (!dvmPrepMainThread())
+        goto fail;
+
+    /* general debugging setup */
+    if (!dvmDebuggerStartup())
+        goto fail;
+
+    /*
+     * Init for either zygote mode or non-zygote mode.  The key difference
+     * is that we don't start any additional threads in Zygote mode.
+     */
+    if (gDvm.zygote) {
+        if (!dvmInitZygote())
+            goto fail;
+    } else {
+        if (!dvmInitAfterZygote())
+            goto fail;
+    }
+
+
+#ifndef NDEBUG
+    dvmTestHash();
+#endif
+
+    assert(!dvmCheckException(dvmThreadSelf()));
+    gDvm.initExceptionCount = 0;
+
+    return 0;
+
+fail:
+    dvmShutdown();
+    return 1;
+}
+
+/*
+ * Register java.* natives from our class libraries.  We need to do
+ * this after we're ready for JNI registration calls, but before we
+ * do any class initialization.
+ *
+ * If we get this wrong, we will blow up in the ThreadGroup class init if
+ * interpreted code makes any reference to System.  It will likely do this
+ * since it wants to do some java.io.File setup (e.g. for static in/out/err).
+ *
+ * We need to have gDvm.initializing raised here so that JNI FindClass
+ * won't try to use the system/application class loader.
+ */
+static bool registerSystemNatives(JNIEnv* pEnv)
+{
+    /* the main thread is always the first entry in the thread list */
+    Thread* mainThread = gDvm.threadList;
+
+    /* must set this before allowing JNI-based method registration */
+    mainThread->status = THREAD_NATIVE;
+
+    if (jniRegisterSystemMethods(pEnv) < 0) {
+        LOGW("jniRegisterSystemMethods failed\n");
+        return false;
+    }
+
+    /* back to run mode */
+    mainThread->status = THREAD_RUNNING;
+
+    return true;
+}
+
+
+/*
+ * Do zygote-mode-only initialization.
+ *
+ * Always returns true.
+ */
+static bool dvmInitZygote(void)
+{
+    /* zygote goes into its own process group */
+    setpgid(0,0);
+    /* NOTE(review): setpgid() result is ignored -- confirm a failure here
+     * is harmless, or add error handling */
+
+    return true;
+}
+
+/*
+ * Do non-zygote-mode initialization.  This is done during VM init for
+ * standard startup, or after a "zygote fork" when creating a new process.
+ *
+ * Each phase is bracketed with timestamps so we can log how long the
+ * thread startups took.
+ */
+bool dvmInitAfterZygote(void)
+{
+    u8 heapStart, heapEnd;
+    u8 quitStart, quitEnd;
+    u8 jdwpStart, jdwpEnd;
+
+    /*
+     * Post-zygote heap initialization, including starting
+     * the HeapWorker thread.
+     */
+    heapStart = dvmGetRelativeTimeUsec();
+    if (!dvmGcStartupAfterZygote())
+        return false;
+    heapEnd = dvmGetRelativeTimeUsec();
+
+    quitStart = dvmGetRelativeTimeUsec();
+
+    /* start signal catcher thread that dumps stacks on SIGQUIT */
+    if (!gDvm.reduceSignals && !gDvm.noQuitHandler) {
+        if (!dvmSignalCatcherStartup())
+            return false;
+    }
+
+    /* start stdout/stderr copier, if requested */
+    if (gDvm.logStdio) {
+        if (!dvmStdioConverterStartup())
+            return false;
+    }
+    quitEnd = dvmGetRelativeTimeUsec();
+
+    /*
+     * Start JDWP thread.  If the command-line debugger flags specified
+     * "suspend=y", this will pause the VM.  We probably want this to
+     * come last.
+     */
+    jdwpStart = dvmGetRelativeTimeUsec();
+    if (!dvmInitJDWP()) {
+        LOGD("JDWP init failed; continuing anyway\n");
+    }
+    jdwpEnd = dvmGetRelativeTimeUsec();
+
+    LOGV("thread-start heap=%d quit=%d jdwp=%d total=%d usec\n",
+        (int)(heapEnd-heapStart), (int)(quitEnd-quitStart),
+        (int)(jdwpEnd-jdwpStart), (int)(jdwpEnd-heapStart));
+
+    return true;
+}
+
+/*
+ * Prepare for a connection to a JDWP-compliant debugger.
+ *
+ * Note this needs to happen fairly late in the startup process, because
+ * we need to have all of the java.* native methods registered (which in
+ * turn requires JNI to be fully prepped).
+ *
+ * There are several ways to initialize:
+ *   server=n
+ *     We immediately try to connect to host:port.  Bail on failure.  On
+ *     success, send VM_START (suspending the VM if "suspend=y").
+ *   server=y suspend=n
+ *     Passively listen for a debugger to connect.  Return immediately.
+ *   server=y suspend=y
+ *     Wait until debugger connects.  Send VM_START ASAP, suspending the
+ *     VM after the message is sent.
+ *
+ * This gets more complicated with a nonzero value for "timeout".
+ */
+static bool dvmInitJDWP(void)
+{
+    assert(!gDvm.zygote);
+
+#ifndef WITH_DEBUGGER
+    /* early return; everything below is dead code in this configuration */
+    LOGI("Debugger support not compiled into VM\n");
+    return false;
+#endif
+
+    /*
+     * Init JDWP if the debugger is enabled.  This may connect out to a
+     * debugger, passively listen for a debugger, or block waiting for a
+     * debugger.
+     */
+    if (gDvm.jdwpAllowed && gDvm.jdwpConfigured) {
+        JdwpStartupParams params;
+
+        /* host name is copied into the fixed-size params buffer; reject
+         * anything that won't fit */
+        if (gDvm.jdwpHost != NULL) {
+            if (strlen(gDvm.jdwpHost) >= sizeof(params.host)-1) {
+                LOGE("ERROR: hostname too long: '%s'\n", gDvm.jdwpHost);
+                return false;
+            }
+            strcpy(params.host, gDvm.jdwpHost);
+        } else {
+            params.host[0] = '\0';
+        }
+        params.transport = gDvm.jdwpTransport;
+        params.server = gDvm.jdwpServer;
+        params.suspend = gDvm.jdwpSuspend;
+        params.port = gDvm.jdwpPort;
+
+        gDvm.jdwpState = dvmJdwpStartup(&params);
+        if (gDvm.jdwpState == NULL) {
+            LOGW("WARNING: debugger thread failed to initialize\n");
+            /* TODO: ignore? fail? need to mimic "expected" behavior */
+        }
+    }
+
+    /*
+     * If a debugger has already attached, send the "welcome" message.  This
+     * may cause us to suspend all threads.
+     *
+     * NOTE(review): gDvm.jdwpState may be NULL here (not configured, or
+     * startup failed) -- presumably dvmJdwpIsActive() tolerates NULL;
+     * confirm.
+     */
+    if (dvmJdwpIsActive(gDvm.jdwpState)) {
+        //dvmChangeStatus(NULL, THREAD_RUNNING);
+        if (!dvmJdwpPostVMStart(gDvm.jdwpState, gDvm.jdwpSuspend)) {
+            LOGW("WARNING: failed to post 'start' message to debugger\n");
+            /* keep going */
+        }
+        //dvmChangeStatus(NULL, THREAD_NATIVE);
+    }
+
+    return true;
+}
+
+/*
+ * An alternative to JNI_CreateJavaVM/dvmStartup that does the first bit
+ * of initialization and then returns with "initializing" still set.  (Used
+ * by DexOpt command-line utility.)
+ *
+ * Attempting to use JNI or internal natives will fail.  It's best if
+ * no bytecode gets executed, which means no <clinit>, which means no
+ * exception-throwing.  We check the "initializing" flag anyway when
+ * throwing an exception, so we can insert some code that avoids chucking
+ * an exception when we're optimizing stuff.
+ *
+ * On failure, dvmShutdown() is invoked to tear down whatever was
+ * partially initialized.
+ *
+ * Returns 0 on success.
+ */
+int dvmPrepForDexOpt(const char* bootClassPath, DexOptimizerMode dexOptMode,
+    DexClassVerifyMode verifyMode)
+{
+    gDvm.initializing = true;
+    gDvm.optimizing = true;
+
+    /* configure signal handling */
+    blockSignals();
+
+    /* set some defaults */
+    setCommandLineDefaults();
+    /* replace the default bootclasspath with the caller-supplied one */
+    free(gDvm.bootClassPathStr);
+    gDvm.bootClassPathStr = strdup(bootClassPath);
+
+    /* set opt/verify modes */
+    gDvm.dexOptMode = dexOptMode;
+    gDvm.classVerifyMode = verifyMode;
+
+    /*
+     * Initialize the heap, some basic thread control mutexes, and
+     * get the bootclasspath prepped.
+     *
+     * We can't load any classes yet because we may not yet have a source
+     * for things like java.lang.Object and java.lang.Class.
+     */
+    if (!dvmGcStartup())
+        goto fail;
+    if (!dvmThreadStartup())
+        goto fail;
+    if (!dvmInlineNativeStartup())
+        goto fail;
+    if (!dvmVerificationStartup())
+        goto fail;
+    if (!dvmInstanceofStartup())
+        goto fail;
+    if (!dvmClassStartup())
+        goto fail;
+
+    /*
+     * We leave gDvm.initializing set to "true" so that, if we're not
+     * able to process the "core" classes, we don't go into a death-spin
+     * trying to throw a "class not found" exception.
+     */
+
+    return 0;
+
+fail:
+    dvmShutdown();
+    return 1;
+}
+
+
+/*
+ * All threads have stopped.  Finish the shutdown procedure.
+ *
+ * We can also be called if startup fails partway through, so be prepared
+ * to deal with partially initialized data.
+ *
+ * Free any storage allocated in gGlobals.
+ *
+ * We can't dlclose() shared libs we've loaded, because it's possible a
+ * thread not associated with the VM is running code in one.
+ *
+ * This is called from the JNI DestroyJavaVM function, which can be
+ * called from any thread.  (In practice, this will usually run in the
+ * same thread that started the VM, a/k/a the main thread, but we don't
+ * want to assume that.)
+ *
+ * Teardown order matters: internal threads are stopped first, then the
+ * subsystems are shut down roughly in reverse of startup order.
+ */
+void dvmShutdown(void)
+{
+    LOGV("VM shutting down\n");
+
+    if (CALC_CACHE_STATS)
+        dvmDumpAtomicCacheStats(gDvm.instanceofCache);
+
+    /*
+     * Stop our internal threads.
+     */
+    dvmHeapWorkerShutdown();
+
+    if (gDvm.jdwpState != NULL)
+        dvmJdwpShutdown(gDvm.jdwpState);
+    free(gDvm.jdwpHost);
+    gDvm.jdwpHost = NULL;
+    free(gDvm.stackTraceFile);
+    gDvm.stackTraceFile = NULL;
+
+    /* tell signal catcher to shut down if it was started */
+    dvmSignalCatcherShutdown();
+
+    /* shut down stdout/stderr conversion */
+    dvmStdioConverterShutdown();
+
+    /*
+     * Kill any daemon threads that still exist.  Actively-running threads
+     * are likely to crash the process if they continue to execute while
+     * the VM shuts down.
+     */
+    dvmSlayDaemons();
+
+    LOGD("VM cleaning up\n");
+
+    dvmDebuggerShutdown();
+    dvmReflectShutdown();
+#ifdef WITH_PROFILER
+    dvmProfilingShutdown();
+#endif
+    dvmJniShutdown();
+    dvmStringInternShutdown();
+    dvmExceptionShutdown();
+    dvmThreadShutdown();
+    dvmClassShutdown();
+    dvmVerificationShutdown();
+    dvmInstanceofShutdown();
+    dvmInlineNativeShutdown();
+    dvmGcShutdown();
+    dvmAllocTrackerShutdown();
+    dvmPropertiesShutdown();
+
+    /* these must happen AFTER dvmClassShutdown has walked through class data */
+    dvmNativeShutdown();
+    dvmInternalNativeShutdown();
+
+    free(gDvm.bootClassPathStr);
+    free(gDvm.classPathStr);
+
+    freeAssertionCtrl();
+
+    /*
+     * We want valgrind to report anything we forget to free as "definitely
+     * lost".  If there's a pointer in the global chunk, it would be reported
+     * as "still reachable".  Erasing the memory fixes this.
+     *
+     * This must be erased to zero if we want to restart the VM within this
+     * process.
+     */
+    memset(&gDvm, 0xcd, sizeof(gDvm));
+}
+
+
+/*
+ * fprintf() wrapper that calls through the JNI-specified vfprintf hook if
+ * one was specified.
+ */
+int dvmFprintf(FILE* fp, const char* format, ...)
+{
+    va_list ap;
+    int count;
+
+    va_start(ap, format);
+    /* prefer the embedder-supplied hook; fall back to plain vfprintf */
+    count = (gDvm.vfprintfHook != NULL)
+        ? (*gDvm.vfprintfHook)(fp, format, ap)
+        : vfprintf(fp, format, ap);
+    va_end(ap);
+
+    return count;
+}
+
+/*
+ * Abort the VM.  We get here on fatal errors.  Try very hard not to use
+ * this; whenever possible, return an error to somebody responsible.
+ */
+void dvmAbort(void)
+{
+    LOGE("VM aborting\n");
+
+    fflush(NULL);       // flush all open file buffers
+
+    /* JNI-supplied abort hook gets right of first refusal */
+    if (gDvm.abortHook != NULL)
+        (*gDvm.abortHook)();
+
+    /*
+     * If we call abort(), all threads in the process receive a SIGABRT.
+     * debuggerd dumps the stack trace of the main thread, whether or not
+     * that was the thread that failed.
+     *
+     * By stuffing a value into a bogus address, we cause a segmentation
+     * fault in the current thread, and get a useful log from debuggerd.
+     * We can also trivially tell the difference between a VM crash and
+     * a deliberate abort by looking at the fault address.
+     */
+    *((char*)0xdeadd00d) = 38;
+    abort();    /* backstop in case the write above somehow doesn't fault */
+
+    /* notreached */
+}
diff --git a/vm/Init.h b/vm/Init.h
new file mode 100644
index 0000000..a8daaa8
--- /dev/null
+++ b/vm/Init.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * VM initialization and shutdown.
+ */
+#ifndef _DALVIK_INIT
+#define _DALVIK_INIT
+
+/*
+ * Standard VM initialization, usually invoked through JNI.
+ */
+int dvmStartup(int argc, const char* const argv[], bool ignoreUnrecognized,
+    JNIEnv* pEnv);
+void dvmShutdown(void);
+bool dvmInitAfterZygote(void);
+
+/*
+ * Partial VM initialization; only used as part of "dexopt", which may be
+ * asked to optimize a DEX file holding fundamental classes.
+ */
+int dvmPrepForDexOpt(const char* bootClassPath, DexOptimizerMode dexOptMode,
+    DexClassVerifyMode verifyMode);
+
+/*
+ * Console output (dvmFprintf), and unconditional VM abort (dvmAbort).
+ * Try very hard not to call dvmAbort(); whenever possible, return an
+ * error to somebody responsible.
+ */
+int dvmFprintf(FILE* fp, const char* format, ...);
+void dvmAbort(void);
+
+#endif /*_DALVIK_INIT*/
diff --git a/vm/InlineNative.c b/vm/InlineNative.c
new file mode 100644
index 0000000..28a9112
--- /dev/null
+++ b/vm/InlineNative.c
@@ -0,0 +1,520 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Inlined native functions.  These definitions replace interpreted or
+ * native implementations at runtime; "intrinsic" might be a better word.
+ */
+#include "Dalvik.h"
+
+//#undef HAVE__MEMCMP16
+
+#ifdef HAVE__MEMCMP16
+//#warning "trying memcmp16"
+//#define CHECK_MEMCMP16
+/* "count" is in 16-bit units */
+extern u4 __memcmp16(const u2* s0, const u2* s1, size_t count);
+#endif
+
+/*
+ * Some notes on "inline" functions.
+ *
+ * These are NOT native methods.  A full method definition must still be
+ * provided.  Depending on the flags passed into the VM at runtime, the
+ * original or inline version may be selected by the DEX optimizer.
+ *
+ * PLEASE DO NOT use this as the default location for native methods.
+ * The difference between this and an "internal native" static method
+ * call on a 200MHz ARM 9 is roughly 370ns vs. 700ns.  The code here
+ * "secretly replaces" the other method, so you can't avoid having two
+ * implementations.  Since the DEX optimizer mode can't be known ahead
+ * of time, both implementations must be correct and complete.
+ *
+ * The only stuff that really needs to be here are methods that
+ * are high-volume or must be low-overhead, e.g. certain String/Math
+ * methods and some java.util.concurrent.atomic operations.
+ *
+ * Normally, a class is loaded and initialized the first time a static
+ * method is invoked.  This property is NOT preserved here.  If you need
+ * to access a static field in a class, you must ensure initialization
+ * yourself (cheap/easy way is to check the resolved-methods table, and
+ * resolve the method if it hasn't been).
+ *
+ * DO NOT replace "synchronized" methods.  (We can't reliabily check for
+ * this at DEX optimization time because "dx" doesn't propagate the
+ * "synchronized" flag for non-native methods.)
+ *
+ * Remember that these functions are executing while the thread is in
+ * the "RUNNING" state, not the "NATIVE" state.  If you perform a blocking
+ * operation you can stall the entire VM if the GC or debugger wants to
+ * suspend the thread.  Since these are arguably native implementations
+ * rather than VM internals, prefer NATIVE to VMWAIT if you want to change
+ * the thread state.
+ *
+ * Always write results to 32-bit or 64-bit fields in "pResult", e.g. do
+ * not write boolean results to pResult->z.  The interpreter expects
+ * 32 or 64 bits to be set.
+ *
+ * Inline op methods return "false" if an exception was thrown, "true" if
+ * everything went well.
+ *
+ * DO NOT provide implementations of methods that can be overridden by a
+ * subclass, as polymorphism does not work correctly.  For safety you should
+ * only provide inline functions for classes/methods declared "final".
+ *
+ * It's best to avoid inlining the overridden version of a method, e.g.
+ * String.hashCode() is inherited from Object.hashCode().  Somebody
+ * calling String.hashCode() through an Object reference will run the
+ * "slow" version, while calling it through a String reference gets
+ * the inlined version.  Best to have just one version unless there are
+ * clear performance gains.
+ */
+
+
+/*
+ * Forward declarations.
+ */
+static bool org_apache_harmony_dalvik_NativeTestTarget_emptyInlineMethod(
+    u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult);
+static bool javaLangString_charAt(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+    JValue* pResult);
+static bool javaLangString_compareTo(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+    JValue* pResult);
+static bool javaLangString_equals(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+    JValue* pResult);
+static bool javaLangString_length(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+    JValue* pResult);
+
+/*
+ * Table of methods.
+ *
+ * The DEX optimizer uses the class/method/signature strings to decide
+ * which calls it can trample.  The interpreter just uses the function
+ * pointer.
+ *
+ * IMPORTANT: you must update DALVIK_VM_BUILD in DalvikVersion.h if you make
+ * changes to this table.
+ */
+const InlineOperation gDvmInlineOpsTable[] = {
+    { org_apache_harmony_dalvik_NativeTestTarget_emptyInlineMethod,
+        "Lorg/apache/harmony/dalvik/NativeTestTarget;",
+        "emptyInlineMethod", "()V" },
+    { javaLangString_charAt,
+        "Ljava/lang/String;", "charAt", "(I)C" },
+    { javaLangString_compareTo,
+        "Ljava/lang/String;", "compareTo", "(Ljava/lang/String;)I" },
+    { javaLangString_equals,
+        "Ljava/lang/String;", "equals", "(Ljava/lang/Object;)Z" },
+    { javaLangString_length,
+        "Ljava/lang/String;", "length", "()I" },
+};
+
+
+/*
+ * Allocate tables used by the inline-native subsystem.
+ *
+ * With WITH_PROFILER, reserves one Method* slot per gDvmInlineOpsTable
+ * entry; the slots are filled in lazily by dvmPerformInlineOp4Dbg() so
+ * method profiling can attribute time to the replaced methods.
+ *
+ * Returns false on allocation failure.
+ */
+bool dvmInlineNativeStartup(void)
+{
+#ifdef WITH_PROFILER
+    gDvm.inlinedMethods =
+        (Method**) calloc(NELEM(gDvmInlineOpsTable), sizeof(Method*));
+    if (gDvm.inlinedMethods == NULL)
+        return false;
+#endif
+
+    return true;
+}
+
+/*
+ * Free generated tables.
+ */
+void dvmInlineNativeShutdown(void)
+{
+#ifdef WITH_PROFILER
+    free(gDvm.inlinedMethods);
+#endif
+}
+
+
+/*
+ * Get a pointer to the inlineops table.
+ */
+const InlineOperation* dvmGetInlineOpsTable(void)
+{
+    return gDvmInlineOpsTable;
+}
+
+/*
+ * Get the number of entries in the inlineops table.
+ */
+int dvmGetInlineOpsTableLength(void)
+{
+    return NELEM(gDvmInlineOpsTable);
+}
+
+/*
+ * Make an inline call for the "debug" interpreter, used when the debugger
+ * or profiler is active.
+ */
+bool dvmPerformInlineOp4Dbg(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+    JValue* pResult, int opIndex)
+{
+    Thread* self = dvmThreadSelf();
+    bool result;
+
+    assert(opIndex >= 0 && opIndex < NELEM(gDvmInlineOpsTable));
+
+#ifdef WITH_PROFILER
+    /*
+     * Populate the methods table on first use.  It's possible the class
+     * hasn't been resolved yet, so we need to do the full "calling the
+     * method for the first time" routine.  (It's probably okay to skip
+     * the access checks.)
+     *
+     * Currently assuming that we're only inlining stuff loaded by the
+     * bootstrap class loader.  This is a safe assumption for many reasons.
+     */
+    Method* method = gDvm.inlinedMethods[opIndex];
+    if (method == NULL) {
+        ClassObject* clazz;
+
+        clazz = dvmFindClassNoInit(
+                gDvmInlineOpsTable[opIndex].classDescriptor, NULL);
+        if (clazz == NULL) {
+            /*
+             * "clazz" is NULL here, so log the descriptor from the table;
+             * dereferencing clazz for the message would crash.
+             */
+            LOGW("Warning: can't find class '%s'\n",
+                gDvmInlineOpsTable[opIndex].classDescriptor);
+            goto skip_prof;
+        }
+        method = dvmFindDirectMethodByDescriptor(clazz,
+                    gDvmInlineOpsTable[opIndex].methodName,
+                    gDvmInlineOpsTable[opIndex].methodSignature);
+        if (method == NULL)
+            method = dvmFindVirtualMethodByDescriptor(clazz,
+                        gDvmInlineOpsTable[opIndex].methodName,
+                        gDvmInlineOpsTable[opIndex].methodSignature);
+        if (method == NULL) {
+            LOGW("Warning: can't find method %s.%s %s\n",
+                clazz->descriptor,
+                gDvmInlineOpsTable[opIndex].methodName,
+                gDvmInlineOpsTable[opIndex].methodSignature);
+            goto skip_prof;
+        }
+
+        gDvm.inlinedMethods[opIndex] = method;
+        IF_LOGV() {
+            char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+            LOGV("Registered for profile: %s.%s %s\n",
+                method->clazz->descriptor, method->name, desc);
+            free(desc);
+        }
+    }
+
+    /* wrap the call in enter/exit events so the profiler sees it */
+    TRACE_METHOD_ENTER(self, method);
+    result = (*gDvmInlineOpsTable[opIndex].func)(arg0, arg1, arg2, arg3,
+                pResult);
+    TRACE_METHOD_EXIT(self, method);
+    return result;
+
+skip_prof:
+#endif
+    return (*gDvmInlineOpsTable[opIndex].func)(arg0, arg1, arg2, arg3, pResult);
+}
+
+/*
+ * org.apache.harmony.dalvik.NativeTestTarget
+ * public static void emptyInlineMethod
+ *
+ * This exists only for benchmarks.
+ */
+static bool org_apache_harmony_dalvik_NativeTestTarget_emptyInlineMethod(
+    u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+{
+    // do nothing
+    return true;
+}
+
+/*
+ * java.lang.String
+ * public char charAt(int index)
+ *
+ * arg0 holds "this" (a StringObject*), arg1 the index; arg2/arg3 are
+ * unused padding required by the InlineOp4Func signature.
+ *
+ * Throws StringIndexOutOfBoundsException (and returns false) if the
+ * index is out of range; returns false with an exception pending if
+ * "this" is null.
+ */
+static bool javaLangString_charAt(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+    JValue* pResult)
+{
+    int count, offset;
+    ArrayObject* chars;
+
+    /* null reference check on "this" */
+    if (!dvmValidateObject((Object*) arg0))
+        return false;
+
+    //LOGI("String.charAt this=0x%08x index=%d\n", arg0, arg1);
+    count = dvmGetFieldInt((Object*) arg0, gDvm.offJavaLangString_count);
+    if ((s4) arg1 < 0 || (s4) arg1 >= count) {
+        dvmThrowException("Ljava/lang/StringIndexOutOfBoundsException;", NULL);
+        return false;
+    } else {
+        offset = dvmGetFieldInt((Object*) arg0, gDvm.offJavaLangString_offset);
+        chars = (ArrayObject*)
+            dvmGetFieldObject((Object*) arg0, gDvm.offJavaLangString_value);
+
+        /* bounds were checked above; result widens to 32 bits in pResult->i */
+        pResult->i = ((const u2*) chars->contents)[arg1 + offset];
+        return true;
+    }
+}
+
+/*
+ * Utility function when we're evaluating alternative implementations.
+ *
+ * Logs both strings (metadata, C-string form, and a hex dump of the
+ * relevant char data) and then aborts the VM.  Does not return, so the
+ * strings allocated by dvmCreateCstrFromString() are deliberately not
+ * freed.
+ */
+static void badMatch(StringObject* thisStrObj, StringObject* compStrObj,
+    int expectResult, int newResult, const char* compareType)
+{
+    ArrayObject* thisArray;
+    ArrayObject* compArray;
+    const char* thisStr;
+    const char* compStr;
+    int thisOffset, compOffset, thisCount, compCount;
+
+    thisCount =
+        dvmGetFieldInt((Object*) thisStrObj, gDvm.offJavaLangString_count);
+    compCount =
+        dvmGetFieldInt((Object*) compStrObj, gDvm.offJavaLangString_count);
+    thisOffset =
+        dvmGetFieldInt((Object*) thisStrObj, gDvm.offJavaLangString_offset);
+    compOffset =
+        dvmGetFieldInt((Object*) compStrObj, gDvm.offJavaLangString_offset);
+    thisArray = (ArrayObject*)
+        dvmGetFieldObject((Object*) thisStrObj, gDvm.offJavaLangString_value);
+    compArray = (ArrayObject*)
+        dvmGetFieldObject((Object*) compStrObj, gDvm.offJavaLangString_value);
+
+    thisStr = dvmCreateCstrFromString(thisStrObj);
+    compStr = dvmCreateCstrFromString(compStrObj);
+
+    LOGE("%s expected %d got %d\n", compareType, expectResult, newResult);
+    LOGE(" this (o=%d l=%d) '%s'\n", thisOffset, thisCount, thisStr);
+    LOGE(" comp (o=%d l=%d) '%s'\n", compOffset, compCount, compStr);
+    dvmPrintHexDumpEx(ANDROID_LOG_INFO, LOG_TAG,
+        ((const u2*) thisArray->contents) + thisOffset, thisCount*2,
+        kHexDumpLocal);
+    dvmPrintHexDumpEx(ANDROID_LOG_INFO, LOG_TAG,
+        ((const u2*) compArray->contents) + compOffset, compCount*2,
+        kHexDumpLocal);
+    dvmAbort();
+}
+
+/*
+ * java.lang.String
+ * public int compareTo(String s)
+ *
+ * arg0 holds "this" (a StringObject*), arg1 the String argument;
+ * arg2/arg3 are unused padding required by the InlineOp4Func signature.
+ *
+ * Stores the comparison result in pResult->i.  Returns false only if a
+ * null check failed (exception pending).
+ */
+static bool javaLangString_compareTo(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+    JValue* pResult)
+{
+    /*
+     * Null reference check on "this".  Normally this is performed during
+     * the setup of the virtual method call.  We need to do it before
+     * anything else.  While we're at it, check out the other string,
+     * which must also be non-null.
+     */
+    if (!dvmValidateObject((Object*) arg0) ||
+        !dvmValidateObject((Object*) arg1))
+    {
+        return false;
+    }
+
+    /* quick test for comparison with itself */
+    if (arg0 == arg1) {
+        pResult->i = 0;
+        return true;
+    }
+
+    /*
+     * This would be simpler and faster if we promoted StringObject to
+     * a full representation, lining up the C structure fields with the
+     * actual object fields.
+     */
+    int thisCount, thisOffset, compCount, compOffset;
+    ArrayObject* thisArray;
+    ArrayObject* compArray;
+    const u2* thisChars;
+    const u2* compChars;
+    /*
+     * NOTE(review): "i" is only referenced by the portable loop and the
+     * CHECK_MEMCMP16 cross-check; with HAVE__MEMCMP16 defined alone it
+     * is unused and may draw a compiler warning.
+     */
+    int i, minCount, countDiff;
+
+    thisCount = dvmGetFieldInt((Object*) arg0, gDvm.offJavaLangString_count);
+    compCount = dvmGetFieldInt((Object*) arg1, gDvm.offJavaLangString_count);
+    countDiff = thisCount - compCount;
+    minCount = (countDiff < 0) ? thisCount : compCount;
+    thisOffset = dvmGetFieldInt((Object*) arg0, gDvm.offJavaLangString_offset);
+    compOffset = dvmGetFieldInt((Object*) arg1, gDvm.offJavaLangString_offset);
+    thisArray = (ArrayObject*)
+        dvmGetFieldObject((Object*) arg0, gDvm.offJavaLangString_value);
+    compArray = (ArrayObject*)
+        dvmGetFieldObject((Object*) arg1, gDvm.offJavaLangString_value);
+    thisChars = ((const u2*) thisArray->contents) + thisOffset;
+    compChars = ((const u2*) compArray->contents) + compOffset;
+
+#ifdef HAVE__MEMCMP16
+    /*
+     * Use assembly version, which returns the difference between the
+     * characters.  The annoying part here is that 0x00e9 - 0xffff != 0x00ea,
+     * because the interpreter converts the characters to 32-bit integers
+     * *without* sign extension before it subtracts them (which makes some
+     * sense since "char" is unsigned).  So what we get is the result of
+     * 0x000000e9 - 0x0000ffff, which is 0xffff00ea.
+     */
+    int otherRes = __memcmp16(thisChars, compChars, minCount);
+# ifdef CHECK_MEMCMP16
+    for (i = 0; i < minCount; i++) {
+        if (thisChars[i] != compChars[i]) {
+            pResult->i = (s4) thisChars[i] - (s4) compChars[i];
+            if (pResult->i != otherRes) {
+                badMatch((StringObject*) arg0, (StringObject*) arg1,
+                    pResult->i, otherRes, "compareTo");
+            }
+            return true;
+        }
+    }
+# endif
+    if (otherRes != 0) {
+        pResult->i = otherRes;
+        return true;
+    }
+
+#else
+    /*
+     * Straightforward implementation, examining 16 bits at a time.  Compare
+     * the characters that overlap, and if they're all the same then return
+     * the difference in lengths.
+     */
+    for (i = 0; i < minCount; i++) {
+        if (thisChars[i] != compChars[i]) {
+            pResult->i = (s4) thisChars[i] - (s4) compChars[i];
+            return true;
+        }
+    }
+#endif
+
+    /* overlapping chars all matched; order by length, per String.compareTo */
+    pResult->i = countDiff;
+    return true;
+}
+
+/*
+ * java.lang.String
+ * public boolean equals(Object anObject)
+ *
+ * arg0 holds "this" (a StringObject*), arg1 the Object argument;
+ * arg2/arg3 are unused padding required by the InlineOp4Func signature.
+ *
+ * Stores the boolean result in pResult->i (full 32 bits, per the inline
+ * op convention).  Returns false only if "this" is null.
+ */
+static bool javaLangString_equals(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+    JValue* pResult)
+{
+    /*
+     * Null reference check on "this".
+     */
+    if (!dvmValidateObject((Object*) arg0))
+        return false;
+
+    /* quick test for comparison with itself */
+    if (arg0 == arg1) {
+        pResult->i = true;
+        return true;
+    }
+
+    /*
+     * See if the other object is also a String.
+     *
+     * str.equals(null) is expected to return false, presumably based on
+     * the results of the instanceof test.
+     *
+     * NOTE(review): exact-class comparison stands in for instanceof;
+     * presumably safe because java.lang.String is final -- confirm.
+     */
+    if (arg1 == 0 || ((Object*) arg0)->clazz != ((Object*) arg1)->clazz) {
+        pResult->i = false;
+        return true;
+    }
+
+    /*
+     * This would be simpler and faster if we promoted StringObject to
+     * a full representation, lining up the C structure fields with the
+     * actual object fields.
+     */
+    int thisCount, thisOffset, compCount, compOffset;
+    ArrayObject* thisArray;
+    ArrayObject* compArray;
+    const u2* thisChars;
+    const u2* compChars;
+    int i;
+
+    /* quick length check */
+    thisCount = dvmGetFieldInt((Object*) arg0, gDvm.offJavaLangString_count);
+    compCount = dvmGetFieldInt((Object*) arg1, gDvm.offJavaLangString_count);
+    if (thisCount != compCount) {
+        pResult->i = false;
+        return true;
+    }
+
+    thisOffset = dvmGetFieldInt((Object*) arg0, gDvm.offJavaLangString_offset);
+    compOffset = dvmGetFieldInt((Object*) arg1, gDvm.offJavaLangString_offset);
+    thisArray = (ArrayObject*)
+        dvmGetFieldObject((Object*) arg0, gDvm.offJavaLangString_value);
+    compArray = (ArrayObject*)
+        dvmGetFieldObject((Object*) arg1, gDvm.offJavaLangString_value);
+    thisChars = ((const u2*) thisArray->contents) + thisOffset;
+    compChars = ((const u2*) compArray->contents) + compOffset;
+
+#ifdef HAVE__MEMCMP16
+    pResult->i = (__memcmp16(thisChars, compChars, thisCount) == 0);
+# ifdef CHECK_MEMCMP16
+    int otherRes = (memcmp(thisChars, compChars, thisCount * 2) == 0);
+    if (pResult->i != otherRes) {
+        badMatch((StringObject*) arg0, (StringObject*) arg1,
+            otherRes, pResult->i, "equals-1");
+    }
+# endif
+#else
+    /*
+     * Straightforward implementation, examining 16 bits at a time.  The
+     * direction of the loop doesn't matter, and starting at the end may
+     * give us an advantage when comparing certain types of strings (e.g.
+     * class names).
+     *
+     * We want to go forward for benchmarks against __memcmp16 so we get a
+     * meaningful comparison when the strings don't match (could also test
+     * with palindromes).
+     */
+    //for (i = 0; i < thisCount; i++)
+    for (i = thisCount-1; i >= 0; --i)
+    {
+        if (thisChars[i] != compChars[i]) {
+            pResult->i = false;
+            return true;
+        }
+    }
+    pResult->i = true;
+#endif
+
+    return true;
+}
+
+/*
+ * java.lang.String
+ * public int length()
+ */
+static bool javaLangString_length(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+    JValue* pResult)
+{
+    //LOGI("String.length this=0x%08x pResult=%p\n", arg0, pResult);
+
+    /* null reference check on "this" */
+    if (!dvmValidateObject((Object*) arg0))
+        return false;
+
+    pResult->i = dvmGetFieldInt((Object*) arg0, gDvm.offJavaLangString_count);
+    return true;
+}
+
diff --git a/vm/InlineNative.h b/vm/InlineNative.h
new file mode 100644
index 0000000..a6177a6
--- /dev/null
+++ b/vm/InlineNative.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Inlined native functions.
+ */
+#ifndef _DALVIK_INLINENATIVE
+#define _DALVIK_INLINENATIVE
+
+/* startup/shutdown */
+bool dvmInlineNativeStartup(void);
+void dvmInlineNativeShutdown(void);
+
+/*
+ * Basic 4-argument inline operation handler.
+ */
+typedef bool (*InlineOp4Func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+    JValue* pResult);
+
+/*
+ * Table of inline operations.
+ *
+ * Try to keep this at a power-of-two size, so we don't have to multiply.
+ *
+ * TODO: might be to our advantage to generate a compact jump table on
+ * the heap at runtime (or just declare two static tables, one with full
+ * info and one with just function pointers).  Especially useful if we decide
+ * to support other method call forms, e.g. /range.  We can also just
+ * generate assembly code that knows how many args it needs and has the
+ * target address embedded.
+ */
+typedef struct InlineOperation {
+    InlineOp4Func   func;               /* MUST be first entry */
+    const char*     classDescriptor;
+    const char*     methodName;
+    const char*     methodSignature;
+} InlineOperation;
+
+/*
+ * Get the inlineops table.
+ */
+const InlineOperation* dvmGetInlineOpsTable(void);
+int dvmGetInlineOpsTableLength(void);
+
+/*
+ * The table, exposed so we can access it with C inlines.  Prefer access
+ * through dvmGetInlineOpsTable().
+ */
+extern const InlineOperation gDvmInlineOpsTable[];
+
+/*
+ * Perform the operation specified by "opIndex".
+ *
+ * We want the arguments to appear in the first 4 registers so they can
+ * be passed straight through to the handler function.  Ideally on ARM
+ * they'll go into r0-r3 and stay there.
+ *
+ * Returns "true" if everything went normally, "false" if an exception
+ * was thrown.
+ */
+INLINE bool dvmPerformInlineOp4Std(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+    JValue* pResult, int opIndex)
+{
+    return (*gDvmInlineOpsTable[opIndex].func)(arg0, arg1, arg2, arg3, pResult);
+}
+
+/*
+ * Like the "std" version, but will emit profiling info.
+ */
+bool dvmPerformInlineOp4Dbg(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+    JValue* pResult, int opIndex);
+
+#endif /*_DALVIK_INLINENATIVE*/
diff --git a/vm/Inlines.c b/vm/Inlines.c
new file mode 100644
index 0000000..dfef31e
--- /dev/null
+++ b/vm/Inlines.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Generate non-inline copies of inline functions declared in header files.
+ */
+
+#define _DALVIK_GEN_INLINES
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "analysis/CodeVerify.h"
+
+#undef LOG_TAG
+#include "jdwp/JdwpPriv.h"
+
diff --git a/vm/Inlines.h b/vm/Inlines.h
new file mode 100644
index 0000000..cdfaf75
--- /dev/null
+++ b/vm/Inlines.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * In gcc, "extern inline" ensures that the copy in the header is never
+ * turned into a separate function.  This prevents us from having multiple
+ * non-inline copies.  However, we still need to provide a non-inline
+ * version in the library for the benefit of applications that include our
+ * headers and are built with optimizations disabled.  Either that, or use
+ * the "always_inline" gcc attribute to ensure that the non-inline version
+ * is never needed.
+ *
+ * (Note C99 has different notions about what the keyword combos mean.)
+ */
+#ifndef _DALVIK_GEN_INLINES             /* only defined by Inlines.c */
+# define INLINE extern __inline__
+#else
+# define INLINE
+#endif
+
diff --git a/vm/Intern.c b/vm/Intern.c
new file mode 100644
index 0000000..8584333
--- /dev/null
+++ b/vm/Intern.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * String interning.
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+
+/*
+ * "Immortal" interned strings are tagged by setting the low bit of the
+ * pointer value stored in the hash table.  This presumably relies on
+ * StringObject pointers being at least 2-byte aligned (TODO confirm
+ * against the allocator); the bit must be stripped with
+ * STRIP_IMMORTAL_BIT before the pointer is dereferenced.
+ */
+#define INTERN_STRING_IMMORTAL_BIT (1<<0)
+#define SET_IMMORTAL_BIT(strObj) \
+            ((uintptr_t)(strObj) | INTERN_STRING_IMMORTAL_BIT)
+#define STRIP_IMMORTAL_BIT(strObj) \
+            ((uintptr_t)(strObj) & ~INTERN_STRING_IMMORTAL_BIT)
+#define IS_IMMORTAL(strObj) \
+            ((uintptr_t)(strObj) & INTERN_STRING_IMMORTAL_BIT)
+
+
+/*
+ * Prep string interning.
+ *
+ * Creates the global intern table (initial capacity 256, no free
+ * function -- the entries are GC-heap objects).  Returns false on
+ * allocation failure.
+ */
+bool dvmStringInternStartup(void)
+{
+    gDvm.internedStrings = dvmHashTableCreate(256, NULL);
+    if (gDvm.internedStrings == NULL)
+        return false;
+
+    return true;
+}
+
+/*
+ * Chuck the intern list.
+ *
+ * The contents of the list are StringObjects that live on the GC heap.
+ */
+void dvmStringInternShutdown(void)
+{
+    dvmHashTableFree(gDvm.internedStrings);
+    gDvm.internedStrings = NULL;
+}
+
+
+/*
+ * Compare two string objects that may have INTERN_STRING_IMMORTAL_BIT
+ * set in their pointer values.
+ */
+static int hashcmpImmortalStrings(const void* vstrObj1, const void* vstrObj2)
+{
+    return dvmHashcmpStrings((const void*) STRIP_IMMORTAL_BIT(vstrObj1),
+                             (const void*) STRIP_IMMORTAL_BIT(vstrObj2));
+}
+
+/*
+ * Shared implementation for interned-string lookup.
+ *
+ * Looks up "strObj" in the intern table under the table lock, adding it
+ * if it's not already present, and returns the canonical instance with
+ * the immortal tag bit stripped.  If "immortal" is set and the existing
+ * entry isn't tagged yet, the entry is removed, tagged, and re-added
+ * (there's no way to modify a hash table entry in place).
+ *
+ * NOTE(review): "found" is not checked for NULL; assumes
+ * dvmHashTableLookup() with doAdd=true always returns an entry --
+ * TODO confirm what happens on table-growth allocation failure.
+ */
+static StringObject* lookupInternedString(StringObject* strObj, bool immortal)
+{
+    StringObject* found;
+    u4 hash;
+
+    assert(strObj != NULL);
+    hash = dvmComputeStringHash(strObj);
+
+    /* debug aid, normally compiled out */
+    if (false) {
+        char* debugStr = dvmCreateCstrFromString(strObj);
+        LOGV("+++ dvmLookupInternedString searching for '%s'\n", debugStr);
+        free(debugStr);
+    }
+
+    if (immortal) {
+        strObj = (StringObject*) SET_IMMORTAL_BIT(strObj);
+    }
+
+    dvmHashTableLock(gDvm.internedStrings);
+
+    found = (StringObject*) dvmHashTableLookup(gDvm.internedStrings,
+                                hash, strObj, hashcmpImmortalStrings, true);
+    if (immortal && !IS_IMMORTAL(found)) {
+        /* Make this entry immortal.  We have to use the existing object
+         * because, as an interned string, it's not allowed to change.
+         *
+         * There's no way to get a pointer to the actual hash table entry,
+         * so the only way to modify the existing entry is to remove,
+         * modify, and re-add it.
+         */
+        dvmHashTableRemove(gDvm.internedStrings, hash, found);
+        found = (StringObject*) SET_IMMORTAL_BIT(found);
+        found = (StringObject*) dvmHashTableLookup(gDvm.internedStrings,
+                                    hash, found, hashcmpImmortalStrings, true);
+        assert(IS_IMMORTAL(found));
+    }
+
+    dvmHashTableUnlock(gDvm.internedStrings);
+
+    //if (found == strObj)
+    //    LOGVV("+++  added string\n");
+    return (StringObject*) STRIP_IMMORTAL_BIT(found);
+}
+
+/*
+ * Find an entry in the interned string list.
+ *
+ * If the string doesn't already exist, the StringObject is added to
+ * the list.  Otherwise, the existing entry is returned.
+ */
+StringObject* dvmLookupInternedString(StringObject* strObj)
+{
+    return lookupInternedString(strObj, false);
+}
+
+/*
+ * Same as dvmLookupInternedString(), but guarantees that the
+ * returned string is immortal.
+ */
+StringObject* dvmLookupImmortalInternedString(StringObject* strObj)
+{
+    return lookupInternedString(strObj, true);
+}
+
+/*
+ * Mark all immortal interned string objects so that they don't
+ * get collected by the GC.  Non-immortal strings may or may not
+ * get marked by other references.
+ */
+static int markStringObject(void* strObj, void* arg)
+{
+    UNUSED_PARAMETER(arg);
+
+    if (IS_IMMORTAL(strObj)) {
+        dvmMarkObjectNonNull((Object*) STRIP_IMMORTAL_BIT(strObj));
+    }
+    return 0;
+}
+
+/*
+ * Walk the intern table and mark every immortal entry so the GC treats
+ * it as a root.  Non-immortal entries are left alone; they may or may
+ * not be marked through other references.
+ *
+ * Declared with (void): empty parens in a C definition mean
+ * "unspecified parameters", and the rest of the file uses (void).
+ */
+void dvmGcScanInternedStrings(void)
+{
+    /* It's possible for a GC to happen before dvmStringInternStartup()
+     * is called.
+     */
+    if (gDvm.internedStrings != NULL) {
+        dvmHashTableLock(gDvm.internedStrings);
+        dvmHashForeach(gDvm.internedStrings, markStringObject, NULL);
+        dvmHashTableUnlock(gDvm.internedStrings);
+    }
+}
+
+/*
+ * Called by the GC after all reachable objects have been
+ * marked.  isUnmarkedObject is a function suitable for passing
+ * to dvmHashForeachRemove();  it must strip the low bits from
+ * its pointer argument to deal with the immortal bit, though.
+ */
+void dvmGcDetachDeadInternedStrings(int (*isUnmarkedObject)(void *))
+{
+    /* It's possible for a GC to happen before dvmStringInternStartup()
+     * is called.
+     */
+    if (gDvm.internedStrings != NULL) {
+        dvmHashTableLock(gDvm.internedStrings);
+        dvmHashForeachRemove(gDvm.internedStrings, isUnmarkedObject);
+        dvmHashTableUnlock(gDvm.internedStrings);
+    }
+}
diff --git a/vm/Intern.h b/vm/Intern.h
new file mode 100644
index 0000000..6b713fb
--- /dev/null
+++ b/vm/Intern.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interned strings.
+ */
+#ifndef _DALVIK_INTERN
+#define _DALVIK_INTERN
+
+bool dvmStringInternStartup(void);
+void dvmStringInternShutdown(void);
+
+StringObject* dvmLookupInternedString(StringObject* strObj);
+StringObject* dvmLookupImmortalInternedString(StringObject* strObj);
+
+#endif /*_DALVIK_INTERN*/
diff --git a/vm/InternalNative.c b/vm/InternalNative.c
new file mode 100644
index 0000000..7083951
--- /dev/null
+++ b/vm/InternalNative.c
@@ -0,0 +1,5617 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Internal native functions.  All of the functions defined here make
+ * direct use of VM functions or data structures, so they can't be written
+ * with JNI and shouldn't really be in a shared library.
+ *
+ * All functions here either complete quickly or are used to enter a wait
+ * state, so we don't set the thread status to THREAD_NATIVE when executing
+ * these methods.  This means that the GC will wait for these functions
+ * to finish.  DO NOT perform long operations or blocking I/O in here.
+ * These methods may not be declared "synchronized".
+ *
+ * We use "late" binding on these, rather than explicit registration,
+ * because it's easier to handle the core system classes that way.
+ *
+ * The functions here use the DalvikNativeFunc prototype, but we can also
+ * treat them as DalvikBridgeFunc, which takes two extra arguments.  The
+ * former represents the API that we're most likely to expose should JNI
+ * performance be deemed insufficient.  The Bridge version is used as an
+ * optimization for a few high-volume Object calls, and should generally
+ * not be used as we may drop support for it at some point.
+ *
+ * TODO: this is huge.  Consider splitting this into one file per class.
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <grp.h>
+#include <limits.h>
+
+#if defined(HAVE_PRCTL)
+#include <sys/prctl.h>
+#endif
+
+#include "alloc/HeapDebug.h"
+
+/*
+ * Return macros.  Note we use "->i" instead of "->z" for boolean; this
+ * is because the interpreter expects everything to be a 32-bit value.
+ */
+#ifdef NDEBUG
+# define RETURN_VOID()           do { (void)(pResult); return; } while(0)
+#else
+/* debug builds poison the result slot so a stale read is obvious */
+# define RETURN_VOID()           do { pResult->i = 0xfefeabab; return; }while(0)
+#endif
+#define RETURN_BOOLEAN(_val)    do { pResult->i = (_val); return; } while(0)
+#define RETURN_INT(_val)        do { pResult->i = (_val); return; } while(0)
+#define RETURN_LONG(_val)       do { pResult->j = (_val); return; } while(0)
+#define RETURN_FLOAT(_val)      do { pResult->f = (_val); return; } while(0)
+#define RETURN_DOUBLE(_val)     do { pResult->d = (_val); return; } while(0)
+#define RETURN_PTR(_val)        do { pResult->l = (_val); return; } while(0)
+
+
+/*
+ * Verify that "obj" is non-null and is an instance of "clazz".
+ *
+ * Returns "false" and throws an exception if not.
+ */
+static bool verifyObjectInClass(Object* obj, ClassObject* clazz)
+{
+    if (obj == NULL) {
+        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        return false;
+    }
+
+    if (dvmInstanceof(obj->clazz, clazz))
+        return true;
+
+    dvmThrowException("Ljava/lang/IllegalArgumentException;",
+        "object is not an instance of the class");
+    return false;
+}
+
+/*
+ * Validate a "fully qualified" class name, e.g. "Ljava/lang/String;" or "[I".
+ *
+ * Dotted names ("java.lang.String") pass; the only rejections are
+ * malformed array descriptors and names containing a '/' separator.
+ */
+static bool validateClassName(const char* name)
+{
+    int len = strlen(name);
+    int i = 0;
+
+    /* check for reasonable array types */
+    if (name[0] == '[') {
+        /* skip past all leading '[' dimension markers */
+        while (name[i] == '[')
+            i++;
+
+        if (name[i] == 'L') {
+            /* array of objects, make sure it ends well */
+            if (name[len-1] != ';')
+                return false;
+        } else if (strchr(PRIM_TYPE_TO_LETTER, name[i]) != NULL) {
+            /* primitive array; the type letter must be the last char.
+             * (If name[i] is '\0', strchr matches the terminator, but the
+             * i != len-1 test below still rejects the string.) */
+            if (i != len-1)
+                return false;
+        } else {
+            return false;
+        }
+    }
+
+    /* quick check for illegal chars */
+    for ( ; i < len; i++) {
+        if (name[i] == '/')
+            return false;
+    }
+
+    return true;
+}
+
+/*
+ * Find a class by name, initializing it if requested.
+ *
+ * "nameObj" is a dotted name (e.g. "java.lang.String"); "loader" is the
+ * defining class loader, or NULL for the bootstrap loader.  Returns NULL
+ * with an exception pending on failure.
+ */
+static ClassObject* findClassByName(StringObject* nameObj, Object* loader,
+    bool doInit)
+{
+    ClassObject* clazz = NULL;
+    char* name = NULL;
+    char* descriptor = NULL;
+
+    if (nameObj == NULL) {
+        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        goto bail;
+    }
+    /* NOTE(review): dvmCreateCstrFromString may return NULL on OOM;
+     * validateClassName would then dereference it -- confirm upstream
+     * guarantees, or add a check. */
+    name = dvmCreateCstrFromString(nameObj);
+
+    /*
+     * We need to validate and convert the name (from x.y.z to x/y/z).  This
+     * is especially handy for array types, since we want to avoid
+     * auto-generating bogus array classes.
+     */
+    if (!validateClassName(name)) {
+        LOGW("findClassByName rejecting '%s'\n", name);
+        goto bail;
+    }
+
+    descriptor = dvmDotToDescriptor(name);
+    if (descriptor == NULL) {
+        goto bail;
+    }
+
+    if (doInit)
+        clazz = dvmFindClass(descriptor, loader);
+    else
+        clazz = dvmFindClassNoInit(descriptor, loader);
+
+    if (clazz == NULL) {
+        LOGVV("FAIL: load %s (%d)\n", descriptor, doInit);
+        /* re-throw the lookup failure as ClassNotFoundException, chaining
+         * the original exception as its cause; keep the old exception
+         * alive across the clear/throw so GC can't reclaim it */
+        Thread* self = dvmThreadSelf();
+        Object* oldExcep = dvmGetException(self);
+        dvmAddTrackedAlloc(oldExcep, self);     /* don't let this be GCed */
+        dvmClearException(self);
+        dvmThrowChainedException("Ljava/lang/ClassNotFoundException;",
+            name, oldExcep);
+        dvmReleaseTrackedAlloc(oldExcep, self);
+    } else {
+        LOGVV("GOOD: load %s (%d) --> %p ldr=%p\n",
+            descriptor, doInit, clazz, clazz->classLoader);
+    }
+
+bail:
+    /* free(NULL) is a no-op, so the early-exit paths are safe */
+    free(name);
+    free(descriptor);
+    return clazz;
+}
+
+/*
+ * We insert native method stubs for abstract methods so we don't have to
+ * check the access flags at the time of the method call.  This results in
+ * "native abstract" methods, which can't exist.  If we see the "abstract"
+ * flag set, clear the "native" flag.
+ *
+ * We also move the DECLARED_SYNCHRONIZED flag into the SYNCHRONIZED
+ * position, because the callers of this function are trying to convey
+ * the "traditional" meaning of the flags to their callers.
+ */
+static inline u4 fixMethodFlags(u4 flags)
+{
+    /* an abstract method can never really be native */
+    if (flags & ACC_ABSTRACT)
+        flags &= ~ACC_NATIVE;
+
+    /* surface DECLARED_SYNCHRONIZED through the traditional bit */
+    flags &= ~ACC_SYNCHRONIZED;
+    if (flags & ACC_DECLARED_SYNCHRONIZED)
+        flags |= ACC_SYNCHRONIZED;
+
+    return flags & JAVA_FLAGS_MASK;
+}
+
+
+/*
+ * ===========================================================================
+ *      dalvik.system.VMDebug
+ * ===========================================================================
+ */
+
+#ifdef WITH_PROFILER
+/* These must match the values in dalvik.system.VMDebug.
+ *
+ * Low 16 bits select global (per-VM) counters; the same bits shifted
+ * left by 16 select the per-thread counters.
+ */
+enum {
+    KIND_ALLOCATED_OBJECTS = 1<<0,
+    KIND_ALLOCATED_BYTES   = 1<<1,
+    KIND_FREED_OBJECTS     = 1<<2,
+    KIND_FREED_BYTES       = 1<<3,
+    KIND_GC_INVOCATIONS    = 1<<4,
+#if PROFILE_EXTERNAL_ALLOCATIONS
+    KIND_EXT_ALLOCATED_OBJECTS = 1<<12,
+    KIND_EXT_ALLOCATED_BYTES   = 1<<13,
+    KIND_EXT_FREED_OBJECTS     = 1<<14,
+    KIND_EXT_FREED_BYTES       = 1<<15,
+#endif // PROFILE_EXTERNAL_ALLOCATIONS
+
+    KIND_GLOBAL_ALLOCATED_OBJECTS   = KIND_ALLOCATED_OBJECTS,
+    KIND_GLOBAL_ALLOCATED_BYTES     = KIND_ALLOCATED_BYTES,
+    KIND_GLOBAL_FREED_OBJECTS       = KIND_FREED_OBJECTS,
+    KIND_GLOBAL_FREED_BYTES         = KIND_FREED_BYTES,
+    KIND_GLOBAL_GC_INVOCATIONS      = KIND_GC_INVOCATIONS,
+#if PROFILE_EXTERNAL_ALLOCATIONS
+    KIND_GLOBAL_EXT_ALLOCATED_OBJECTS = KIND_EXT_ALLOCATED_OBJECTS,
+    KIND_GLOBAL_EXT_ALLOCATED_BYTES = KIND_EXT_ALLOCATED_BYTES,
+    KIND_GLOBAL_EXT_FREED_OBJECTS   = KIND_EXT_FREED_OBJECTS,
+    KIND_GLOBAL_EXT_FREED_BYTES     = KIND_EXT_FREED_BYTES,
+#endif // PROFILE_EXTERNAL_ALLOCATIONS
+
+    KIND_THREAD_ALLOCATED_OBJECTS   = KIND_ALLOCATED_OBJECTS << 16,
+    KIND_THREAD_ALLOCATED_BYTES     = KIND_ALLOCATED_BYTES << 16,
+    KIND_THREAD_FREED_OBJECTS       = KIND_FREED_OBJECTS << 16,
+    KIND_THREAD_FREED_BYTES         = KIND_FREED_BYTES << 16,
+#if PROFILE_EXTERNAL_ALLOCATIONS
+    KIND_THREAD_EXT_ALLOCATED_OBJECTS = KIND_EXT_ALLOCATED_OBJECTS << 16,
+    KIND_THREAD_EXT_ALLOCATED_BYTES = KIND_EXT_ALLOCATED_BYTES << 16,
+    KIND_THREAD_EXT_FREED_OBJECTS   = KIND_EXT_FREED_OBJECTS << 16,
+    KIND_THREAD_EXT_FREED_BYTES     = KIND_EXT_FREED_BYTES << 16,
+#endif // PROFILE_EXTERNAL_ALLOCATIONS
+    KIND_THREAD_GC_INVOCATIONS      = KIND_GC_INVOCATIONS << 16,
+
+    // TODO: failedAllocCount, failedAllocSize
+};
+
+/* select every counter, global and per-thread */
+#define KIND_ALL_COUNTS 0xffffffff
+
+/*
+ * Zero out the specified fields.
+ *
+ * "kinds" is a bitmask of KIND_* values (un-shifted form -- the caller
+ * picks global vs. per-thread by choosing which AllocProfState to pass).
+ */
+static void clearAllocProfStateFields(AllocProfState *allocProf,
+    unsigned int kinds)
+{
+    if (kinds & KIND_ALLOCATED_OBJECTS) {
+        allocProf->allocCount = 0;
+    }
+    if (kinds & KIND_ALLOCATED_BYTES) {
+        allocProf->allocSize = 0;
+    }
+    if (kinds & KIND_FREED_OBJECTS) {
+        allocProf->freeCount = 0;
+    }
+    if (kinds & KIND_FREED_BYTES) {
+        allocProf->freeSize = 0;
+    }
+    if (kinds & KIND_GC_INVOCATIONS) {
+        allocProf->gcCount = 0;
+    }
+#if PROFILE_EXTERNAL_ALLOCATIONS
+    if (kinds & KIND_EXT_ALLOCATED_OBJECTS) {
+        allocProf->externalAllocCount = 0;
+    }
+    if (kinds & KIND_EXT_ALLOCATED_BYTES) {
+        allocProf->externalAllocSize = 0;
+    }
+    if (kinds & KIND_EXT_FREED_OBJECTS) {
+        allocProf->externalFreeCount = 0;
+    }
+    if (kinds & KIND_EXT_FREED_BYTES) {
+        allocProf->externalFreeSize = 0;
+    }
+#endif // PROFILE_EXTERNAL_ALLOCATIONS
+}
+#endif
+
+/*
+ * static void startAllocCounting()
+ *
+ * Reset the counters and enable counting.
+ *
+ * Clears both the global counters and the calling thread's counters
+ * before enabling.  No-op unless built with WITH_PROFILER.
+ *
+ * TODO: this currently only resets the per-thread counters for the current
+ * thread.  If we actually start using the per-thread counters we'll
+ * probably want to fix this.
+ */
+static void Dalvik_dalvik_system_VMDebug_startAllocCounting(const u4* args,
+    JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+#ifdef WITH_PROFILER
+    clearAllocProfStateFields(&gDvm.allocProf, KIND_ALL_COUNTS);
+    clearAllocProfStateFields(&dvmThreadSelf()->allocProf, KIND_ALL_COUNTS);
+    dvmStartAllocCounting();
+#endif
+    RETURN_VOID();
+}
+
+/*
+ * public static void stopAllocCounting()
+ *
+ * Disable the allocation counters.  The accumulated values are not
+ * cleared here, so getAllocCount() can still read them afterward.
+ */
+static void Dalvik_dalvik_system_VMDebug_stopAllocCounting(const u4* args,
+    JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+#ifdef WITH_PROFILER
+    dvmStopAllocCounting();
+#endif
+    RETURN_VOID();
+}
+
+/*
+ * private static int getAllocCount(int kind)
+ *
+ * Return a single counter value.  "kind" below 1<<16 selects a global
+ * counter; higher bits select the calling thread's counter (shifted
+ * down before dispatch).  Returns -1 when profiling is compiled out.
+ */
+static void Dalvik_dalvik_system_VMDebug_getAllocCount(const u4* args,
+    JValue* pResult)
+{
+#ifdef WITH_PROFILER
+    AllocProfState *allocProf;
+    unsigned int kind = args[0];
+    if (kind < (1<<16)) {
+        allocProf = &gDvm.allocProf;
+    } else {
+        allocProf = &dvmThreadSelf()->allocProf;
+        kind >>= 16;
+    }
+    switch (kind) {
+    case KIND_ALLOCATED_OBJECTS:
+        pResult->i = allocProf->allocCount;
+        break;
+    case KIND_ALLOCATED_BYTES:
+        pResult->i = allocProf->allocSize;
+        break;
+    case KIND_FREED_OBJECTS:
+        pResult->i = allocProf->freeCount;
+        break;
+    case KIND_FREED_BYTES:
+        pResult->i = allocProf->freeSize;
+        break;
+    case KIND_GC_INVOCATIONS:
+        pResult->i = allocProf->gcCount;
+        break;
+#if PROFILE_EXTERNAL_ALLOCATIONS
+    case KIND_EXT_ALLOCATED_OBJECTS:
+        pResult->i = allocProf->externalAllocCount;
+        break;
+    case KIND_EXT_ALLOCATED_BYTES:
+        pResult->i = allocProf->externalAllocSize;
+        break;
+    case KIND_EXT_FREED_OBJECTS:
+        pResult->i = allocProf->externalFreeCount;
+        break;
+    case KIND_EXT_FREED_BYTES:
+        pResult->i = allocProf->externalFreeSize;
+        break;
+#endif // PROFILE_EXTERNAL_ALLOCATIONS
+    default:
+        /* unknown kind from managed code; -1 in release builds */
+        assert(false);
+        pResult->i = -1;
+    }
+#else
+    RETURN_INT(-1);
+#endif
+}
+
+/*
+ * public static void resetAllocCount(int kinds)
+ *
+ * Zero the selected counters: low 16 bits of "kinds" address the global
+ * counters, high 16 bits the calling thread's counters.
+ */
+static void Dalvik_dalvik_system_VMDebug_resetAllocCount(const u4* args,
+    JValue* pResult)
+{
+#ifdef WITH_PROFILER
+    unsigned int kinds = args[0];
+    clearAllocProfStateFields(&gDvm.allocProf, kinds & 0xffff);
+    clearAllocProfStateFields(&dvmThreadSelf()->allocProf, kinds >> 16);
+#endif
+    RETURN_VOID();
+}
+
+/*
+ * static void startMethodTracing(String traceFileName,
+ *     int bufferSize, int flags)
+ *
+ * Start method trace profiling.
+ *
+ * Throws IllegalArgumentException for a null file name or an unusably
+ * small buffer.  No-op when built without WITH_PROFILER.
+ */
+static void Dalvik_dalvik_system_VMDebug_startMethodTracing(const u4* args,
+    JValue* pResult)
+{
+#ifdef WITH_PROFILER
+    StringObject* traceFileStr = (StringObject*) args[0];
+    int bufferSize = args[1];
+    int flags = args[2];
+    char* traceFileName;
+
+    if (bufferSize == 0) {
+        // Default to 8MB per the documentation.
+        bufferSize = 8 * 1024 * 1024;
+    }
+
+    /* BUGFIX: this used to throw "Ljava/lang/InvalidArgument;", which is
+     * not a real class, so the throw itself would fail.  Use the correct
+     * IllegalArgumentException type.
+     */
+    if (traceFileStr == NULL || bufferSize < 1024) {
+        dvmThrowException("Ljava/lang/IllegalArgumentException;", NULL);
+        RETURN_VOID();
+    }
+
+    traceFileName = dvmCreateCstrFromString(traceFileStr);
+    if (traceFileName == NULL) {
+        /* allocation failed; an OOM exception should already be pending */
+        RETURN_VOID();
+    }
+
+    dvmMethodTraceStart(traceFileName, bufferSize, flags);
+    free(traceFileName);
+#else
+    // throw exception?
+#endif
+    RETURN_VOID();
+}
+
+/*
+ * static void stopMethodTracing()
+ *
+ * Stop method tracing.  No-op when built without WITH_PROFILER.
+ */
+static void Dalvik_dalvik_system_VMDebug_stopMethodTracing(const u4* args,
+    JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+#ifdef WITH_PROFILER
+    dvmMethodTraceStop();
+#else
+    // throw exception?
+#endif
+    RETURN_VOID();
+}
+
+/*
+ * static void startEmulatorTracing()
+ *
+ * Start sending method trace info to the emulator.
+ * No-op when built without WITH_PROFILER.
+ */
+static void Dalvik_dalvik_system_VMDebug_startEmulatorTracing(const u4* args,
+    JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+#ifdef WITH_PROFILER
+    dvmEmulatorTraceStart();
+#else
+    // throw exception?
+#endif
+    RETURN_VOID();
+}
+
+/*
+ * static void stopEmulatorTracing()
+ *
+ * Stop sending method trace info to the emulator.
+ * (The original comment said "Start", a copy/paste slip.)
+ * No-op when built without WITH_PROFILER.
+ */
+static void Dalvik_dalvik_system_VMDebug_stopEmulatorTracing(const u4* args,
+    JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+#ifdef WITH_PROFILER
+    dvmEmulatorTraceStop();
+#else
+    // throw exception?
+#endif
+    RETURN_VOID();
+}
+
+/*
+ * static int setAllocationLimit(int limit)
+ *
+ * Set the current allocation limit in this thread.  Return the previous
+ * value.  -1 means "no limit"; anything below -1 is clamped to -1.
+ * Returns -1 when built without WITH_ALLOC_LIMITS.
+ */
+static void Dalvik_dalvik_system_VMDebug_setAllocationLimit(const u4* args,
+    JValue* pResult)
+{
+#if defined(WITH_ALLOC_LIMITS)
+    /* enable limit checking globally once anybody sets a limit */
+    gDvm.checkAllocLimits = true;
+
+    Thread* self = dvmThreadSelf();
+    int newLimit = args[0];
+    int oldLimit = self->allocLimit;
+
+    if (newLimit < -1) {
+        LOGE("WARNING: bad limit request (%d)\n", newLimit);
+        newLimit = -1;
+    }
+    self->allocLimit = newLimit;
+    RETURN_INT(oldLimit);
+#else
+    UNUSED_PARAMETER(args);
+    RETURN_INT(-1);
+#endif
+}
+
+/*
+ * static int setGlobalAllocationLimit(int limit)
+ *
+ * Set the allocation limit for this process.  Returns the previous value.
+ *
+ * NOTE(review): only 0 and -1 are accepted (positive limits are rejected
+ * by the newLimit > 0 test) -- confirm this restriction is intentional
+ * and matches the VMDebug documentation.
+ */
+static void Dalvik_dalvik_system_VMDebug_setGlobalAllocationLimit(const u4* args,
+    JValue* pResult)
+{
+#if defined(WITH_ALLOC_LIMITS)
+    gDvm.checkAllocLimits = true;
+
+    int newLimit = args[0];
+    int oldLimit = gDvm.allocationLimit;
+
+    if (newLimit < -1 || newLimit > 0) {
+        LOGE("WARNING: bad limit request (%d)\n", newLimit);
+        newLimit = -1;
+    }
+    // TODO: should use an atomic swap here
+    gDvm.allocationLimit = newLimit;
+    RETURN_INT(oldLimit);
+#else
+    UNUSED_PARAMETER(args);
+    RETURN_INT(-1);
+#endif
+}
+
+/*
+ * static boolean isDebuggerConnected()
+ *
+ * Returns "true" if a debugger is attached.
+ */
+static void Dalvik_dalvik_system_VMDebug_isDebuggerConnected(const u4* args,
+    JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+    RETURN_BOOLEAN(dvmDbgIsDebuggerConnected());
+}
+
+/*
+ * static long lastDebuggerActivity()
+ *
+ * Returns the time, in msec, since we last had an interaction with the
+ * debugger (send or receive).
+ */
+static void Dalvik_dalvik_system_VMDebug_lastDebuggerActivity(const u4* args,
+    JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+    RETURN_LONG(dvmDbgLastDebuggerActivity());
+}
+
+/*
+ * static void startInstructionCounting()
+ *
+ * Throws UnsupportedOperationException when built without WITH_PROFILER.
+ */
+static void Dalvik_dalvik_system_VMDebug_startInstructionCounting(const u4* args,
+    JValue* pResult)
+{
+#if defined(WITH_PROFILER)
+    dvmStartInstructionCounting();
+    RETURN_VOID();
+#else
+    dvmThrowException("Ljava/lang/UnsupportedOperationException;", NULL);
+#endif
+}
+
+/*
+ * static void stopInstructionCounting()
+ *
+ * Throws UnsupportedOperationException when built without WITH_PROFILER.
+ */
+static void Dalvik_dalvik_system_VMDebug_stopInstructionCounting(const u4* args,
+    JValue* pResult)
+{
+#if defined(WITH_PROFILER)
+    dvmStopInstructionCounting();
+    RETURN_VOID();
+#else
+    dvmThrowException("Ljava/lang/UnsupportedOperationException;", NULL);
+#endif
+}
+
+/*
+ * static boolean getInstructionCount(int[] counts)
+ *
+ * Grab a copy of the global instruction count array.
+ *
+ * Since the instruction counts aren't synchronized, we use sched_yield
+ * to improve our chances of finishing without contention.  (Only makes
+ * sense on a uniprocessor.)
+ */
+static void Dalvik_dalvik_system_VMDebug_getInstructionCount(const u4* args,
+    JValue* pResult)
+{
+#if defined(WITH_PROFILER)
+    ArrayObject* countArray = (ArrayObject*) args[0];
+    int* storage;
+
+    /* BUGFIX: the original copied blindly; reject a null array or one
+     * too small to hold every counter instead of overrunning the buffer.
+     */
+    if (countArray == NULL) {
+        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        RETURN_VOID();
+    }
+    if (countArray->length < (u4) kNumDalvikInstructions) {
+        dvmThrowException("Ljava/lang/IllegalArgumentException;",
+            "counts array too small");
+        RETURN_VOID();
+    }
+
+    storage = (int*) countArray->contents;
+    sched_yield();
+    memcpy(storage, gDvm.executedInstrCounts,
+        kNumDalvikInstructions * sizeof(int));
+
+    RETURN_VOID();
+#else
+    dvmThrowException("Ljava/lang/UnsupportedOperationException;", NULL);
+#endif
+}
+
+/*
+ * static boolean resetInstructionCount()
+ *
+ * Reset the instruction count array.  The sched_yield improves the odds
+ * of clearing without racing the (unsynchronized) interpreter updates.
+ */
+static void Dalvik_dalvik_system_VMDebug_resetInstructionCount(const u4* args,
+    JValue* pResult)
+{
+#if defined(WITH_PROFILER)
+    sched_yield();
+    memset(gDvm.executedInstrCounts, 0, kNumDalvikInstructions * sizeof(int));
+    RETURN_VOID();
+#else
+    dvmThrowException("Ljava/lang/UnsupportedOperationException;", NULL);
+#endif
+}
+
+/*
+ * static void printLoadedClasses(int flags)
+ *
+ * Dump the list of loaded classes.  "flags" is passed straight through
+ * to dvmDumpAllClasses().
+ */
+static void Dalvik_dalvik_system_VMDebug_printLoadedClasses(const u4* args,
+    JValue* pResult)
+{
+    int flags = args[0];
+
+    dvmDumpAllClasses(flags);
+
+    RETURN_VOID();
+}
+
+/*
+ * static int getLoadedClassCount()
+ *
+ * Return the number of loaded classes
+ */
+static void Dalvik_dalvik_system_VMDebug_getLoadedClassCount(const u4* args,
+    JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+    RETURN_INT(dvmGetNumLoadedClasses());
+}
+
+/*
+ * static long threadCpuTimeNanos()
+ *
+ * Returns the thread-specific CPU-time clock value for the current thread,
+ * or -1 if the feature isn't supported.
+ */
+static void Dalvik_dalvik_system_VMDebug_threadCpuTimeNanos(const u4* args,
+        JValue* pResult)
+{
+    jlong result;
+
+    UNUSED_PARAMETER(args);
+
+#ifdef HAVE_POSIX_CLOCKS
+    struct timespec now;
+    /* BUGFIX: clock_gettime can fail (e.g. clock not supported by the
+     * kernel); the original read "now" without checking the return value,
+     * which reads uninitialized memory on failure.
+     */
+    if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now) == 0) {
+        result = (jlong) (now.tv_sec*1000000000LL + now.tv_nsec);
+    } else {
+        result = (jlong) -1;
+    }
+#else
+    result = (jlong) -1;
+#endif
+
+    RETURN_LONG(result);
+}
+
+/* Method table for dalvik.system.VMDebug; bound late by name+signature.
+ * The {NULL, NULL, NULL} entry terminates the list.
+ */
+static const DalvikNativeMethod dalvik_system_VMDebug[] = {
+    { "getAllocCount",          "(I)I",
+        Dalvik_dalvik_system_VMDebug_getAllocCount },
+    { "resetAllocCount",        "(I)V",
+        Dalvik_dalvik_system_VMDebug_resetAllocCount },
+    //{ "print",              "(Ljava/lang/String;)V",
+    //    Dalvik_dalvik_system_VMDebug_print },
+    { "startAllocCounting",     "()V",
+        Dalvik_dalvik_system_VMDebug_startAllocCounting },
+    { "stopAllocCounting",      "()V",
+        Dalvik_dalvik_system_VMDebug_stopAllocCounting },
+    { "startMethodTracing",     "(Ljava/lang/String;II)V",
+        Dalvik_dalvik_system_VMDebug_startMethodTracing },
+    { "stopMethodTracing",      "()V",
+        Dalvik_dalvik_system_VMDebug_stopMethodTracing },
+    { "startEmulatorTracing",   "()V",
+        Dalvik_dalvik_system_VMDebug_startEmulatorTracing },
+    { "stopEmulatorTracing",    "()V",
+        Dalvik_dalvik_system_VMDebug_stopEmulatorTracing },
+    { "setAllocationLimit",     "(I)I",
+        Dalvik_dalvik_system_VMDebug_setAllocationLimit },
+    { "setGlobalAllocationLimit", "(I)I",
+        Dalvik_dalvik_system_VMDebug_setGlobalAllocationLimit },
+    { "startInstructionCounting", "()V",
+        Dalvik_dalvik_system_VMDebug_startInstructionCounting },
+    { "stopInstructionCounting", "()V",
+        Dalvik_dalvik_system_VMDebug_stopInstructionCounting },
+    { "resetInstructionCount",  "()V",
+        Dalvik_dalvik_system_VMDebug_resetInstructionCount },
+    { "getInstructionCount",    "([I)V",
+        Dalvik_dalvik_system_VMDebug_getInstructionCount },
+    { "isDebuggerConnected",    "()Z",
+        Dalvik_dalvik_system_VMDebug_isDebuggerConnected },
+    { "lastDebuggerActivity",   "()J",
+        Dalvik_dalvik_system_VMDebug_lastDebuggerActivity },
+    { "printLoadedClasses",     "(I)V",
+        Dalvik_dalvik_system_VMDebug_printLoadedClasses },
+    { "getLoadedClassCount",     "()I",
+        Dalvik_dalvik_system_VMDebug_getLoadedClassCount },
+    { "threadCpuTimeNanos",     "()J",
+        Dalvik_dalvik_system_VMDebug_threadCpuTimeNanos },
+    { NULL, NULL, NULL },
+};
+
+
+/*
+ * ===========================================================================
+ *      org.apache.harmony.dalvik.NativeTestTarget
+ * ===========================================================================
+ */
+
+/*
+ * public static void emptyInternalStaticMethod()
+ *
+ * For benchmarks, a do-nothing internal method with no arguments.
+ * Measures pure internal-native call overhead.
+ */
+static void Dalvik_org_apache_harmony_dalvik_NativeTestTarget_emptyInternalMethod(
+        const u4* args, JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+    RETURN_VOID();
+}
+
+/* Method table for org.apache.harmony.dalvik.NativeTestTarget. */
+static const DalvikNativeMethod org_apache_harmony_dalvik_NativeTestTarget[] =
+{
+    { "emptyInternalStaticMethod", "()V",
+      Dalvik_org_apache_harmony_dalvik_NativeTestTarget_emptyInternalMethod },
+    { NULL, NULL, NULL },
+};
+
+
+/*
+ * ===========================================================================
+ *      dalvik.system.DexFile
+ * ===========================================================================
+ */
+
+/*
+ * Internal struct for managing DexFile.
+ */
+typedef struct DexOrJar {
+    char*       fileName;       /* owned; freed by freeDexOrJar() */
+    bool        isDex;          /* selects which of the two ptrs is valid */
+    bool        okayToFree;     /* false once classes have been loaded */
+    RawDexFile* pRawDexFile;    /* valid when isDex */
+    JarFile*    pJarFile;       /* valid when !isDex */
+} DexOrJar;
+
+/*
+ * (This is a dvmHashTableFree callback.)
+ *
+ * Release the underlying DEX/Jar resources, the file name, and the
+ * DexOrJar struct itself.
+ */
+static void freeDexOrJar(void* vptr)
+{
+    DexOrJar* pDexOrJar = (DexOrJar*) vptr;
+
+    LOGV("Freeing DexOrJar '%s'\n", pDexOrJar->fileName);
+
+    if (pDexOrJar->isDex)
+        dvmRawDexFileFree(pDexOrJar->pRawDexFile);
+    else
+        dvmJarFileFree(pDexOrJar->pJarFile);
+    free(pDexOrJar->fileName);
+    free(pDexOrJar);
+}
+
+/*
+ * (This is a dvmHashTableLookup compare func.)
+ *
+ * Args are DexOrJar*.  Pointer-identity comparison: zero iff same object.
+ * NOTE(review): casting pointers through int assumes 32-bit pointers;
+ * would truncate on a 64-bit build.
+ */
+static int hashcmpDexOrJar(const void* tableVal, const void* newVal)
+{
+    return (int) newVal - (int) tableVal;
+}
+
+/*
+ * Verify that the "cookie" is a DEX file we opened.
+ *
+ * The cookie is the DexOrJar pointer handed out by openDexFile(); we
+ * confirm it is still present in gDvm.userDexFiles before trusting it.
+ * NOTE(review): dereferences pDexOrJar->fileName before validation, so a
+ * forged cookie can still crash here.
+ */
+static bool validateCookie(int cookie)
+{
+    DexOrJar* pDexOrJar = (DexOrJar*) cookie;
+
+    LOGVV("+++ dex verifying cookie %p\n", pDexOrJar);
+
+    if (pDexOrJar == NULL)
+        return false;
+
+    u4 hash = dvmComputeUtf8Hash(pDexOrJar->fileName);
+    void* result = dvmHashTableLookup(gDvm.userDexFiles, hash, pDexOrJar,
+                hashcmpDexOrJar, false);
+    if (result == NULL)
+        return false;
+
+    return true;
+}
+
+/*
+ * private static int openDexFile(String fileName) throws IOException
+ *
+ * Open a DEX file, returning a pointer to our internal data structure.
+ *
+ * The filename should point to the "source" jar or DEX file.  The DEX
+ * code will automatically find the "optimized" version in the cache
+ * directory, creating it if necessary.
+ *
+ * On success the returned DexOrJar owns "name"; on failure "name" is
+ * freed here and NULL is returned with an IOException pending.
+ *
+ * TODO: at present we will happily open the same file more than once.
+ * To optimize this away we could search for existing entries in the hash
+ * table and refCount them.  Requires atomic ops or adding "synchronized"
+ * to the non-native code that calls here.
+ */
+static void Dalvik_dalvik_system_DexFile_openDexFile(const u4* args,
+    JValue* pResult)
+{
+    StringObject* nameObj = (StringObject*) args[0];
+    DexOrJar* pDexOrJar = NULL;
+    JarFile* pJarFile;
+    RawDexFile* pRawDexFile;
+    char* name;
+
+    /* NOTE(review): may return NULL on OOM; the code below would then
+     * pass NULL to dvmClassPathContains -- confirm or add a check. */
+    name = dvmCreateCstrFromString(nameObj);
+
+    /*
+     * We have to deal with the possibility that somebody might try to
+     * open one of our bootstrap class DEX files.  The set of dependencies
+     * will be different, and hence the results of optimization might be
+     * different, which means we'd actually need to have two versions of
+     * the optimized DEX: one that only knows about part of the boot class
+     * path, and one that knows about everything in it.  The latter might
+     * optimize field/method accesses based on a class that appeared later
+     * in the class path.
+     *
+     * We can't let the user-defined class loader open it and start using
+     * the classes, since the optimized form of the code skips some of
+     * the method and field resolution that we would ordinarily do, and
+     * we'd have the wrong semantics.
+     *
+     * We have to reject attempts to manually open a DEX file from the boot
+     * class path.  The easiest way to do this is by filename, which works
+     * out because variations in name (e.g. "/system/framework/./ext.jar")
+     * result in us hitting a different dalvik-cache entry.
+     */
+    if (dvmClassPathContains(gDvm.bootClassPath, name)) {
+        LOGW("Refusing to reopen boot DEX '%s'\n", name);
+        dvmThrowException("Ljava/io/IOException;",
+            "Re-opening BOOTCLASSPATH DEX files is not allowed");
+        free(name);
+        RETURN_VOID();
+    }
+
+    /*
+     * Try to open it directly as a DEX.  If that fails, try it as a Zip
+     * with a "classes.dex" inside.
+     */
+    if (dvmRawDexFileOpen(name, &pRawDexFile, false) == 0) {
+        LOGV("Opening DEX file '%s' (DEX)\n", name);
+
+        /* NOTE(review): malloc result is not checked (here and below) */
+        pDexOrJar = (DexOrJar*) malloc(sizeof(DexOrJar));
+        pDexOrJar->isDex = true;
+        pDexOrJar->pRawDexFile = pRawDexFile;
+    } else if (dvmJarFileOpen(name, &pJarFile, false) == 0) {
+        LOGV("Opening DEX file '%s' (Jar)\n", name);
+
+        pDexOrJar = (DexOrJar*) malloc(sizeof(DexOrJar));
+        pDexOrJar->isDex = false;
+        pDexOrJar->pJarFile = pJarFile;
+    } else {
+        LOGV("Unable to open DEX file '%s'\n", name);
+        dvmThrowException("Ljava/io/IOException;", "unable to open DEX file");
+    }
+
+    if (pDexOrJar != NULL) {
+        /* ownership of "name" transfers to the DexOrJar */
+        pDexOrJar->fileName = name;
+
+        /* add to hash table */
+        u4 hash = dvmComputeUtf8Hash(name);
+        void* result;
+        result = dvmHashTableLookup(gDvm.userDexFiles, hash, pDexOrJar,
+                    hashcmpDexOrJar, true);
+        if (result != pDexOrJar) {
+            LOGE("Pointer has already been added?\n");
+            dvmAbort();
+        }
+
+        pDexOrJar->okayToFree = true;
+    } else
+        free(name);
+
+    RETURN_PTR(pDexOrJar);
+}
+
+/*
+ * private static void closeDexFile(int cookie)
+ *
+ * Release resources associated with a user-loaded DEX file.
+ *
+ * A null cookie is silently ignored; any other cookie is validated
+ * against the open-file table (aborting the VM on mismatch).
+ */
+static void Dalvik_dalvik_system_DexFile_closeDexFile(const u4* args,
+    JValue* pResult)
+{
+    int cookie = args[0];
+    DexOrJar* pDexOrJar = (DexOrJar*) cookie;
+
+    if (pDexOrJar == NULL)
+        RETURN_VOID();
+
+    LOGV("Closing DEX file %p (%s)\n", pDexOrJar, pDexOrJar->fileName);
+
+    if (!validateCookie(cookie))
+        dvmAbort();
+
+    /*
+     * We can't just free arbitrary DEX files because they have bits and
+     * pieces of loaded classes.  The only exception to this rule is if
+     * they were never used to load classes.
+     *
+     * If we can't free them here, dvmInternalNativeShutdown() will free
+     * them when the VM shuts down.
+     */
+    if (pDexOrJar->okayToFree) {
+        u4 hash = dvmComputeUtf8Hash(pDexOrJar->fileName);
+        if (!dvmHashTableRemove(gDvm.userDexFiles, hash, pDexOrJar)) {
+            LOGW("WARNING: could not remove '%s' from DEX hash table\n",
+                pDexOrJar->fileName);
+        }
+        LOGV("+++ freeing DexFile '%s' resources\n", pDexOrJar->fileName);
+        freeDexOrJar(pDexOrJar);
+    } else {
+        LOGV("+++ NOT freeing DexFile '%s' resources\n", pDexOrJar->fileName);
+    }
+
+    RETURN_VOID();
+}
+
+/*
+ * private static Class defineClass(String name, ClassLoader loader,
+ *      int cookie, ProtectionDomain pd)
+ *
+ * Load a class from a DEX file.  This is roughly equivalent to defineClass()
+ * in a regular VM -- it's invoked by the class loader to cause the
+ * creation of a specific class.  The difference is that the search for and
+ * reading of the bytes is done within the VM.
+ *
+ * Returns a null pointer with no exception if the class was not found.
+ * Throws an exception on other failures.
+ */
+static void Dalvik_dalvik_system_DexFile_defineClass(const u4* args,
+    JValue* pResult)
+{
+    StringObject* nameObj = (StringObject*) args[0];
+    Object* loader = (Object*) args[1];
+    int cookie = args[2];
+    Object* pd = (Object*) args[3];
+    ClassObject* clazz = NULL;
+    DexOrJar* pDexOrJar = (DexOrJar*) cookie;
+    DvmDex* pDvmDex;
+    char* name;
+    char* descriptor;
+
+    /* NOTE(review): neither nameObj nor the dvmCreateCstrFromString
+     * result is null-checked -- confirm callers guarantee non-null. */
+    name = dvmCreateCstrFromString(nameObj);
+    descriptor = dvmNameToDescriptor(name);
+    LOGV("--- Explicit class load '%s' 0x%08x\n", name, cookie);
+    free(name);
+
+    if (!validateCookie(cookie))
+        dvmAbort();
+
+    if (pDexOrJar->isDex)
+        pDvmDex = dvmGetRawDexFileDex(pDexOrJar->pRawDexFile);
+    else
+        pDvmDex = dvmGetJarFileDex(pDexOrJar->pJarFile);
+
+    /* once we load something, we can't unmap the storage */
+    pDexOrJar->okayToFree = false;
+
+    clazz = dvmDefineClass(pDvmDex, descriptor, loader);
+    Thread* self = dvmThreadSelf();
+    if (dvmCheckException(self)) {
+        /*
+         * If we threw a "class not found" exception, stifle it, since the
+         * contract in the higher method says we simply return null if
+         * the class is not found.
+         */
+        Object* excep = dvmGetException(self);
+        if (strcmp(excep->clazz->descriptor,
+                   "Ljava/lang/ClassNotFoundException;") == 0 ||
+            strcmp(excep->clazz->descriptor,
+                   "Ljava/lang/NoClassDefFoundError;") == 0)
+        {
+            dvmClearException(self);
+        }
+        clazz = NULL;
+    }
+
+    /*
+     * Set the ProtectionDomain -- do we need this to happen before we
+     * link the class and make it available? If so, we need to pass it
+     * through dvmDefineClass (and figure out some other
+     * stuff, like where it comes from for bootstrap classes).
+     */
+    if (clazz != NULL) {
+        //LOGI("SETTING pd '%s' to %p\n", clazz->descriptor, pd);
+        dvmSetFieldObject((Object*) clazz, gDvm.offJavaLangClass_pd, pd);
+    }
+
+    free(descriptor);
+    RETURN_PTR(clazz);
+}
+
+/*
+ * private static String[] getClassNameList(int cookie)
+ *
+ * Returns a String array that holds the names of all classes in the
+ * specified DEX file.  Names are in dotted form ("java.lang.String").
+ */
+static void Dalvik_dalvik_system_DexFile_getClassNameList(const u4* args,
+    JValue* pResult)
+{
+    int cookie = args[0];
+    DexOrJar* pDexOrJar = (DexOrJar*) cookie;
+    DvmDex* pDvmDex;
+    DexFile* pDexFile;
+    ArrayObject* stringArray;
+
+    if (!validateCookie(cookie))
+        dvmAbort();
+
+    if (pDexOrJar->isDex)
+        pDvmDex = dvmGetRawDexFileDex(pDexOrJar->pRawDexFile);
+    else
+        pDvmDex = dvmGetJarFileDex(pDexOrJar->pJarFile);
+    assert(pDvmDex != NULL);
+    pDexFile = pDvmDex->pDexFile;
+
+    int count = pDexFile->pHeader->classDefsSize;
+    stringArray = dvmAllocObjectArray(gDvm.classJavaLangString, count,
+                    ALLOC_DEFAULT);
+    if (stringArray == NULL)
+        RETURN_VOID();          // should be an OOM pending
+
+    StringObject** contents = (StringObject**) stringArray->contents;
+    int i;
+    for (i = 0; i < count; i++) {
+        const DexClassDef* pClassDef = dexGetClassDef(pDexFile, i);
+        const char* descriptor =
+            dexStringByTypeIdx(pDexFile, pClassDef->classIdx);
+
+        /* NOTE(review): contents[i] may be NULL if the string alloc
+         * fails; the loop continues regardless. */
+        char* className = dvmDescriptorToDot(descriptor);
+        contents[i] = dvmCreateStringFromCstr(className, ALLOC_DEFAULT);
+        dvmReleaseTrackedAlloc((Object*) contents[i], NULL);
+        free(className);
+    }
+
+    dvmReleaseTrackedAlloc((Object*)stringArray, NULL);
+    RETURN_PTR(stringArray);
+}
+
+/*
+ * public static boolean isDexOptNeeded(String apkName)
+ *         throws FileNotFoundException, IOException
+ *
+ * Returns true if the VM believes that the apk/jar file is out of date
+ * and should be passed through "dexopt" again.
+ *
+ * @param fileName the absolute path to the apk/jar file to examine.
+ * @return true if dexopt should be called on the file, false otherwise.
+ * @throws java.io.FileNotFoundException if fileName is not readable,
+ *         not a file, or not present.
+ * @throws java.io.IOException if fileName is not a valid apk/jar file or
+ *         if problems occur while parsing it.
+ * @throws java.lang.NullPointerException if fileName is null.
+ * @throws dalvik.system.StaleDexCacheError if the optimized dex file
+ *         is stale but exists on a read-only partition.
+ */
+static void Dalvik_dalvik_system_DexFile_isDexOptNeeded(const u4* args,
+    JValue* pResult)
+{
+    StringObject* nameObj = (StringObject*) args[0];
+    char* name;
+    DexCacheStatus status;
+    int result;
+
+    name = dvmCreateCstrFromString(nameObj);
+    if (name == NULL) {
+        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        RETURN_VOID();
+    }
+    if (access(name, R_OK) != 0) {
+        dvmThrowException("Ljava/io/FileNotFoundException;", name);
+        free(name);
+        RETURN_VOID();
+    }
+    status = dvmDexCacheStatus(name);
+    LOGV("dvmDexCacheStatus(%s) returned %d\n", name, status);
+
+    result = true;
+    switch (status) {
+    default: //FALLTHROUGH
+    case DEX_CACHE_BAD_ARCHIVE:
+        dvmThrowException("Ljava/io/IOException;", name);
+        result = -1;
+        break;
+    case DEX_CACHE_OK:
+        result = false;
+        break;
+    case DEX_CACHE_STALE:
+        result = true;
+        break;
+    case DEX_CACHE_STALE_ODEX:
+        dvmThrowException("Ldalvik/system/StaleDexCacheError;", name);
+        result = -1;
+        break;
+    }
+    free(name);
+
+    if (result >= 0) {
+        RETURN_BOOLEAN(result);
+    } else {
+        RETURN_VOID();
+    }
+}
+
/* Native method registration table for dalvik.system.DexFile. */
static const DalvikNativeMethod dalvik_system_DexFile[] = {
    { "openDexFile",        "(Ljava/lang/String;)I",
        Dalvik_dalvik_system_DexFile_openDexFile },
    { "closeDexFile",       "(I)V",
        Dalvik_dalvik_system_DexFile_closeDexFile },
    { "defineClass",        "(Ljava/lang/String;Ljava/lang/ClassLoader;ILjava/security/ProtectionDomain;)Ljava/lang/Class;",
        Dalvik_dalvik_system_DexFile_defineClass },
    { "getClassNameList",   "(I)[Ljava/lang/String;",
        Dalvik_dalvik_system_DexFile_getClassNameList },
    { "isDexOptNeeded",     "(Ljava/lang/String;)Z",
        Dalvik_dalvik_system_DexFile_isDexOptNeeded },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      dalvik.system.VMRuntime
+ * ===========================================================================
+ */
+
+/*
+ * public native float getTargetHeapUtilization()
+ *
+ * Gets the current ideal heap utilization, represented as a number
+ * between zero and one.
+ */
+static void Dalvik_dalvik_system_VMRuntime_getTargetHeapUtilization(
+    const u4* args, JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+    RETURN_FLOAT(dvmGetTargetHeapUtilization());
+}
+
+/*
+ * native float nativeSetTargetHeapUtilization()
+ *
+ * Sets the current ideal heap utilization, represented as a number
+ * between zero and one.  Returns the old utilization.
+ *
+ * Note that this is NOT static.
+ */
+static void Dalvik_dalvik_system_VMRuntime_nativeSetTargetHeapUtilization(
+    const u4* args, JValue* pResult)
+{
+    dvmSetTargetHeapUtilization(dvmU4ToFloat(args[1]));
+
+    RETURN_VOID();
+}
+
+/*
+ * native long nativeMinimumHeapSize(long size, boolean set)
+ *
+ * If set is true, sets the new minimum heap size to size; always
+ * returns the current (or previous) size.  If size is negative or
+ * zero, removes the current minimum constraint (if present).
+ */
+static void Dalvik_dalvik_system_VMRuntime_nativeMinimumHeapSize(
+    const u4* args, JValue* pResult)
+{
+    s8 longSize = GET_ARG_LONG(args, 1);
+    size_t size;
+    bool set = (args[3] != 0);
+
+    /* Fit in 32 bits. */
+    if (longSize < 0) {
+        size = 0;
+    } else if (longSize > INT_MAX) {
+        size = INT_MAX;
+    } else {
+        size = (size_t)longSize;
+    }
+
+    size = dvmMinimumHeapSize(size, set);
+
+    RETURN_LONG(size);
+}
+
+/*
+ * public native void gcSoftReferences()
+ *
+ * Does a GC and forces collection of SoftReferences that are
+ * not strongly-reachable.
+ */
+static void Dalvik_dalvik_system_VMRuntime_gcSoftReferences(const u4* args,
+    JValue* pResult)
+{
+    dvmCollectGarbage(true);
+
+    RETURN_VOID();
+}
+
+/*
+ * public native void runFinalizationSync()
+ *
+ * Does not return until any pending finalizers have been called.
+ * This may or may not happen in the context of the calling thread.
+ * No exceptions will escape.
+ *
+ * Used by zygote, which doesn't have a HeapWorker thread.
+ */
+static void Dalvik_dalvik_system_VMRuntime_runFinalizationSync(const u4* args,
+    JValue* pResult)
+{
+    dvmRunFinalizationSync();
+
+    RETURN_VOID();
+}
+
+/*
+ * public native boolean trackExternalAllocation(long size)
+ *
+ * Asks the VM if <size> bytes can be allocated in an external heap.
+ * This information may be used to limit the amount of memory available
+ * to Dalvik threads.  Returns false if the VM would rather that the caller
+ * did not allocate that much memory.  If the call returns false, the VM
+ * will not update its internal counts.
+ */
+static void Dalvik_dalvik_system_VMRuntime_trackExternalAllocation(
+    const u4* args, JValue* pResult)
+{
+    s8 longSize = GET_ARG_LONG(args, 1);
+
+    /* Fit in 32 bits. */
+    if (longSize < 0) {
+        dvmThrowException("Ljava/lang/IllegalArgumentException;",
+            "size must be positive");
+        RETURN_VOID();
+    } else if (longSize > INT_MAX) {
+        dvmThrowException("Ljava/lang/UnsupportedOperationException;",
+            "size must fit in 32 bits");
+        RETURN_VOID();
+    }
+    RETURN_BOOLEAN(dvmTrackExternalAllocation((size_t)longSize));
+}
+
+/*
+ * public native void trackExternalFree(long size)
+ *
+ * Tells the VM that <size> bytes have been freed in an external
+ * heap.  This information may be used to control the amount of memory
+ * available to Dalvik threads.
+ */
+static void Dalvik_dalvik_system_VMRuntime_trackExternalFree(
+    const u4* args, JValue* pResult)
+{
+    s8 longSize = GET_ARG_LONG(args, 1);
+
+    /* Fit in 32 bits. */
+    if (longSize < 0) {
+        dvmThrowException("Ljava/lang/IllegalArgumentException;",
+            "size must be positive");
+        RETURN_VOID();
+    } else if (longSize > INT_MAX) {
+        dvmThrowException("Ljava/lang/UnsupportedOperationException;",
+            "size must fit in 32 bits");
+        RETURN_VOID();
+    }
+    dvmTrackExternalFree((size_t)longSize);
+
+    RETURN_VOID();
+}
+
+/*
+ * public native long getExternalBytesAllocated()
+ *
+ * Returns the number of externally-allocated bytes being tracked by
+ * trackExternalAllocation/Free().
+ */
+static void Dalvik_dalvik_system_VMRuntime_getExternalBytesAllocated(
+    const u4* args, JValue* pResult)
+{
+    RETURN_LONG((s8)dvmGetExternalBytesAllocated());
+}
+
/* Native method registration table for dalvik.system.VMRuntime. */
static const DalvikNativeMethod dalvik_system_VMRuntime[] = {
    { "getTargetHeapUtilization", "()F",
        Dalvik_dalvik_system_VMRuntime_getTargetHeapUtilization },
    { "nativeSetTargetHeapUtilization", "(F)V",
        Dalvik_dalvik_system_VMRuntime_nativeSetTargetHeapUtilization },
    { "nativeMinimumHeapSize", "(JZ)J",
        Dalvik_dalvik_system_VMRuntime_nativeMinimumHeapSize },
    { "gcSoftReferences", "()V",
        Dalvik_dalvik_system_VMRuntime_gcSoftReferences },
    { "runFinalizationSync", "()V",
        Dalvik_dalvik_system_VMRuntime_runFinalizationSync },
    { "trackExternalAllocation", "(J)Z",
        Dalvik_dalvik_system_VMRuntime_trackExternalAllocation },
    { "trackExternalFree", "(J)V",
        Dalvik_dalvik_system_VMRuntime_trackExternalFree },
    { "getExternalBytesAllocated", "()J",
        Dalvik_dalvik_system_VMRuntime_getExternalBytesAllocated },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      dalvik.system.Zygote
+ * ===========================================================================
+ */
+
+#define ZYGOTE_LOG_TAG "Zygote"
+
+/*
+ * This signal handler is for zygote mode, since the zygote
+ * must reap its children
+ *
+ */
+static void sigchldHandler(int s)
+{
+    pid_t pid;
+    int status;
+
+    while ((pid = waitpid(-1, &status, WNOHANG)) > 0) {
+        /* Log process-death status that we care about.  In general it is not
+           safe to call LOG(...) from a signal handler because of possible
+           reentrancy.  However, we know a priori that the current implementation
+           of LOG() is safe to call from a SIGCHLD handler in the zygote process.
+           If the LOG() implementation changes its locking strategy or its use
+           of syscalls within the lazy-init critical section, its use here may
+           become unsafe. */
+        if (WIFEXITED(status)) {
+            if (WEXITSTATUS(status)) {
+                LOG(LOG_DEBUG, ZYGOTE_LOG_TAG, "Process %d exited cleanly (%d)\n",
+                    (int) pid, WEXITSTATUS(status));
+            } else {
+                IF_LOGV(/*should use ZYGOTE_LOG_TAG*/) {
+                    LOG(LOG_VERBOSE, ZYGOTE_LOG_TAG,
+                        "Process %d exited cleanly (%d)\n",
+                        (int) pid, WEXITSTATUS(status));
+                }
+            }
+        } else if (WIFSIGNALED(status)) {
+            if (WTERMSIG(status) != SIGKILL) {
+                LOG(LOG_DEBUG, ZYGOTE_LOG_TAG,
+                    "Process %d terminated by signal (%d)\n",
+                    (int) pid, WTERMSIG(status));
+            } else {
+                IF_LOGV(/*should use ZYGOTE_LOG_TAG*/) {
+                    LOG(LOG_VERBOSE, ZYGOTE_LOG_TAG,
+                        "Process %d terminated by signal (%d)\n",
+                        (int) pid, WTERMSIG(status));
+                }
+            }
+#ifdef WCOREDUMP
+            if (WCOREDUMP(status)) {
+                LOG(LOG_INFO, ZYGOTE_LOG_TAG, "Process %d dumped core\n",
+                    (int) pid);
+            }
+#endif /* ifdef WCOREDUMP */
+        }
+
+        /* 
+         * If the just-crashed process is the system_server, bring down zygote
+         * so that it is restarted by init and system server will be restarted
+         * from there.
+         */
+        if (pid == gDvm.systemServerPid) {
+            LOG(LOG_INFO, ZYGOTE_LOG_TAG,
+                "Exit zygote because system server (%d) has terminated\n", 
+                (int) pid);
+            kill(getpid(), SIGKILL);
+        }
+    }
+
+    if (pid < 0) {
+        LOG(LOG_WARN, ZYGOTE_LOG_TAG,
+            "Zygote SIGCHLD error (%d) in waitpid\n",errno);
+    }
+}
+
+/*
+ * configure sigchld handler for the zygote process
+ * This is configured very late, because earlier in the dalvik lifecycle
+ * we can fork() and exec() for the verifier/optimizer, and we
+ * want to waitpid() for those rather than have them be harvested immediately.
+ *
+ * This ends up being called repeatedly before each fork(), but there's
+ * no real harm in that.
+ */
+static void setSignalHandler() 
+{
+    int err;
+    struct sigaction sa;
+
+    memset(&sa, 0, sizeof(sa));
+
+    sa.sa_handler = sigchldHandler;
+
+    err = sigaction (SIGCHLD, &sa, NULL);
+    
+    if (err < 0) {
+        LOGW("Error setting SIGCHLD handler errno: %d", errno);
+    }
+}
+
+/*
+ * Set the SIGCHLD handler back to default behavior in zygote children
+ */
+static void unsetSignalHandler()
+{
+    int err;
+    struct sigaction sa;
+
+    memset(&sa, 0, sizeof(sa));
+
+    sa.sa_handler = SIG_DFL;
+
+    err = sigaction (SIGCHLD, &sa, NULL);
+    
+    if (err < 0) {
+        LOGW("Error unsetting SIGCHLD handler errno: %d", errno);
+    }
+}
+
+/* 
+ * Calls POSIX setgroups() using the int[] object as an argument.
+ * A NULL argument is tolerated.
+ */
+
+static int setgroupsIntarray(ArrayObject* gidArray)
+{
+    gid_t *gids;
+    u4 i;
+    s4 *contents;
+
+    if (gidArray == NULL) {
+        return 0;
+    }
+
+    /* just in case gid_t and u4 are different... */
+    gids = alloca(sizeof(gid_t) * gidArray->length);
+    contents = (s4 *)gidArray->contents;
+
+    for (i = 0 ; i < gidArray->length ; i++) {
+        gids[i] = (gid_t) contents[i];
+    }
+
+    return setgroups((size_t) gidArray->length, gids);
+}
+
+/*
+ * Sets the resource limits via setrlimit(2) for the values in the
+ * two-dimensional array of integers that's passed in. The second dimension
+ * contains a tuple of length 3: (resource, rlim_cur, rlim_max). NULL is
+ * treated as an empty array.
+ *
+ * -1 is returned on error.
+ */
+static int setrlimitsFromArray(ArrayObject* rlimits)
+{
+    u4 i;
+    struct rlimit rlim;
+
+    if (rlimits == NULL) {
+        return 0;
+    }
+
+    memset (&rlim, 0, sizeof(rlim));
+
+    ArrayObject** tuples = (ArrayObject **)(rlimits->contents);
+
+    for (i = 0; i < rlimits->length; i++) {
+        ArrayObject * rlimit_tuple = tuples[i];
+        s4* contents = (s4 *)rlimit_tuple->contents;
+        int err;
+
+        if (rlimit_tuple->length != 3) {
+            LOGE("rlimits array must have a second dimension of size 3");
+            return -1;
+        }
+
+        rlim.rlim_cur = contents[1];
+        rlim.rlim_max = contents[2];
+
+        err = setrlimit(contents[0], &rlim);
+
+        if (err < 0) {
+            return -1;
+        }
+    }
+    
+    return 0;
+}
+
/* native public static int fork();
 *
 * Plain fork of the zygote: prepares the heap, installs the SIGCHLD
 * reaper, then forks.  Returns the usual fork() result (0 in the child,
 * child pid in the parent).  Only legal in a -Xzygote VM.
 */
static void Dalvik_dalvik_system_Zygote_fork(const u4* args, JValue* pResult)
{
    pid_t pid;
    int err;

    if (!gDvm.zygote) {
        dvmThrowException("Ljava/lang/IllegalStateException;",
            "VM instance not started with -Xzygote");

        RETURN_VOID();
    }

    /* get the heap into a fork-friendly state before we split */
    if (!dvmGcPreZygoteFork()) {
        LOGE("pre-fork heap failed\n");
        dvmAbort();
    }

    setSignalHandler();      

    dvmDumpLoaderStats("zygote");
    pid = fork();

#ifdef HAVE_ANDROID_OS
    if (pid == 0) {
        /* child process: flag for the malloc leak checker */
        extern int gMallocLeakZygoteChild;
        gMallocLeakZygoteChild = 1;
    }
#endif

    RETURN_INT(pid);
}
+
+/* 
+ * Utility routine to fork zygote and specialize the child process.
+ */
+static pid_t forkAndSpecializeCommon(const u4* args)
+{
+    pid_t pid;
+
+    uid_t uid = (uid_t) args[0];
+    gid_t gid = (gid_t) args[1];
+    ArrayObject* gids = (ArrayObject *)args[2];
+    u4 enableDebugger = args[3];
+    ArrayObject *rlimits = (ArrayObject *)args[4];
+
+    if (!gDvm.zygote) {
+        dvmThrowException("Ljava/lang/IllegalStateException;",
+            "VM instance not started with -Xzygote");
+
+        return -1;
+    }
+
+    if (!dvmGcPreZygoteFork()) {
+        LOGE("pre-fork heap failed\n");
+        dvmAbort();
+    }
+
+    setSignalHandler();      
+
+    dvmDumpLoaderStats("zygote");
+    pid = fork();
+
+    if (pid == 0) {
+        int err;
+        /* The child process */
+
+#ifdef HAVE_ANDROID_OS
+        extern int gMallocLeakZygoteChild;
+        gMallocLeakZygoteChild = 1;
+
+        /* keep caps across UID change, unless we're staying root */
+        if (uid != 0) {
+            err = prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0);
+
+            if (err < 0) {
+                LOGW("cannot PR_SET_KEEPCAPS errno: %d", errno);
+            }
+        }
+
+#endif /* HAVE_ANDROID_OS */
+
+        err = setgroupsIntarray(gids);
+
+        if (err < 0) {
+            LOGW("cannot setgroups() errno: %d", errno);
+        }
+
+        err = setrlimitsFromArray(rlimits);
+
+        if (err < 0) {
+            LOGW("cannot setrlimit() errno: %d", errno);
+        }
+
+        err = setgid(gid);
+        if (err < 0) {
+            LOGW("cannot setgid(%d) errno: %d", gid, errno);
+        }
+
+        err = setuid(uid);
+        if (err < 0) {
+            LOGW("cannot setuid(%d) errno: %d", uid, errno);
+        }
+
+        /*
+         * Our system thread ID has changed.  Get the new one.
+         */
+        Thread* thread = dvmThreadSelf();
+        thread->systemTid = dvmGetSysThreadId();
+
+        // jdwp not started until dvmInitAfterZygote()
+        gDvm.jdwpAllowed = (enableDebugger != 0);
+
+        unsetSignalHandler();      
+        gDvm.zygote = false;
+        if (!dvmInitAfterZygote()) {
+            LOGE("error in post-zygote initialization\n");
+            dvmAbort();
+        }
+    } else if (pid > 0) {
+        /* the parent process */
+    }
+
+    return pid;
+}
+
+/* native public static int forkAndSpecialize(int uid, int gid, 
+ * int[] gids, boolean enableDebugger); 
+ */
+static void Dalvik_dalvik_system_Zygote_forkAndSpecialize(const u4* args,
+    JValue* pResult)
+{
+    pid_t pid;
+
+    pid = forkAndSpecializeCommon(args);
+
+    RETURN_INT(pid);
+}
+
/* native public static int forkSystemServer(int uid, int gid,
 * int[] gids, boolean enableDebugger);
 *
 * Like forkAndSpecialize, but additionally records the child's pid as
 * the system server and verifies it is still alive after publishing it.
 */
static void Dalvik_dalvik_system_Zygote_forkSystemServer(
        const u4* args, JValue* pResult)
{
    pid_t pid;
    pid = forkAndSpecializeCommon(args);

    /* The zygote process checks whether the child process has died or not. */
    if (pid > 0) {
        int status;

        LOGI("System server process %d has been created", pid);
        gDvm.systemServerPid = pid;
        /* There is a slight window that the system server process has crashed
         * but it went unnoticed because we haven't published its pid yet. So
         * we recheck here just to make sure that all is well.
         */
        if (waitpid(pid, &status, WNOHANG) == pid) {
            /* same recovery path as sigchldHandler: die so init restarts us */
            LOGE("System server process %d has died. Restarting Zygote!", pid);
            kill(getpid(), SIGKILL);
        }
    }
    RETURN_INT(pid);
}
+
/* Native method registration table for dalvik.system.Zygote. */
static const DalvikNativeMethod dalvik_system_Zygote[] = {
    { "fork",            "()I",
        Dalvik_dalvik_system_Zygote_fork },
    { "forkAndSpecialize",            "(II[IZ[[I)I",
        Dalvik_dalvik_system_Zygote_forkAndSpecialize },
    { "forkSystemServer",            "(II[IZ[[I)I",
        Dalvik_dalvik_system_Zygote_forkSystemServer },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.VMClassLoader
+ * ===========================================================================
+ */
+
+/*
+ * static Class defineClass(ClassLoader cl, String name,
+ *     byte[] data, int offset, int len, ProtectionDomain pd)
+ *     throws ClassFormatError
+ *
+ * Convert an array of bytes to a Class object.
+ */
+static void Dalvik_java_lang_VMClassLoader_defineClass(const u4* args,
+    JValue* pResult)
+{
+    Object* loader = (Object*) args[0];
+    StringObject* nameObj = (StringObject*) args[1];
+    const u1* data = (const u1*) args[2];
+    int offset = args[3];
+    int len = args[4];
+    Object* pd = (Object*) args[5];
+    char* name = NULL;
+
+    name = dvmCreateCstrFromString(nameObj);
+    LOGE("ERROR: defineClass(%p, %s, %p, %d, %d, %p)\n",
+        loader, name, data, offset, len, pd);
+    dvmThrowException("Ljava/lang/UnsupportedOperationException;",
+        "can't load this type of class file");
+
+    free(name);
+    RETURN_VOID();
+}
+
+/*
+ * static Class defineClass(ClassLoader cl, byte[] data, int offset,
+ *     int len, ProtectionDomain pd)
+ *     throws ClassFormatError
+ *
+ * Convert an array of bytes to a Class object. Deprecated version of
+ * previous method, lacks name parameter.
+ */
+static void Dalvik_java_lang_VMClassLoader_defineClass2(const u4* args,
+    JValue* pResult)
+{
+    Object* loader = (Object*) args[0];
+    const u1* data = (const u1*) args[1];
+    int offset = args[2];
+    int len = args[3];
+    Object* pd = (Object*) args[4];
+
+    LOGE("ERROR: defineClass(%p, %p, %d, %d, %p)\n",
+        loader, data, offset, len, pd);
+    dvmThrowException("Ljava/lang/UnsupportedOperationException;",
+        "can't load this type of class file");
+
+    RETURN_VOID();
+}
+
+/*
+ * static Class findLoadedClass(ClassLoader cl, String name)
+ */
+static void Dalvik_java_lang_VMClassLoader_findLoadedClass(const u4* args,
+    JValue* pResult)
+{
+    Object* loader = (Object*) args[0];
+    StringObject* nameObj = (StringObject*) args[1];
+    ClassObject* clazz = NULL;
+    char* name = NULL;
+    char* descriptor = NULL;
+    char* cp;
+
+    if (nameObj == NULL) {
+        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        goto bail;
+    }
+
+    /*
+     * Get a UTF-8 copy of the string, and convert dots to slashes.
+     */
+    name = dvmCreateCstrFromString(nameObj);
+    if (name == NULL)
+        goto bail;
+
+    descriptor = dvmDotToDescriptor(name);
+    if (descriptor == NULL)
+        goto bail;
+
+    clazz = dvmLookupClass(descriptor, loader, false);
+    LOGVV("look: %s ldr=%p --> %p\n", descriptor, loader, clazz);
+
+bail:
+    free(name);
+    free(descriptor);
+    RETURN_PTR(clazz);
+}
+
+/*
+ * private static int getBootClassPathSize()
+ *
+ * Get the number of entries in the boot class path.
+ */
+static void Dalvik_java_lang_VMClassLoader_getBootClassPathSize(const u4* args,
+    JValue* pResult)
+{
+    int count = dvmGetBootPathSize();
+    RETURN_INT(count);
+}
+
+/*
+ * private static String getBootClassPathResource(String name, int index)
+ *
+ * Find a resource with a matching name in a boot class path entry.
+ *
+ * This mimics the previous VM interface, since we're sharing class libraries.
+ */
+static void Dalvik_java_lang_VMClassLoader_getBootClassPathResource(
+    const u4* args, JValue* pResult)
+{
+    StringObject* nameObj = (StringObject*) args[0];
+    StringObject* result;
+    int idx = args[1];
+    char* name;
+
+    name = dvmCreateCstrFromString(nameObj);
+    if (name == NULL)
+        RETURN_PTR(NULL);
+
+    result = dvmGetBootPathResource(name, idx);
+    free(name);
+    dvmReleaseTrackedAlloc((Object*)result, NULL);
+    RETURN_PTR(result);
+}
+
+/*
+ * static final Class getPrimitiveClass(char prim_type)
+ */
+static void Dalvik_java_lang_VMClassLoader_getPrimitiveClass(const u4* args,
+    JValue* pResult)
+{
+    int primType = args[0];
+
+    pResult->l = dvmFindPrimitiveClass(primType);
+}
+
+/*
+ * static Class loadClass(String name, boolean resolve)
+ *     throws ClassNotFoundException
+ *
+ * Load class using bootstrap class loader.
+ *
+ * Return the Class object associated with the class or interface with
+ * the specified name.
+ *
+ * "name" is in "binary name" format, e.g. "dalvik.system.Debug$1".
+ */
+static void Dalvik_java_lang_VMClassLoader_loadClass(const u4* args,
+    JValue* pResult)
+{
+    StringObject* nameObj = (StringObject*) args[0];
+    bool resolve = (args[1] != 0);
+    ClassObject* clazz;
+
+    clazz = findClassByName(nameObj, NULL, resolve);
+    assert(clazz == NULL || dvmIsClassLinked(clazz));
+    RETURN_PTR(clazz);
+}
+
/* Native method registration table for java.lang.VMClassLoader. */
static const DalvikNativeMethod java_lang_VMClassLoader[] = {
    { "defineClass",        "(Ljava/lang/ClassLoader;Ljava/lang/String;[BIILjava/security/ProtectionDomain;)Ljava/lang/Class;",
        Dalvik_java_lang_VMClassLoader_defineClass },
    { "defineClass",        "(Ljava/lang/ClassLoader;[BIILjava/security/ProtectionDomain;)Ljava/lang/Class;",
        Dalvik_java_lang_VMClassLoader_defineClass2 },
    { "findLoadedClass",    "(Ljava/lang/ClassLoader;Ljava/lang/String;)Ljava/lang/Class;",
        Dalvik_java_lang_VMClassLoader_findLoadedClass },
    { "getBootClassPathSize", "()I",
        Dalvik_java_lang_VMClassLoader_getBootClassPathSize },
    { "getBootClassPathResource", "(Ljava/lang/String;I)Ljava/lang/String;",
        Dalvik_java_lang_VMClassLoader_getBootClassPathResource },
    { "getPrimitiveClass",  "(C)Ljava/lang/Class;",
        Dalvik_java_lang_VMClassLoader_getPrimitiveClass },
    { "loadClass",          "(Ljava/lang/String;Z)Ljava/lang/Class;",
        Dalvik_java_lang_VMClassLoader_loadClass },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      dalvik.system.VMStack
+ * ===========================================================================
+ */
+
+#define NUM_DOPRIV_FUNCS    4
+
+/*
+ * Determine if "method" is a "privileged" invocation, i.e. is it one
+ * of the variations of AccessController.doPrivileged().
+ *
+ * Because the security stuff pulls in a pile of stuff that we may not
+ * want or need, we don't do the class/method lookups at init time, but
+ * instead on first use.
+ */
+static bool dvmIsPrivilegedMethod(const Method* method)
+{
+    int i;
+
+    assert(method != NULL);
+
+    if (!gDvm.javaSecurityAccessControllerReady) {
+        /*
+         * Populate on first use.  No concurrency risk since we're just
+         * finding pointers to fixed structures.
+         */
+        static const char* kSignatures[NUM_DOPRIV_FUNCS] = {
+            "(Ljava/security/PrivilegedAction;)Ljava/lang/Object;",
+            "(Ljava/security/PrivilegedExceptionAction;)Ljava/lang/Object;",
+            "(Ljava/security/PrivilegedAction;Ljava/security/AccessControlContext;)Ljava/lang/Object;",
+            "(Ljava/security/PrivilegedExceptionAction;Ljava/security/AccessControlContext;)Ljava/lang/Object;",
+        };
+        ClassObject* clazz;
+
+        clazz = dvmFindClassNoInit("Ljava/security/AccessController;", NULL);
+        if (clazz == NULL) {
+            LOGW("Couldn't find java/security/AccessController\n");
+            return false;
+        }
+
+        assert(NELEM(gDvm.methJavaSecurityAccessController_doPrivileged) ==
+               NELEM(kSignatures));
+
+        /* verify init */
+        for (i = 0; i < NUM_DOPRIV_FUNCS; i++) {
+            gDvm.methJavaSecurityAccessController_doPrivileged[i] =
+                dvmFindDirectMethodByDescriptor(clazz, "doPrivileged", kSignatures[i]);
+            if (gDvm.methJavaSecurityAccessController_doPrivileged[i] == NULL) {
+                LOGW("Warning: couldn't find java/security/AccessController"
+                    ".doPrivileged %s\n", kSignatures[i]);
+                return false;
+            }
+        }
+
+        /* all good, raise volatile readiness flag */
+        gDvm.javaSecurityAccessControllerReady = true;
+    }
+
+    for (i = 0; i < NUM_DOPRIV_FUNCS; i++) {
+        if (gDvm.methJavaSecurityAccessController_doPrivileged[i] == method) {
+            //LOGI("+++ doPriv match\n");
+            return true;
+        }
+    }
+    return false;
+}
+
+/*
+ * public static ClassLoader getCallingClassLoader()
+ *
+ * Return the defining class loader of the caller's caller.
+ */
+static void Dalvik_dalvik_system_VMStack_getCallingClassLoader(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = dvmGetCaller2Class(dvmThreadSelf()->curFrame);
+
+    UNUSED_PARAMETER(args);
+
+    if (clazz == NULL)
+        RETURN_PTR(NULL);
+    RETURN_PTR(clazz->classLoader);
+}
+
+/*
+ * public static ClassLoader getCallingClassLoader2()
+ *
+ * Return the defining class loader of the caller's caller's caller.
+ */
+static void Dalvik_dalvik_system_VMStack_getCallingClassLoader2(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = dvmGetCaller3Class(dvmThreadSelf()->curFrame);
+
+    UNUSED_PARAMETER(args);
+
+    if (clazz == NULL)
+        RETURN_PTR(NULL);
+    RETURN_PTR(clazz->classLoader);
+}
+
+/*
+ * public static Class<?>[] getClasses(int maxDepth, boolean stopAtPrivileged)
+ *
+ * Create an array of classes for the methods on the stack, skipping the
+ * first two and all reflection methods.  If "stopAtPrivileged" is set,
+ * stop shortly after we encounter a privileged class.
+ */
+static void Dalvik_dalvik_system_VMStack_getClasses(const u4* args,
+    JValue* pResult)
+{
+    /* note "maxSize" is unsigned, so -1 turns into a very large value */
+    unsigned int maxSize = args[0];
+    bool stopAtPrivileged = args[1];
+    unsigned int size = 0;
+    const unsigned int kSkip = 2;
+    const Method** methods = NULL;
+    int methodCount;
+
+    /*
+     * Get an array with the stack trace in it.
+     */
+    if (!dvmCreateStackTraceArray(dvmThreadSelf()->curFrame, &methods,
+            &methodCount))
+    {
+        LOGE("Failed to create stack trace array\n");
+        dvmThrowException("Ljava/lang/InternalError;", NULL);
+        RETURN_VOID();
+    }
+
+    //int i;
+    //LOGI("dvmCreateStackTraceArray results:\n");
+    //for (i = 0; i < methodCount; i++) {
+    //    LOGI(" %2d: %s.%s\n",
+    //        i, methods[i]->clazz->descriptor, methods[i]->name);
+    //}
+
+    /*
+     * Run through the array and count up how many elements there are.
+     */
+    unsigned int idx;
+    for (idx = kSkip; (int) idx < methodCount && size < maxSize; idx++) {
+        const Method* meth = methods[idx];
+
+        if (dvmIsReflectionMethod(meth))
+            continue;
+
+        if (stopAtPrivileged && dvmIsPrivilegedMethod(meth)) {
+            /*
+             * We want the last element of the array to be the caller of
+             * the privileged method, so we want to include the privileged
+             * method and the next one.
+             */
+            if (maxSize > size + 2)
+                maxSize = size + 2;
+        }
+
+        size++;
+    }
+
+    /*
+     * Create an array object to hold the classes.
+     * TODO: can use gDvm.classJavaLangClassArray here?
+     */
+    ClassObject* classArrayClass = NULL;
+    ArrayObject* classes = NULL;
+    classArrayClass = dvmFindArrayClass("[Ljava/lang/Class;", NULL);
+    if (classArrayClass == NULL) {
+        LOGW("Unable to find java.lang.Class array class\n");
+        goto bail;
+    }
+    classes = dvmAllocArray(classArrayClass, size, kObjectArrayRefWidth,
+                ALLOC_DEFAULT);
+    if (classes == NULL) {
+        LOGW("Unable to allocate class array (%d elems)\n", size);
+        goto bail;
+    }
+
+    /*
+     * Fill in the array.
+     */
+    ClassObject** objects = (ClassObject**) classes->contents;
+
+    unsigned int sidx = 0;
+    for (idx = kSkip; (int) idx < methodCount && sidx < size; idx++) {
+        const Method* meth = methods[idx];
+
+        if (dvmIsReflectionMethod(meth))
+            continue;
+
+        *objects++ = meth->clazz;
+        sidx++;
+    }
+
+bail:
+    free(methods);
+    dvmReleaseTrackedAlloc((Object*) classes, NULL);
+    RETURN_PTR(classes);
+}
+
+/*
+ * public static StackTraceElement[] getThreadStackTrace(Thread t)
+ *
+ * Retrieve the stack trace of the specified thread and return it as an
+ * array of StackTraceElement.  Returns NULL on failure.
+ */
+static void Dalvik_dalvik_system_VMStack_getThreadStackTrace(const u4* args,
+    JValue* pResult)
+{
+    Object* targetThreadObj = (Object*) args[0];
+    Thread* self = dvmThreadSelf();
+    Thread* thread;
+    int* traceBuf;
+
+    assert(targetThreadObj != NULL);
+
+    dvmLockThreadList(self);
+
+    /*
+     * Make sure the thread is still alive and in the list.
+     */
+    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+        if (thread->threadObj == targetThreadObj)
+            break;
+    }
+    if (thread == NULL) {
+        LOGI("VMStack.getThreadStackTrace: threadObj %p not active\n",
+            targetThreadObj);
+        dvmUnlockThreadList();
+        RETURN_PTR(NULL);
+    }
+
+    /*
+     * Suspend the thread, pull out the stack trace, then resume the thread
+     * and release the thread list lock.  If we're being asked to examine
+     * our own stack trace, skip the suspend/resume.
+     */
+    int stackDepth = -1;
+    if (thread != self)
+        dvmSuspendThread(thread);
+    traceBuf = dvmFillInStackTraceRaw(thread, &stackDepth);
+    if (thread != self)
+        dvmResumeThread(thread);
+    dvmUnlockThreadList();
+
+    /*
+     * Convert the raw buffer into an array of StackTraceElement.
+     */
+    ArrayObject* trace = dvmGetStackTraceRaw(traceBuf, stackDepth);
+    free(traceBuf);
+    RETURN_PTR(trace);
+}
+
/* Native method registration table for dalvik.system.VMStack. */
static const DalvikNativeMethod dalvik_system_VMStack[] = {
    { "getCallingClassLoader",  "()Ljava/lang/ClassLoader;",
        Dalvik_dalvik_system_VMStack_getCallingClassLoader },
    { "getCallingClassLoader2", "()Ljava/lang/ClassLoader;",
        Dalvik_dalvik_system_VMStack_getCallingClassLoader2 },
    { "getClasses",             "(IZ)[Ljava/lang/Class;",
        Dalvik_dalvik_system_VMStack_getClasses },
    { "getThreadStackTrace",    "(Ljava/lang/Thread;)[Ljava/lang/StackTraceElement;",
        Dalvik_dalvik_system_VMStack_getThreadStackTrace },
    { NULL, NULL, NULL },   /* sentinel terminates the table */
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.VMThread
+ * ===========================================================================
+ */
+
+/*
+ * static void create(Thread t, long stacksize)
+ *
+ * This is eventually called as a result of Thread.start().
+ *
+ * Throws an exception on failure.
+ */
static void Dalvik_java_lang_VMThread_create(const u4* args, JValue* pResult)
{
    Object* threadObj = (Object*) args[0];
    s8 stackSize = GET_ARG_LONG(args, 1);   /* "long" spans args[1..2] */

    /* NOTE(review): the 64-bit stack size is truncated to an int here */
    dvmCreateInterpThread(threadObj, (int) stackSize);
    RETURN_VOID();
}
+
+/*
+ * static Thread currentThread()
+ */
static void Dalvik_java_lang_VMThread_currentThread(const u4* args,
    JValue* pResult)
{
    UNUSED_PARAMETER(args);

    /* return the java.lang.Thread object for the calling thread */
    RETURN_PTR(dvmThreadSelf()->threadObj);
}
+
+/*
+ * void getStatus()
+ *
+ * Gets the Thread status. Result is in VM terms, has to be mapped to
+ * Thread.State by interpreted code.
+ */
+static void Dalvik_java_lang_VMThread_getStatus(const u4* args, JValue* pResult)
+{
+    Object* thisPtr = (Object*) args[0];
+    Thread* thread;
+    int result;
+
+    dvmLockThreadList(NULL);
+    thread = dvmGetThreadFromThreadObject(thisPtr);
+    if (thread != NULL)
+        result = thread->status;
+    else
+        result = THREAD_ZOMBIE;     // assume it used to exist and is now gone
+    dvmUnlockThreadList();
+    
+    RETURN_INT(result);
+}
+
+/*
+ * boolean holdsLock(Object object)
+ *
+ * Returns whether the current thread has a monitor lock on the specific
+ * object.
+ */
static void Dalvik_java_lang_VMThread_holdsLock(const u4* args, JValue* pResult)
{
    Object* thisPtr = (Object*) args[0];
    Object* object = (Object*) args[1];
    Thread* thread;

    /* Thread.holdsLock(null) must throw NPE */
    if (object == NULL) {
        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
        RETURN_VOID();
    }

    /* NOTE(review): "thread" may be NULL if the thread has exited; this
     * assumes dvmHoldsLock() tolerates a NULL thread -- confirm. */
    dvmLockThreadList(NULL);
    thread = dvmGetThreadFromThreadObject(thisPtr);
    int result = dvmHoldsLock(thread, object);
    dvmUnlockThreadList();

    RETURN_BOOLEAN(result);
}
+
+/*
+ * void interrupt()
+ *
+ * Interrupt a thread that is waiting (or is about to wait) on a monitor.
+ */
+static void Dalvik_java_lang_VMThread_interrupt(const u4* args, JValue* pResult)
+{
+    Object* thisPtr = (Object*) args[0];
+    Thread* thread;
+
+    dvmLockThreadList(NULL);
+    thread = dvmGetThreadFromThreadObject(thisPtr);
+    if (thread != NULL)
+        dvmThreadInterrupt(thread);
+    dvmUnlockThreadList();
+    RETURN_VOID();
+}
+
+/*
+ * static boolean interrupted()
+ *
+ * Determine if the current thread has been interrupted.  Clears the flag.
+ */
+static void Dalvik_java_lang_VMThread_interrupted(const u4* args,
+    JValue* pResult)
+{
+    Thread* self = dvmThreadSelf();
+    bool interrupted;
+
+    UNUSED_PARAMETER(args);
+
+    interrupted = self->interrupted;
+    self->interrupted = false;
+    RETURN_BOOLEAN(interrupted);
+}
+
+/*
+ * boolean isInterrupted()
+ *
+ * Determine if the specified thread has been interrupted.  Does not clear
+ * the flag.
+ */
+static void Dalvik_java_lang_VMThread_isInterrupted(const u4* args,
+    JValue* pResult)
+{
+    Object* thisPtr = (Object*) args[0];
+    Thread* thread;
+    bool interrupted;
+
+    dvmLockThreadList(NULL);
+    thread = dvmGetThreadFromThreadObject(thisPtr);
+    if (thread != NULL)
+        interrupted = thread->interrupted;
+    else
+        interrupted = false;
+    dvmUnlockThreadList();
+
+    RETURN_BOOLEAN(interrupted);
+}
+
+/*
+ * void nameChanged(String newName)
+ *
+ * The name of the target thread has changed.  We may need to alert DDMS.
+ */
+static void Dalvik_java_lang_VMThread_nameChanged(const u4* args,
+    JValue* pResult)
+{
+    Object* thisPtr = (Object*) args[0];
+    StringObject* nameStr = (StringObject*) args[1];
+    Thread* thread;
+    int threadId = -1;
+
+    /* get the thread's ID */
+    dvmLockThreadList(NULL);
+    thread = dvmGetThreadFromThreadObject(thisPtr);
+    if (thread != NULL)
+        threadId = thread->threadId;
+    dvmUnlockThreadList();
+
+    dvmDdmSendThreadNameChange(threadId, nameStr);
+    //char* str = dvmCreateCstrFromString(nameStr);
+    //LOGI("UPDATE: threadid=%d now '%s'\n", threadId, str);
+    //free(str);
+
+    RETURN_VOID();
+}
+
+/*
+ * void setPriority(int newPriority)
+ *
+ * Alter the priority of the specified thread.  "newPriority" will range
+ * from Thread.MIN_PRIORITY to Thread.MAX_PRIORITY (1-10), with "normal"
+ * threads at Thread.NORM_PRIORITY (5).
+ */
+static void Dalvik_java_lang_VMThread_setPriority(const u4* args,
+    JValue* pResult)
+{
+    Object* thisPtr = (Object*) args[0];
+    int newPriority = args[1];
+    Thread* thread;
+    
+    dvmLockThreadList(NULL);
+    thread = dvmGetThreadFromThreadObject(thisPtr);
+    if (thread != NULL)
+        dvmChangeThreadPriority(thread, newPriority);
+    //dvmDumpAllThreads(false);
+    dvmUnlockThreadList();
+
+    RETURN_VOID();
+}
+
+/*
+ * static void sleep(long msec, int nsec)
+ */
+static void Dalvik_java_lang_VMThread_sleep(const u4* args, JValue* pResult)
+{
+    Thread* self = dvmThreadSelf();
+    dvmThreadSleep(GET_ARG_LONG(args,0), args[2]);
+    RETURN_VOID();
+}
+
+/*
+ * public void yield()
+ *
+ * Causes the thread to temporarily pause and allow other threads to execute.
+ *
+ * The exact behavior is poorly defined.  Some discussion here:
+ *   http://www.cs.umd.edu/~pugh/java/memoryModel/archive/0944.html
+ */
static void Dalvik_java_lang_VMThread_yield(const u4* args, JValue* pResult)
{
    UNUSED_PARAMETER(args);

    /* delegate directly to the scheduler; a no-op is a legal outcome */
    sched_yield();

    RETURN_VOID();
}
+
/* Native method registration table for java.lang.VMThread. */
static const DalvikNativeMethod java_lang_VMThread[] = {
    { "create",         "(Ljava/lang/Thread;J)V",
        Dalvik_java_lang_VMThread_create },
    { "currentThread",  "()Ljava/lang/Thread;",
        Dalvik_java_lang_VMThread_currentThread },
    { "getStatus",      "()I",
        Dalvik_java_lang_VMThread_getStatus },
    { "holdsLock",      "(Ljava/lang/Object;)Z",
        Dalvik_java_lang_VMThread_holdsLock },
    { "interrupt",      "()V",
        Dalvik_java_lang_VMThread_interrupt },
    { "interrupted",    "()Z",
        Dalvik_java_lang_VMThread_interrupted },
    { "isInterrupted",  "()Z",
        Dalvik_java_lang_VMThread_isInterrupted },
    { "nameChanged",    "(Ljava/lang/String;)V",
        Dalvik_java_lang_VMThread_nameChanged },
    { "setPriority",    "(I)V",
        Dalvik_java_lang_VMThread_setPriority },
    { "sleep",          "(JI)V",
        Dalvik_java_lang_VMThread_sleep },
    { "yield",          "()V",
        Dalvik_java_lang_VMThread_yield },
    { NULL, NULL, NULL },   /* sentinel terminates the table */
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.Throwable
+ * ===========================================================================
+ */
+
+/*
+ * private static Object nativeFillInStackTrace()
+ */
+static void Dalvik_java_lang_Throwable_nativeFillInStackTrace(const u4* args,
+    JValue* pResult)
+{
+    Object* stackState = NULL;
+
+    UNUSED_PARAMETER(args);
+
+    stackState = dvmFillInStackTrace(dvmThreadSelf());
+    RETURN_PTR(stackState);
+}
+
+/*
+ * private static StackTraceElement[] nativeGetStackTrace(Object stackState)
+ *
+ * The "stackState" argument must be the value returned by an earlier call to
+ * nativeFillInStackTrace().
+ */
+static void Dalvik_java_lang_Throwable_nativeGetStackTrace(const u4* args,
+    JValue* pResult)
+{
+    Object* stackState = (Object*) args[0];
+    ArrayObject* elements = NULL;
+
+    elements = dvmGetStackTrace(stackState);
+    RETURN_PTR(elements);
+}
+
/* Native method registration table for java.lang.Throwable. */
static const DalvikNativeMethod java_lang_Throwable[] = {
    { "nativeFillInStackTrace", "()Ljava/lang/Object;",
        Dalvik_java_lang_Throwable_nativeFillInStackTrace },
    { "nativeGetStackTrace",    "(Ljava/lang/Object;)[Ljava/lang/StackTraceElement;",
        Dalvik_java_lang_Throwable_nativeGetStackTrace },
    { NULL, NULL, NULL },   /* sentinel terminates the table */
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.Object
+ * ===========================================================================
+ */
+
+/*
+ * private Object internalClone()
+ *
+ * Implements most of Object.clone().
+ */
static void Dalvik_java_lang_Object_internalClone(const u4* args,
    JValue* pResult)
{
    Object* thisPtr = (Object*) args[0];
    /* dvmCloneObject returns a tracked allocation; un-track before return */
    Object* clone = dvmCloneObject(thisPtr);

    dvmReleaseTrackedAlloc(clone, NULL);
    RETURN_PTR(clone);
}
+
+/*
+ * public int hashCode()
+ */
static void Dalvik_java_lang_Object_hashCode(const u4* args, JValue* pResult)
{
    Object* thisPtr = (Object*) args[0];

    /* identity hash: the object's address.  NOTE(review): stores a pointer
     * through RETURN_PTR for a ()I method -- only sound while pointers and
     * ints are the same width; also unstable under a moving collector. */
    RETURN_PTR(thisPtr);    /* use the pointer as the hash code */
}
+
+/*
+ * public Class getClass()
+ */
static void Dalvik_java_lang_Object_getClass(const u4* args, JValue* pResult)
{
    Object* thisPtr = (Object*) args[0];

    /* every object header carries its ClassObject */
    RETURN_PTR(thisPtr->clazz);
}
+
+/*
+ * public void notify()
+ *
+ * NOTE: we declare this as a full DalvikBridgeFunc, rather than a
+ * DalvikNativeFunc, because we really want to avoid the "self" lookup.
+ */
static void Dalvik_java_lang_Object_notify(const u4* args, JValue* pResult,
    const Method* method, Thread* self)
{
    Object* thisPtr = (Object*) args[0];

    /* "self" is supplied by the bridge call, avoiding a TLS lookup */
    dvmObjectNotify(self, thisPtr);
    RETURN_VOID();
}
+
+/*
+ * public void notifyAll()
+ */
static void Dalvik_java_lang_Object_notifyAll(const u4* args, JValue* pResult,
    const Method* method, Thread* self)
{
    Object* thisPtr = (Object*) args[0];

    /* bridge-style call: "self" is passed in, no TLS lookup needed */
    dvmObjectNotifyAll(self, thisPtr);
    RETURN_VOID();
}
+
+/*
+ * public void wait(long ms, int ns) throws InterruptedException
+ */
static void Dalvik_java_lang_Object_wait(const u4* args, JValue* pResult,
    const Method* method, Thread* self)
{
    Object* thisPtr = (Object*) args[0];

    /* args[1..2] hold the 64-bit "ms", so "ns" lands in args[3];
     * final "true" asks for interruptibility */
    dvmObjectWait(self, thisPtr, GET_ARG_LONG(args,1), (s4)args[3], true);
    RETURN_VOID();
}
+
/* Native method registration table for java.lang.Object.  notify/notifyAll/
 * wait are DalvikBridgeFunc implementations cast into the table slot. */
static const DalvikNativeMethod java_lang_Object[] = {
    { "internalClone",  "(Ljava/lang/Cloneable;)Ljava/lang/Object;",
        Dalvik_java_lang_Object_internalClone },
    { "hashCode",       "()I",
        Dalvik_java_lang_Object_hashCode },
    { "notify",         "()V",
        (DalvikNativeFunc) Dalvik_java_lang_Object_notify },
    { "notifyAll",      "()V",
        (DalvikNativeFunc) Dalvik_java_lang_Object_notifyAll },
    { "wait",           "(JI)V",
        (DalvikNativeFunc) Dalvik_java_lang_Object_wait },
    { "getClass",       "()Ljava/lang/Class;",
        Dalvik_java_lang_Object_getClass },
    { NULL, NULL, NULL },   /* sentinel terminates the table */
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.Class
+ * ===========================================================================
+ */
+
+/*
+ * native public boolean desiredAssertionStatus()
+ *
+ * Determine the class-init-time assertion status of a class.  This is
+ * called from <clinit> in javac-generated classes that use the Java
+ * programming language "assert" keyword.
+ */
static void Dalvik_java_lang_Class_desiredAssertionStatus(const u4* args,
    JValue* pResult)
{
    ClassObject* thisPtr = (ClassObject*) args[0];
    /* dvmDescriptorToName allocates; freed at the bottom */
    char* className = dvmDescriptorToName(thisPtr->descriptor);
    int i;
    bool enable = false;    /* assertions default to disabled */

    /*
     * Run through the list of arguments specified on the command line.  The
     * last matching argument takes precedence.
     */
    for (i = 0; i < gDvm.assertionCtrlCount; i++) {
        const AssertionControl* pCtrl = &gDvm.assertionCtrl[i];

        if (pCtrl->isPackage) {
            /*
             * Given "dalvik/system/Debug" or "MyStuff", compute the
             * length of the package portion of the class name string.
             *
             * Unlike most package operations, we allow matching on
             * "sub-packages", so "dalvik..." will match "dalvik.Foo"
             * and "dalvik.system.Foo".
             *
             * The pkgOrClass string looks like "dalvik/system/", i.e. it still
             * has the terminating slash, so we can be sure we're comparing
             * against full package component names.
             */
            const char* lastSlash;
            int pkgLen;

            lastSlash = strrchr(className, '/');
            if (lastSlash == NULL) {
                /* default (unnamed) package */
                pkgLen = 0;
            } else {
                pkgLen = lastSlash - className +1;
            }

            /* prefix compare; pkgOrClassLen must fit within our package part */
            if (pCtrl->pkgOrClassLen > pkgLen ||
                memcmp(pCtrl->pkgOrClass, className, pCtrl->pkgOrClassLen) != 0)
            {
                LOGV("ASRT: pkg no match: '%s'(%d) vs '%s'\n",
                    className, pkgLen, pCtrl->pkgOrClass);
            } else {
                LOGV("ASRT: pkg match: '%s'(%d) vs '%s' --> %d\n",
                    className, pkgLen, pCtrl->pkgOrClass, pCtrl->enable);
                enable = pCtrl->enable;
            }
        } else {
            /*
             * "pkgOrClass" holds a fully-qualified class name, converted from
             * dot-form to slash-form.  An empty string means all classes.
             */
            if (pCtrl->pkgOrClass == NULL) {
                /* -esa/-dsa; see if class is a "system" class */
                if (strncmp(className, "java/", 5) != 0) {
                    LOGV("ASRT: sys no match: '%s'\n", className);
                } else {
                    LOGV("ASRT: sys match: '%s' --> %d\n",
                        className, pCtrl->enable);
                    enable = pCtrl->enable;
                }
            } else if (*pCtrl->pkgOrClass == '\0') {
                LOGV("ASRT: class all: '%s' --> %d\n",
                    className, pCtrl->enable);
                enable = pCtrl->enable;
            } else {
                /* exact class-name match */
                if (strcmp(pCtrl->pkgOrClass, className) != 0) {
                    LOGV("ASRT: cls no match: '%s' vs '%s'\n",
                        className, pCtrl->pkgOrClass);
                } else {
                    LOGV("ASRT: cls match: '%s' vs '%s' --> %d\n",
                        className, pCtrl->pkgOrClass, pCtrl->enable);
                    enable = pCtrl->enable;
                }
            }
        }
    }

    free(className);
    RETURN_INT(enable);
}
+
+/*
+ * static public Class<?> classForName(String name, boolean initialize,
+ *     ClassLoader loader)
+ *
+ * Return the Class object associated with the class or interface with
+ * the specified name.
+ *
+ * "name" is in "binary name" format, e.g. "dalvik.system.Debug$1".
+ */
static void Dalvik_java_lang_Class_classForName(const u4* args, JValue* pResult)
{
    StringObject* nameObj = (StringObject*) args[0];
    bool initialize = (args[1] != 0);
    Object* loader = (Object*) args[2];

    /* findClassByName handles lookup, optional init, and error reporting */
    RETURN_PTR(findClassByName(nameObj, loader, initialize));
}
+
+/*
+ * static private ClassLoader getClassLoader(Class clazz)
+ *
+ * Return the class' defining class loader.
+ */
static void Dalvik_java_lang_Class_getClassLoader(const u4* args,
    JValue* pResult)
{
    ClassObject* clazz = (ClassObject*) args[0];

    /* NULL indicates the bootstrap loader */
    RETURN_PTR(clazz->classLoader);
}
+
+/*
+ * public Class<?> getComponentType()
+ *
+ * If this is an array type, return the class of the elements; otherwise
+ * return NULL.
+ */
static void Dalvik_java_lang_Class_getComponentType(const u4* args,
    JValue* pResult)
{
    ClassObject* thisPtr = (ClassObject*) args[0];

    /* non-array classes have no component type */
    if (!dvmIsArrayClass(thisPtr))
        RETURN_PTR(NULL);

    /*
     * We can't just return thisPtr->elementClass, because that gives
     * us the base type (e.g. X[][][] returns X).  If this is a multi-
     * dimensional array, we have to do the lookup by name.
     */
    if (thisPtr->descriptor[1] == '[')
        /* drop one '[' from the descriptor to get the component class */
        RETURN_PTR(dvmFindArrayClass(&thisPtr->descriptor[1],
                   thisPtr->classLoader));
    else
        RETURN_PTR(thisPtr->elementClass);
}
+
+/*
+ * private static Class<?>[] getDeclaredClasses(Class<?> clazz,
+ *     boolean publicOnly)
+ *
+ * Return an array with the classes that are declared by the specified class.
+ * If "publicOnly" is set, we strip out any classes that don't have "public"
+ * access.
+ */
+static void Dalvik_java_lang_Class_getDeclaredClasses(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+    bool publicOnly = (args[1] != 0);
+    ArrayObject* classes;
+
+    classes = dvmGetDeclaredClasses(clazz);
+    if (classes == NULL) {
+        if (!dvmCheckException(dvmThreadSelf())) {
+            /* empty list, so create a zero-length array */
+            classes = dvmAllocArrayByClass(gDvm.classJavaLangClassArray,
+                        0, ALLOC_DEFAULT);
+        }
+    } else if (publicOnly) {
+        int i, newIdx, publicCount = 0;
+        ClassObject** pSource = (ClassObject**) classes->contents;
+
+        /* count up public classes */
+        for (i = 0; i < (int)classes->length; i++) {
+            if (dvmIsPublicClass(pSource[i]))
+                publicCount++;
+        }
+
+        /* create a new array to hold them */
+        ArrayObject* newClasses;
+        newClasses = dvmAllocArrayByClass(gDvm.classJavaLangClassArray,
+                        publicCount, ALLOC_DEFAULT);
+
+        /* copy them over */
+        ClassObject** pDest = (ClassObject**) newClasses->contents;
+        for (i = newIdx = 0; i < (int)classes->length; i++) {
+            if (dvmIsPublicClass(pSource[i]))
+                pDest[newIdx++] = pSource[i];
+        }
+
+        assert(newIdx == publicCount);
+        dvmReleaseTrackedAlloc((Object*) classes, NULL);
+        classes = newClasses;
+    }
+
+    dvmReleaseTrackedAlloc((Object*) classes, NULL);
+    RETURN_PTR(classes);
+}
+
+/*
+ * static Constructor[] getDeclaredConstructors(Class clazz, boolean publicOnly)
+ *     throws SecurityException
+ */
+static void Dalvik_java_lang_Class_getDeclaredConstructors(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+    bool publicOnly = (args[1] != 0);
+    ArrayObject* constructors;
+
+    constructors = dvmGetDeclaredConstructors(clazz, publicOnly);
+    dvmReleaseTrackedAlloc((Object*) constructors, NULL);
+
+    RETURN_PTR(constructors);
+}
+
+/*
+ * static Field[] getDeclaredFields(Class klass, boolean publicOnly)
+ *     throws SecurityException
+ */
+static void Dalvik_java_lang_Class_getDeclaredFields(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+    bool publicOnly = (args[1] != 0);
+    ArrayObject* fields;
+
+    fields = dvmGetDeclaredFields(clazz, publicOnly);
+    dvmReleaseTrackedAlloc((Object*) fields, NULL);
+
+    RETURN_PTR(fields);
+}
+
+/*
+ * static Method[] getDeclaredMethods(Class clazz, boolean publicOnly)
+ *     throws SecurityException
+ */
+static void Dalvik_java_lang_Class_getDeclaredMethods(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+    bool publicOnly = (args[1] != 0);
+    ArrayObject* methods;
+
+    methods = dvmGetDeclaredMethods(clazz, publicOnly);
+    dvmReleaseTrackedAlloc((Object*) methods, NULL);
+
+    RETURN_PTR(methods);
+}
+
+/*
+ * Class[] getInterfaces()
+ */
+static void Dalvik_java_lang_Class_getInterfaces(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+    ArrayObject* interfaces;
+
+    interfaces = dvmGetInterfaces(clazz);
+    dvmReleaseTrackedAlloc((Object*) interfaces, NULL);
+
+    RETURN_PTR(interfaces);
+}
+
+/*
+ * private static int getModifiers(Class klass, boolean
+ *     ignoreInnerClassesAttrib)
+ *
+ * Return the class' modifier flags.  If "ignoreInnerClassesAttrib" is false,
+ * and this is an inner class, we return the access flags from the inner class
+ * attribute.
+ */
static void Dalvik_java_lang_Class_getModifiers(const u4* args, JValue* pResult)
{
    ClassObject* clazz = (ClassObject*) args[0];
    bool ignoreInner = args[1];
    u4 accessFlags;

    /* strip VM-internal flag bits */
    accessFlags = clazz->accessFlags & JAVA_FLAGS_MASK;

    if (!ignoreInner) {
        /* see if we have an InnerClass annotation with flags in it */
        StringObject* className = NULL;
        int innerFlags;

        /* inner-class attribute flags override the direct access flags */
        if (dvmGetInnerClass(clazz, &className, &innerFlags))
            accessFlags = innerFlags & JAVA_FLAGS_MASK;

        dvmReleaseTrackedAlloc((Object*) className, NULL);
    }

    RETURN_INT(accessFlags);
}
+
+/*
+ * public String getName()
+ *
+ * Return the class' name.
+ */
static void Dalvik_java_lang_Class_getName(const u4* args, JValue* pResult)
{
    ClassObject* clazz = (ClassObject*) args[0];
    const char* descriptor = clazz->descriptor;
    StringObject* nameObj;

    if ((descriptor[0] != 'L') && (descriptor[0] != '[')) {
        /*
         * The descriptor indicates that this is the class for
         * a primitive type; special-case the return value.
         */
        const char* name;
        switch (descriptor[0]) {
            case 'Z': name = "boolean"; break;
            case 'B': name = "byte";    break;
            case 'C': name = "char";    break;
            case 'S': name = "short";   break;
            case 'I': name = "int";     break;
            case 'J': name = "long";    break;
            case 'F': name = "float";   break;
            case 'D': name = "double";  break;
            case 'V': name = "void";    break;
            default: {
                /* shouldn't happen for a well-formed class */
                LOGE("Unknown primitive type '%c'\n", descriptor[0]);
                assert(false);
                RETURN_PTR(NULL);
            }
        }

        nameObj = dvmCreateStringFromCstr(name, ALLOC_DEFAULT);
    } else {
        /*
         * Convert the UTF-8 name to a java.lang.String. The
         * name must use '.' to separate package components.
         *
         * TODO: this could be more efficient. Consider a custom
         * conversion function here that walks the string once and
         * avoids the allocation for the common case (name less than,
         * say, 128 bytes).
         */
        char* dotName = dvmDescriptorToDot(clazz->descriptor);
        nameObj = dvmCreateStringFromCstr(dotName, ALLOC_DEFAULT);
        free(dotName);
    }

    /* un-track before returning to the interpreter */
    dvmReleaseTrackedAlloc((Object*) nameObj, NULL);

#if 0
    /* doesn't work -- need "java.lang.String" not "java/lang/String" */
    {
        /*
         * Find the string in the DEX file and use the copy in the intern
         * table if it already exists (else put one there).  Only works
         * for strings in the DEX file, e.g. not arrays.
         *
         * We have to do the class lookup by name in the DEX file because
         * we don't have a DexClassDef pointer in the ClassObject, and it's
         * not worth adding one there just for this.  Should be cheaper
         * to do this than the string-creation above.
         */
        const DexFile* pDexFile = clazz->pDexFile;
        const DexClassDef* pClassDef;
        const DexClassId* pClassId;
        
        pDexFile = clazz->pDexFile;
        pClassDef = dvmDexFindClass(pDexFile, clazz->descriptor);
        pClassId = dvmDexGetClassId(pDexFile, pClassDef->classIdx);
        nameObj = dvmDexGetResolvedString(pDexFile, pClassId->nameIdx);
        if (nameObj == NULL) {
            nameObj = dvmResolveString(clazz, pClassId->nameIdx);
            if (nameObj == NULL)
                LOGW("WARNING: couldn't find string %u for '%s'\n",
                    pClassId->nameIdx, clazz->name);
        }
    }
#endif

    RETURN_PTR(nameObj);
}
+
+/*
+ * Return the superclass for instances of this class.
+ *
+ * If the class represents a java/lang/Object, an interface, a primitive
+ * type, or void (which *is* a primitive type??), return NULL.
+ *
+ * For an array, return the java/lang/Object ClassObject.
+ */
static void Dalvik_java_lang_Class_getSuperclass(const u4* args,
    JValue* pResult)
{
    ClassObject* clazz = (ClassObject*) args[0];

    /* primitives and interfaces report no superclass; for everything else
     * "super" is NULL only for java/lang/Object itself */
    if (dvmIsPrimitiveClass(clazz) || dvmIsInterfaceClass(clazz))
        RETURN_PTR(NULL);
    else
        RETURN_PTR(clazz->super);
}
+
+/*
+ * public boolean isAssignableFrom(Class<?> cls)
+ *
+ * Determine if this class is either the same as, or is a superclass or
+ * superinterface of, the class specified in the "cls" parameter.
+ */
static void Dalvik_java_lang_Class_isAssignableFrom(const u4* args,
    JValue* pResult)
{
    ClassObject* thisPtr = (ClassObject*) args[0];
    ClassObject* testClass = (ClassObject*) args[1];

    /* per the API, a null argument raises NullPointerException */
    if (testClass == NULL) {
        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
        RETURN_INT(false);
    }
    /* note the argument order: is testClass an instance-type of thisPtr? */
    RETURN_INT(dvmInstanceof(testClass, thisPtr));
}
+
+/*
+ * public boolean isInstance(Object o)
+ *
+ * Dynamic equivalent of Java programming language "instanceof".
+ */
static void Dalvik_java_lang_Class_isInstance(const u4* args,
    JValue* pResult)
{
    ClassObject* thisPtr = (ClassObject*) args[0];
    Object* testObj = (Object*) args[1];

    /* "null instanceof X" is false, not an exception */
    if (testObj == NULL)
        RETURN_INT(false);
    RETURN_INT(dvmInstanceof(testObj->clazz, thisPtr));
}
+
+/*
+ * public boolean isInterface()
+ */
static void Dalvik_java_lang_Class_isInterface(const u4* args,
    JValue* pResult)
{
    ClassObject* thisPtr = (ClassObject*) args[0];

    /* direct query of the class's access flags */
    RETURN_INT(dvmIsInterfaceClass(thisPtr));
}
+
+/*
+ * public boolean isPrimitive()
+ */
static void Dalvik_java_lang_Class_isPrimitive(const u4* args,
    JValue* pResult)
{
    ClassObject* thisPtr = (ClassObject*) args[0];

    /* true only for the nine primitive-type class objects */
    RETURN_INT(dvmIsPrimitiveClass(thisPtr));
}
+
+/*
+ * public T newInstance() throws InstantiationException, IllegalAccessException
+ *
+ * Create a new instance of this class.
+ */
+static void Dalvik_java_lang_Class_newInstance(const u4* args, JValue* pResult)
+{
+    Thread* self = dvmThreadSelf();
+    ClassObject* clazz = (ClassObject*) args[0];
+    Method* init;
+    Object* newObj;
+
+    /* can't instantiate these */
+    if (dvmIsPrimitiveClass(clazz) || dvmIsInterfaceClass(clazz)
+        || dvmIsArrayClass(clazz) || dvmIsAbstractClass(clazz))
+    {
+        LOGD("newInstance failed: p%d i%d [%d a%d\n",
+            dvmIsPrimitiveClass(clazz), dvmIsInterfaceClass(clazz),
+            dvmIsArrayClass(clazz), dvmIsAbstractClass(clazz));
+        dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationException;",
+            clazz->descriptor);
+        RETURN_VOID();
+    }
+
+    /* initialize the class if it hasn't been already */
+    if (!dvmIsClassInitialized(clazz)) {
+        if (!dvmInitClass(clazz)) {
+            LOGW("Class init failed in newInstance call (%s)\n",
+                clazz->descriptor);
+            assert(dvmCheckException(self));
+            RETURN_VOID();
+        }
+    }
+
+    /* find the "nullary" constructor */
+    init = dvmFindDirectMethodByDescriptor(clazz, "<init>", "()V");
+    if (init == NULL) {
+        /* common cause: secret "this" arg on non-static inner class ctor */
+        LOGD("newInstance failed: no <init>()\n");
+        dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationException;",
+            clazz->descriptor);
+        RETURN_VOID();
+    }
+
+    /*
+     * Verify access from the call site.
+     *
+     * First, make sure the method invoking Class.newInstance() has permission
+     * to access the class.
+     *
+     * Second, make sure it has permission to invoke the constructor.  The
+     * constructor must be public or, if the caller is in the same package,
+     * have package scope.
+     */
+    ClassObject* callerClass = dvmGetCallerClass(self->curFrame);
+
+    if (!dvmCheckClassAccess(callerClass, clazz)) {
+        LOGD("newInstance failed: %s not accessible to %s\n",
+            clazz->descriptor, callerClass->descriptor);
+        dvmThrowException("Ljava/lang/IllegalAccessException;",
+            "access to class not allowed");
+        RETURN_VOID();
+    }
+    if (!dvmCheckMethodAccess(callerClass, init)) {
+        LOGD("newInstance failed: %s.<init>() not accessible to %s\n",
+            clazz->descriptor, callerClass->descriptor);
+        dvmThrowException("Ljava/lang/IllegalAccessException;",
+            "access to constructor not allowed");
+        RETURN_VOID();
+    }
+
+    newObj = dvmAllocObject(clazz, ALLOC_DEFAULT);
+    JValue unused;
+
+    /* invoke constructor; unlike reflection calls, we don't wrap exceptions */
+    dvmCallMethod(self, init, newObj, &unused);
+    dvmReleaseTrackedAlloc(newObj, NULL);
+
+    RETURN_PTR(newObj);
+}
+
+/*
+ * private Object[] getSignatureAnnotation()
+ *
+ * Returns the signature annotation array.
+ */
+static void Dalvik_java_lang_Class_getSignatureAnnotation(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+    ArrayObject* arr = dvmGetClassSignatureAnnotation(clazz);
+
+    dvmReleaseTrackedAlloc((Object*) arr, NULL);
+    RETURN_PTR(arr);
+}
+
+/*
+ * public Class getDeclaringClass()
+ *
+ * Get the class that encloses this class (if any).
+ */
+static void Dalvik_java_lang_Class_getDeclaringClass(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+
+    ClassObject* enclosing = dvmGetDeclaringClass(clazz);
+    dvmReleaseTrackedAlloc((Object*) enclosing, NULL);
+    RETURN_PTR(enclosing);
+}
+
+/*
+ * public Class getEnclosingClass()
+ *
+ * Get the class that encloses this class (if any).
+ */
+static void Dalvik_java_lang_Class_getEnclosingClass(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+
+    ClassObject* enclosing = dvmGetEnclosingClass(clazz);
+    dvmReleaseTrackedAlloc((Object*) enclosing, NULL);
+    RETURN_PTR(enclosing);
+}
+
+/*
+ * public Constructor getEnclosingConstructor()
+ *
+ * Get the constructor that encloses this class (if any).
+ */
+static void Dalvik_java_lang_Class_getEnclosingConstructor(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+
+    Object* enclosing = dvmGetEnclosingMethod(clazz);
+    if (enclosing != NULL) {
+        dvmReleaseTrackedAlloc(enclosing, NULL);
+        if (enclosing->clazz == gDvm.classJavaLangReflectConstructor) {
+            RETURN_PTR(enclosing);
+        }
+        assert(enclosing->clazz == gDvm.classJavaLangReflectMethod);
+    }
+    RETURN_PTR(NULL);
+}
+
+/*
+ * public Method getEnclosingMethod()
+ *
+ * Get the method that encloses this class (if any).
+ */
+static void Dalvik_java_lang_Class_getEnclosingMethod(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+
+    Object* enclosing = dvmGetEnclosingMethod(clazz);
+    if (enclosing != NULL) {
+        dvmReleaseTrackedAlloc(enclosing, NULL);
+        if (enclosing->clazz == gDvm.classJavaLangReflectMethod) {
+            RETURN_PTR(enclosing);
+        }
+        assert(enclosing->clazz == gDvm.classJavaLangReflectConstructor);
+    }
+    RETURN_PTR(NULL);
+}
+
+#if 0
static void Dalvik_java_lang_Class_getGenericInterfaces(const u4* args,
    JValue* pResult)
{
    /* stub: generic-type reflection isn't implemented in the VM yet
     * (this whole group is compiled out via the surrounding "#if 0") */
    dvmThrowException("Ljava/lang/UnsupportedOperationException;",
        "native method not implemented");

    RETURN_PTR(NULL);
}
+
static void Dalvik_java_lang_Class_getGenericSuperclass(const u4* args,
    JValue* pResult)
{
    /* stub: generic-type reflection isn't implemented in the VM yet */
    dvmThrowException("Ljava/lang/UnsupportedOperationException;",
        "native method not implemented");

    RETURN_PTR(NULL);
}
+
static void Dalvik_java_lang_Class_getTypeParameters(const u4* args,
    JValue* pResult)
{
    /* stub: generic-type reflection isn't implemented in the VM yet */
    dvmThrowException("Ljava/lang/UnsupportedOperationException;",
        "native method not implemented");

    RETURN_PTR(NULL);
}
+#endif
+
+/*
+ * public boolean isAnonymousClass()
+ *
+ * Returns true if this is an "anonymous" class.
+ */
+static void Dalvik_java_lang_Class_isAnonymousClass(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+    StringObject* className = NULL;
+    int accessFlags;
+
+    /*
+     * If this has an InnerClass annotation, pull it out.  Lack of the
+     * annotation, or an annotation with a NULL class name, indicates
+     * that this is an anonymous inner class.
+     */
+    if (!dvmGetInnerClass(clazz, &className, &accessFlags))
+        RETURN_BOOLEAN(false);
+
+    dvmReleaseTrackedAlloc((Object*) className, NULL);
+    RETURN_BOOLEAN(className == NULL);
+}
+
+/*
+ * private Annotation[] getDeclaredAnnotations()
+ *
+ * Return the annotations declared on this class.
+ */
+static void Dalvik_java_lang_Class_getDeclaredAnnotations(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+
+    ArrayObject* annos = dvmGetClassAnnotations(clazz);
+    dvmReleaseTrackedAlloc((Object*) annos, NULL);
+    RETURN_PTR(annos);
+}
+
+/*
+ * public String getInnerClassName()
+ *
+ * Returns the simple name of a member class or local class, or null otherwise. 
+ */
+static void Dalvik_java_lang_Class_getInnerClassName(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+    StringObject* nameObj;
+    int flags;
+    
+    if (dvmGetInnerClass(clazz, &nameObj, &flags)) {
+        dvmReleaseTrackedAlloc((Object*) nameObj, NULL);
+        RETURN_PTR(nameObj);
+    } else {
+        RETURN_PTR(NULL);
+    }
+}
+
+/*
+ * static native void setAccessibleNoCheck(AccessibleObject ao, boolean flag);
+ */
+static void Dalvik_java_lang_Class_setAccessibleNoCheck(const u4* args,
+    JValue* pResult)
+{
+    Object* target = (Object*) args[0];
+    u4 flag = (u4) args[1];
+
+    dvmSetFieldBoolean(target, gDvm.offJavaLangReflectAccessibleObject_flag,
+            flag);
+}
+
/*
 * Internal-native method table for java.lang.Class.  Each entry is
 * { Java method name, method signature, implementing C function }; the
 * list is terminated by a NULL sentinel entry.
 */
static const DalvikNativeMethod java_lang_Class[] = {
    { "desiredAssertionStatus", "()Z",
        Dalvik_java_lang_Class_desiredAssertionStatus },
    { "classForName",           "(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;",
        Dalvik_java_lang_Class_classForName },
    { "getClassLoader",         "(Ljava/lang/Class;)Ljava/lang/ClassLoader;",
        Dalvik_java_lang_Class_getClassLoader },
    { "getComponentType",       "()Ljava/lang/Class;",
        Dalvik_java_lang_Class_getComponentType },
    { "getSignatureAnnotation",  "()[Ljava/lang/Object;",
        Dalvik_java_lang_Class_getSignatureAnnotation },
    { "getDeclaredClasses",     "(Ljava/lang/Class;Z)[Ljava/lang/Class;",
        Dalvik_java_lang_Class_getDeclaredClasses },
    { "getDeclaredConstructors", "(Ljava/lang/Class;Z)[Ljava/lang/reflect/Constructor;",
        Dalvik_java_lang_Class_getDeclaredConstructors },
    { "getDeclaredFields",      "(Ljava/lang/Class;Z)[Ljava/lang/reflect/Field;",
        Dalvik_java_lang_Class_getDeclaredFields },
    { "getDeclaredMethods",     "(Ljava/lang/Class;Z)[Ljava/lang/reflect/Method;",
        Dalvik_java_lang_Class_getDeclaredMethods },
    { "getInterfaces",          "()[Ljava/lang/Class;",
        Dalvik_java_lang_Class_getInterfaces },
    { "getModifiers",           "(Ljava/lang/Class;Z)I",
        Dalvik_java_lang_Class_getModifiers },
    { "getName",                "()Ljava/lang/String;",
        Dalvik_java_lang_Class_getName },
    { "getSuperclass",          "()Ljava/lang/Class;",
        Dalvik_java_lang_Class_getSuperclass },
    { "isAssignableFrom",       "(Ljava/lang/Class;)Z",
        Dalvik_java_lang_Class_isAssignableFrom },
    { "isInstance",             "(Ljava/lang/Object;)Z",
        Dalvik_java_lang_Class_isInstance },
    { "isInterface",            "()Z",
        Dalvik_java_lang_Class_isInterface },
    { "isPrimitive",            "()Z",
        Dalvik_java_lang_Class_isPrimitive },
    { "newInstance",            "()Ljava/lang/Object;",
        Dalvik_java_lang_Class_newInstance },
    { "getDeclaringClass",      "()Ljava/lang/Class;",
        Dalvik_java_lang_Class_getDeclaringClass },
    { "getEnclosingClass",      "()Ljava/lang/Class;",
        Dalvik_java_lang_Class_getEnclosingClass },
    { "getEnclosingConstructor", "()Ljava/lang/reflect/Constructor;",
        Dalvik_java_lang_Class_getEnclosingConstructor },
    { "getEnclosingMethod",     "()Ljava/lang/reflect/Method;",
        Dalvik_java_lang_Class_getEnclosingMethod },
#if 0
    { "getGenericInterfaces",   "()[Ljava/lang/reflect/Type;",
        Dalvik_java_lang_Class_getGenericInterfaces },
    { "getGenericSuperclass",   "()Ljava/lang/reflect/Type;",
        Dalvik_java_lang_Class_getGenericSuperclass },
    { "getTypeParameters",      "()Ljava/lang/reflect/TypeVariable;",
        Dalvik_java_lang_Class_getTypeParameters },
#endif
    { "isAnonymousClass",       "()Z",
        Dalvik_java_lang_Class_isAnonymousClass },
    { "getDeclaredAnnotations", "()[Ljava/lang/annotation/Annotation;",
        Dalvik_java_lang_Class_getDeclaredAnnotations },
    { "getInnerClassName",       "()Ljava/lang/String;",
        Dalvik_java_lang_Class_getInnerClassName },
    { "setAccessibleNoCheck",   "(Ljava/lang/reflect/AccessibleObject;Z)V",
        Dalvik_java_lang_Class_setAccessibleNoCheck },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.System
+ * ===========================================================================
+ */
+
+/*
+ * public static void arraycopy(Object src, int srcPos, Object dest,
+ *      int destPos, int length)
+ *
+ * The description of this function is long, and describes a multitude
+ * of checks and exceptions.
+ */
+static void Dalvik_java_lang_System_arraycopy(const u4* args, JValue* pResult)
+{
+    void* (*copyFunc)(void *dest, const void *src, size_t n);
+    ArrayObject* srcArray;
+    ArrayObject* dstArray;
+    ClassObject* srcClass;
+    ClassObject* dstClass;
+    int srcPos, dstPos, length;
+    char srcType, dstType;
+    bool srcPrim, dstPrim;
+
+    srcArray = (ArrayObject*) args[0];
+    srcPos = args[1];
+    dstArray = (ArrayObject*) args[2];
+    dstPos = args[3];
+    length = args[4];
+
+    if (srcArray == dstArray)
+        copyFunc = memmove;         /* might overlap */
+    else
+        copyFunc = memcpy;          /* can't overlap, use faster func */
+
+    /* check for null or bad pointer */
+    if (!dvmValidateObject((Object*)srcArray) ||
+        !dvmValidateObject((Object*)dstArray))
+    {
+        assert(dvmCheckException(dvmThreadSelf()));
+        RETURN_VOID();
+    }
+    /* make sure it's an array */
+    if (!dvmIsArray(srcArray) || !dvmIsArray(dstArray)) {
+        dvmThrowException("Ljava/lang/ArrayStoreException;", NULL);
+        RETURN_VOID();
+    }
+
+    if (srcPos < 0 || dstPos < 0 || length < 0 ||
+        srcPos + length > (int) srcArray->length ||
+        dstPos + length > (int) dstArray->length)
+    {
+        dvmThrowException("Ljava/lang/IndexOutOfBoundsException;", NULL);
+        RETURN_VOID();
+    }
+
+    srcClass = srcArray->obj.clazz;
+    dstClass = dstArray->obj.clazz;
+    srcType = srcClass->descriptor[1];
+    dstType = dstClass->descriptor[1];
+
+    /*
+     * If one of the arrays holds a primitive type, the other array must
+     * hold the same type.
+     */
+    srcPrim = (srcType != '[' && srcType != 'L');
+    dstPrim = (dstType != '[' && dstType != 'L');
+    if (srcPrim || dstPrim) {
+        int width;
+
+        if (srcPrim != dstPrim || srcType != dstType) {
+            dvmThrowException("Ljava/lang/ArrayStoreException;", NULL);
+            RETURN_VOID();
+        }
+
+        switch (srcClass->descriptor[1]) {
+        case 'B':
+        case 'Z':
+            width = 1;
+            break;
+        case 'C':
+        case 'S':
+            width = 2;
+            break;
+        case 'F':
+        case 'I':
+            width = 4;
+            break;
+        case 'D':
+        case 'J':
+            width = 8;
+            break;
+        default:        /* 'V' or something weird */
+            LOGE("Weird array type '%s'\n", srcClass->descriptor);
+            assert(false);
+            width = 0;
+            break;
+        }
+
+        if (false) LOGVV("arraycopy prim dst=%p %d src=%p %d len=%d\n",
+                dstArray->contents, dstPos * width,
+                srcArray->contents, srcPos * width,
+                length * width);
+        (*copyFunc)((u1*)dstArray->contents + dstPos * width,
+                (const u1*)srcArray->contents + srcPos * width,
+                length * width);
+    } else {
+        /*
+         * Neither class is primitive.  See if elements in "src" are instances
+         * of elements in "dst" (e.g. copy String to String or String to
+         * Object).
+         */
+        int width = sizeof(Object*);
+
+        if (srcClass->arrayDim == dstClass->arrayDim &&
+            dvmInstanceof(srcClass, dstClass))
+        {
+            /*
+             * "dst" can hold "src"; copy the whole thing.
+             */
+            if (false) LOGVV("arraycopy ref dst=%p %d src=%p %d len=%d\n",
+                dstArray->contents, dstPos * width,
+                srcArray->contents, srcPos * width,
+                length * width);
+            (*copyFunc)((u1*)dstArray->contents + dstPos * width,
+                    (const u1*)srcArray->contents + srcPos * width,
+                    length * width);
+        } else {
+            /*
+             * The arrays are not fundamentally compatible.  However, we may
+             * still be able to do this if the destination object is compatible
+             * (e.g. copy Object to String, but the Object being copied is
+             * actually a String).  We need to copy elements one by one until
+             * something goes wrong.
+             *
+             * Because of overlapping moves, what we really want to do is
+             * compare the types and count up how many we can move, then call
+             * memmove() to shift the actual data.  If we just start from the
+             * front we could do a smear rather than a move.
+             */
+            Object** srcObj;
+            Object** dstObj;
+            int copyCount;
+            ClassObject*   clazz = NULL;
+
+            srcObj = ((Object**) srcArray->contents) + srcPos;
+            dstObj = ((Object**) dstArray->contents) + dstPos;
+
+            if (length > 0 && srcObj[0] != NULL)
+            {
+                clazz = srcObj[0]->clazz;
+                if (!dvmCanPutArrayElement(clazz, dstClass))
+                    clazz = NULL;
+            }
+
+            for (copyCount = 0; copyCount < length; copyCount++)
+            {
+                if (srcObj[copyCount] != NULL &&
+                    srcObj[copyCount]->clazz != clazz &&
+                    !dvmCanPutArrayElement(srcObj[copyCount]->clazz, dstClass))
+                {
+                    /* can't put this element into the array */
+                    break;
+                }
+            }
+
+            if (false) LOGVV("arraycopy iref dst=%p %d src=%p %d count=%d of %d\n",
+                dstArray->contents, dstPos * width,
+                srcArray->contents, srcPos * width,
+                copyCount, length);
+            (*copyFunc)((u1*)dstArray->contents + dstPos * width,
+                    (const u1*)srcArray->contents + srcPos * width,
+                    copyCount * width);
+
+            if (copyCount != length) {
+                dvmThrowException("Ljava/lang/ArrayStoreException;", NULL);
+                RETURN_VOID();
+            }
+        }
+    }
+
+    RETURN_VOID();
+}
+
+/*
+ * static long currentTimeMillis()
+ *
+ * Current time, in miliseconds.  This doesn't need to be internal to the
+ * VM, but we're already handling java.lang.System here.
+ */
+static void Dalvik_java_lang_System_currentTimeMillis(const u4* args,
+    JValue* pResult)
+{
+    struct timeval tv;
+
+    UNUSED_PARAMETER(args);
+
+    gettimeofday(&tv, (struct timezone *) NULL);
+    long long when = tv.tv_sec * 1000LL + tv.tv_usec / 1000;
+
+    RETURN_LONG(when);
+}
+
+/*
+ * static long nanoTime()
+ *
+ * Current monotonically-increasing time, in nanoseconds.  This doesn't
+ * need to be internal to the VM, but we're already handling
+ * java.lang.System here.
+ */
+static void Dalvik_java_lang_System_nanoTime(const u4* args, JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+    u8 when = dvmGetRelativeTimeNsec();
+    RETURN_LONG(when);
+}
+
/*
 * static int identityHashCode(Object x)
 *
 * Returns the hash code that the default hashCode() method would
 * return for "x", even if "x"'s class overrides hashCode().
 */
static void Dalvik_java_lang_System_identityHashCode(const u4* args,
    JValue* pResult)
{
    /* This is a static method, which means args[0] is the Object.
     * Passing the same args to Object.hashCode will work because
     * it treats the first arg as the "this" pointer.
     */
    Dalvik_java_lang_Object_hashCode(args, pResult);
}
+
+/*
+ * public static String mapLibraryName(String libname)
+ */
+static void Dalvik_java_lang_System_mapLibraryName(const u4* args,
+    JValue* pResult)
+{
+    StringObject* nameObj = (StringObject*) args[0];
+    StringObject* result = NULL;
+    char* name;
+    char* mappedName;
+
+    if (nameObj == NULL) {
+        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        RETURN_VOID();
+    }
+
+    name = dvmCreateCstrFromString(nameObj);
+    mappedName = dvmCreateSystemLibraryName(name);
+    if (mappedName != NULL) {
+        result = dvmCreateStringFromCstr(mappedName, ALLOC_DEFAULT);
+        dvmReleaseTrackedAlloc((Object*) result, NULL);
+    }
+
+    free(name);
+    free(mappedName);
+    RETURN_PTR(result);
+}
+
/*
 * Internal-native method table for java.lang.System, terminated by a
 * NULL sentinel entry.
 */
static const DalvikNativeMethod java_lang_System[] = {
    { "arraycopy",          "(Ljava/lang/Object;ILjava/lang/Object;II)V",
        Dalvik_java_lang_System_arraycopy },
    { "currentTimeMillis",  "()J",
        Dalvik_java_lang_System_currentTimeMillis },
    { "nanoTime",  "()J",
        Dalvik_java_lang_System_nanoTime },
    { "identityHashCode",  "(Ljava/lang/Object;)I",
        Dalvik_java_lang_System_identityHashCode },
    { "mapLibraryName",     "(Ljava/lang/String;)Ljava/lang/String;",
        Dalvik_java_lang_System_mapLibraryName },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.SystemProperties
+ * ===========================================================================
+ */
+
+/*
+ * Expected call sequence:
+ *  (1) call SystemProperties.preInit() to get VM defaults
+ *  (2) set any higher-level defaults
+ *  (3) call SystemProperties.postInit() to get command-line overrides
+ * This currently happens the first time somebody tries to access a property.
+ *
+ * SystemProperties is a Dalvik-specific package-scope class.
+ */
+
/*
 * void preInit()
 *
 * Tells the VM to populate the properties table with VM defaults.
 */
static void Dalvik_java_lang_SystemProperties_preInit(const u4* args,
    JValue* pResult)
{
    /* args[0] is the object whose properties table gets the defaults */
    dvmCreateDefaultProperties((Object*) args[0]);
    RETURN_VOID();
}
+
/*
 * void postInit()
 *
 * Tells the VM to update properties with values from the command line.
 */
static void Dalvik_java_lang_SystemProperties_postInit(const u4* args,
    JValue* pResult)
{
    /* args[0] is the object whose properties get the overrides applied */
    dvmSetCommandLineProperties((Object*) args[0]);
    RETURN_VOID();
}
+
/*
 * Internal-native method table for the Dalvik-specific
 * java.lang.SystemProperties class, terminated by a NULL sentinel entry.
 */
static const DalvikNativeMethod java_lang_SystemProperties[] = {
    { "preInit",            "()V",
        Dalvik_java_lang_SystemProperties_preInit },
    { "postInit",           "()V",
        Dalvik_java_lang_SystemProperties_postInit },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.Runtime
+ * ===========================================================================
+ */
+
/*
 * public void gc()
 *
 * Initiate a gc.
 */
static void Dalvik_java_lang_Runtime_gc(const u4* args, JValue* pResult)
{
    UNUSED_PARAMETER(args);

    /* see dvmCollectGarbage for the meaning of the "false" argument */
    dvmCollectGarbage(false);
    RETURN_VOID();
}
+
+/*
+ * private static void nativeExit(int code, boolean isExit)
+ *
+ * Runtime.exit() calls this after doing shutdown processing.  Runtime.halt()
+ * uses this as well.
+ */
+static void Dalvik_java_lang_Runtime_nativeExit(const u4* args,
+    JValue* pResult)
+{
+    int status = args[0];
+    bool isExit = (args[1] != 0);
+
+    if (isExit && gDvm.exitHook != NULL) {
+        dvmChangeStatus(NULL, THREAD_NATIVE);
+        (*gDvm.exitHook)(status);     // not expected to return
+        dvmChangeStatus(NULL, THREAD_RUNNING);
+        LOGW("JNI exit hook returned\n");
+    }
+    LOGD("Calling exit(%d)\n", status);
+    exit(status);
+}
+
+/*
+ * static boolean nativeLoad(String filename, ClassLoader loader)
+ *
+ * Load the specified full path as a dynamic library filled with
+ * JNI-compatible methods.
+ */
+static void Dalvik_java_lang_Runtime_nativeLoad(const u4* args,
+    JValue* pResult)
+{
+    StringObject* fileNameObj = (StringObject*) args[0];
+    Object* classLoader = (Object*) args[1];
+    char* fileName;
+    int result;
+
+    if (fileNameObj == NULL)
+        RETURN_INT(false);
+    fileName = dvmCreateCstrFromString(fileNameObj);
+
+    result = dvmLoadNativeCode(fileName, classLoader);
+
+    free(fileName);
+    RETURN_INT(result);
+}
+
/*
 * public void runFinalization(boolean forced)
 *
 * Requests that the VM runs finalizers for objects on the heap. If the
 * parameter forced is true, then the VM needs to ensure finalization.
 * Otherwise this only inspires the VM to make a best-effort attempt to
 * run finalizers before returning, but it's not guaranteed to actually
 * do anything.
 */
static void Dalvik_java_lang_Runtime_runFinalization(const u4* args,
    JValue* pResult)
{
    bool forced = (args[0] != 0);

    /* best-effort: block until the heap worker has drained its queue */
    dvmWaitForHeapWorkerIdle();
    if (forced) {
        // TODO(Google) Need to explicitly implement this,
        //              although dvmWaitForHeapWorkerIdle()
        //              should usually provide the "forced"
        //              behavior already.
    }

    RETURN_VOID();
}
+
/*
 * public long maxMemory()
 *
 * Returns GC heap max memory in bytes.
 */
static void Dalvik_java_lang_Runtime_maxMemory(const u4* args, JValue* pResult)
{
    unsigned int result = gDvm.heapSizeMax;
    RETURN_LONG(result);
}
+
/*
 * public long totalMemory()
 *
 * Returns GC heap total memory in bytes.
 */
static void Dalvik_java_lang_Runtime_totalMemory(const u4* args,
    JValue* pResult)
{
    int result = dvmGetHeapDebugInfo(kVirtualHeapSize);
    RETURN_LONG(result);
}
+
/*
 * public long freeMemory()
 *
 * Returns GC heap free memory in bytes.
 */
static void Dalvik_java_lang_Runtime_freeMemory(const u4* args,
    JValue* pResult)
{
    int result = dvmGetHeapDebugInfo(kVirtualHeapSize)
                 - dvmGetHeapDebugInfo(kVirtualHeapAllocated);
    /* clamp to zero in case "allocated" momentarily exceeds "size" */
    if (result < 0) {
        result = 0;
    }
    RETURN_LONG(result);
}
+
/*
 * Internal-native method table for java.lang.Runtime, terminated by a
 * NULL sentinel entry.
 */
static const DalvikNativeMethod java_lang_Runtime[] = {
    { "freeMemory",          "()J",
        Dalvik_java_lang_Runtime_freeMemory },
    { "gc",                 "()V",
        Dalvik_java_lang_Runtime_gc },
    { "maxMemory",          "()J",
        Dalvik_java_lang_Runtime_maxMemory },
    { "nativeExit",         "(IZ)V",
        Dalvik_java_lang_Runtime_nativeExit },
    { "nativeLoad",         "(Ljava/lang/String;Ljava/lang/ClassLoader;)Z",
        Dalvik_java_lang_Runtime_nativeLoad },
    { "runFinalization",    "(Z)V",
        Dalvik_java_lang_Runtime_runFinalization },
    { "totalMemory",          "()J",
        Dalvik_java_lang_Runtime_totalMemory },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.String
+ * ===========================================================================
+ */
+
+/*
+ * public String intern()
+ *
+ * Intern a string in the VM string table.
+ */
+static void Dalvik_java_lang_String_intern(const u4* args, JValue* pResult)
+{
+    StringObject* str = (StringObject*) args[0];
+    StringObject* interned;
+
+    interned = dvmLookupInternedString(str);
+    RETURN_PTR(interned);
+}
+
/*
 * Internal-native method table for java.lang.String, terminated by a
 * NULL sentinel entry.
 */
static const DalvikNativeMethod java_lang_String[] = {
    { "intern",             "()Ljava/lang/String;",
        Dalvik_java_lang_String_intern },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.reflect.AccessibleObject
+ * ===========================================================================
+ */
+
+/*
+ * private static Object[] getClassSignatureAnnotation(Class clazz)
+ *
+ * Return the Signature annotation for the specified class.  Equivalent to
+ * Class.getSignatureAnnotation(), but available to java.lang.reflect.
+ */
+static void Dalvik_java_lang_reflect_AccessibleObject_getClassSignatureAnnotation(
+    const u4* args, JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+    ArrayObject* arr = dvmGetClassSignatureAnnotation(clazz);
+
+    dvmReleaseTrackedAlloc((Object*) arr, NULL);
+    RETURN_PTR(arr);
+}
+
/*
 * Internal-native method table for java.lang.reflect.AccessibleObject,
 * terminated by a NULL sentinel entry.
 */
static const DalvikNativeMethod java_lang_reflect_AccessibleObject[] = {
    { "getClassSignatureAnnotation", "(Ljava/lang/Class;)[Ljava/lang/Object;",
      Dalvik_java_lang_reflect_AccessibleObject_getClassSignatureAnnotation },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.reflect.Array
+ * ===========================================================================
+ */
+
+/*
+ * private static Object createObjectArray(Class<?> componentType,
+ *     int length) throws NegativeArraySizeException;
+ *
+ * Create a one-dimensional array of Objects.
+ */
+static void Dalvik_java_lang_reflect_Array_createObjectArray(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* elementClass = (ClassObject*) args[0];
+    int length = args[1];
+    ArrayObject* newArray;
+
+    assert(elementClass != NULL);       // tested by caller
+    if (length < 0) {
+        dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+        RETURN_VOID();
+    }
+
+    newArray = dvmAllocObjectArray(elementClass, length, ALLOC_DEFAULT);
+    if (newArray == NULL) {
+        assert(dvmCheckException(dvmThreadSelf()));
+        RETURN_VOID();
+    }
+    dvmReleaseTrackedAlloc((Object*) newArray, NULL);
+
+    RETURN_PTR(newArray);
+}
+
+/*
+ * private static Object createMultiArray(Class<?> componentType,
+ *     int[] dimensions) throws NegativeArraySizeException;
+ *
+ * Create a multi-dimensional array of Objects or primitive types.
+ *
+ * We have to generate the names for X[], X[][], X[][][], and so on.  The
+ * easiest way to deal with that is to create the full name once and then
+ * subtract pieces off.  Besides, we want to start with the outermost
+ * piece and work our way in.
+ */
+static void Dalvik_java_lang_reflect_Array_createMultiArray(const u4* args,
+    JValue* pResult)
+{
+    static const char kPrimLetter[] = PRIM_TYPE_TO_LETTER;
+    ClassObject* elementClass = (ClassObject*) args[0];
+    ArrayObject* dimArray = (ArrayObject*) args[1];
+    ClassObject* arrayClass;
+    ArrayObject* newArray;
+    char* acDescriptor;
+    int numDim, i;
+    int* dimensions;
+
+    LOGV("createMultiArray: '%s' [%d]\n",
+        elementClass->descriptor, dimArray->length);
+
+    assert(elementClass != NULL);       // verified by caller
+
+    /*
+     * Verify dimensions.
+     *
+     * The caller is responsible for verifying that "dimArray" is non-null
+     * and has a length > 0 and <= 255.
+     */
+    assert(dimArray != NULL);           // verified by caller
+    numDim = dimArray->length;
+    assert(numDim > 0 && numDim <= 255);
+
+    dimensions = (int*) dimArray->contents;
+    for (i = 0; i < numDim; i++) {
+        if (dimensions[i] < 0) {
+            dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+            RETURN_VOID();
+        }
+        LOGVV("DIM %d: %d\n", i, dimensions[i]);
+    }
+
+    /*
+     * Generate the full name of the array class.
+     */
+    acDescriptor =
+        (char*) malloc(strlen(elementClass->descriptor) + numDim + 1);
+    memset(acDescriptor, '[', numDim);
+
+    LOGVV("#### element name = '%s'\n", elementClass->descriptor);
+    if (dvmIsPrimitiveClass(elementClass)) {
+        assert(elementClass->primitiveType >= 0);
+        acDescriptor[numDim] = kPrimLetter[elementClass->primitiveType];
+        acDescriptor[numDim+1] = '\0';
+    } else {
+        strcpy(acDescriptor+numDim, elementClass->descriptor);
+    }
+    LOGVV("#### array name = '%s'\n", acDescriptor);
+
+    /*
+     * Find/generate the array class.
+     */
+    arrayClass = dvmFindArrayClass(acDescriptor, elementClass->classLoader);
+    if (arrayClass == NULL) {
+        LOGW("Unable to find or generate array class '%s'\n", acDescriptor);
+        assert(dvmCheckException(dvmThreadSelf()));
+        free(acDescriptor);
+        RETURN_VOID();
+    }
+    free(acDescriptor);
+
+    /* create the array */
+    newArray = dvmAllocMultiArray(arrayClass, numDim-1, dimensions);
+    if (newArray == NULL) {
+        assert(dvmCheckException(dvmThreadSelf()));
+        RETURN_VOID();
+    }
+
+    dvmReleaseTrackedAlloc((Object*) newArray, NULL);
+    RETURN_PTR(newArray);
+}
+
/*
 * Internal-native method table for java.lang.reflect.Array, terminated
 * by a NULL sentinel entry.
 */
static const DalvikNativeMethod java_lang_reflect_Array[] = {
    { "createObjectArray",  "(Ljava/lang/Class;I)Ljava/lang/Object;",
        Dalvik_java_lang_reflect_Array_createObjectArray },
    { "createMultiArray",   "(Ljava/lang/Class;[I)Ljava/lang/Object;",
        Dalvik_java_lang_reflect_Array_createMultiArray },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.reflect.Constructor
+ * ===========================================================================
+ */
+
+/*
+ * public int getConstructorModifiers(Class declaringClass, int slot)
+ */
+static void Dalvik_java_lang_reflect_Constructor_getConstructorModifiers(
+    const u4* args, JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ClassObject* declaringClass = (ClassObject*) args[1];
+    int slot = args[2];
+    Method* meth;
+
+    meth = dvmSlotToMethod(declaringClass, slot);
+    RETURN_INT(fixMethodFlags(meth->accessFlags));
+}
+
+/*
+ * public int constructNative(Object[] args, Class declaringClass,
+ *     Class[] parameterTypes, int slot, boolean noAccessCheck)
+ */
+static void Dalvik_java_lang_reflect_Constructor_constructNative(
+    const u4* args, JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ArrayObject* argList = (ArrayObject*) args[1];
+    ClassObject* declaringClass = (ClassObject*) args[2];
+    ArrayObject* params = (ArrayObject*) args[3];
+    int slot = args[4];
+    bool noAccessCheck = (args[5] != 0);
+    Object* newObj;
+    Method* meth;
+
+    newObj = dvmAllocObject(declaringClass, ALLOC_DEFAULT);
+    if (newObj == NULL)
+        RETURN_PTR(NULL);
+
+    meth = dvmSlotToMethod(declaringClass, slot);
+    assert(meth != NULL);
+
+    (void) dvmInvokeMethod(newObj, meth, argList, params, NULL, noAccessCheck);
+    dvmReleaseTrackedAlloc(newObj, NULL);
+    RETURN_PTR(newObj);
+}
+
+/*
+ * public Annotation[] getDeclaredAnnotations(Class declaringClass, int slot)
+ *
+ * Return the annotations declared for this constructor.
+ */
+static void Dalvik_java_lang_reflect_Constructor_getDeclaredAnnotations(
+    const u4* args, JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ClassObject* declaringClass = (ClassObject*) args[1];
+    int slot = args[2];
+    Method* meth;
+
+    meth = dvmSlotToMethod(declaringClass, slot);
+    assert(meth != NULL);
+
+    ArrayObject* annos = dvmGetMethodAnnotations(meth);
+    dvmReleaseTrackedAlloc((Object*)annos, NULL);
+    RETURN_PTR(annos);
+}
+
+/*
+ * public Annotation[][] getParameterAnnotations(Class declaringClass, int slot)
+ *
+ * Return the annotations declared for this constructor's parameters.
+ */
+static void Dalvik_java_lang_reflect_Constructor_getParameterAnnotations(
+    const u4* args, JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ClassObject* declaringClass = (ClassObject*) args[1];
+    int slot = args[2];
+    Method* meth;
+
+    meth = dvmSlotToMethod(declaringClass, slot);
+    assert(meth != NULL);
+
+    ArrayObject* annos = dvmGetParameterAnnotations(meth);
+    dvmReleaseTrackedAlloc((Object*)annos, NULL);
+    RETURN_PTR(annos);
+}
+
+/*
+ * private Object[] getSignatureAnnotation()
+ *
+ * Returns the signature annotation.
+ */
+static void Dalvik_java_lang_reflect_Constructor_getSignatureAnnotation(
+    const u4* args, JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ClassObject* declaringClass = (ClassObject*) args[1];
+    int slot = args[2];
+    Method* meth;
+
+    meth = dvmSlotToMethod(declaringClass, slot);
+    assert(meth != NULL);
+
+    ArrayObject* arr = dvmGetMethodSignatureAnnotation(meth);
+    dvmReleaseTrackedAlloc((Object*) arr, NULL);
+    RETURN_PTR(arr);
+}
+
/*
 * Native method registration table for java.lang.reflect.Constructor.
 * Each entry is { Java method name, JNI-style type signature, C
 * implementation }; the all-NULL entry marks the end of the table.
 */
static const DalvikNativeMethod java_lang_reflect_Constructor[] = {
    { "constructNative",    "([Ljava/lang/Object;Ljava/lang/Class;[Ljava/lang/Class;IZ)Ljava/lang/Object;",
        Dalvik_java_lang_reflect_Constructor_constructNative },
    { "getConstructorModifiers", "(Ljava/lang/Class;I)I",
        Dalvik_java_lang_reflect_Constructor_getConstructorModifiers },
    { "getDeclaredAnnotations", "(Ljava/lang/Class;I)[Ljava/lang/annotation/Annotation;",
        Dalvik_java_lang_reflect_Constructor_getDeclaredAnnotations },
    { "getParameterAnnotations", "(Ljava/lang/Class;I)[[Ljava/lang/annotation/Annotation;",
        Dalvik_java_lang_reflect_Constructor_getParameterAnnotations },
    { "getSignatureAnnotation",  "(Ljava/lang/Class;I)[Ljava/lang/Object;",
        Dalvik_java_lang_reflect_Constructor_getSignatureAnnotation },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.reflect.Field
+ * ===========================================================================
+ */
+
+/*
+ * Get the address of a field from an object.  This can be used with "get"
+ * or "set".
+ *
+ * "declaringClass" is the class in which the field was declared.  For an
+ * instance field, "obj" is the object that holds the field data; for a
+ * static field its value is ignored.
+ *
+ * "If the underlying field is static, the class that declared the
+ * field is initialized if it has not already been initialized."
+ *
+ * On failure, throws an exception and returns NULL.
+ *
+ * The documentation lists exceptional conditions and the exceptions that
+ * should be thrown, but doesn't say which exception prevails when two or
+ * more exceptional conditions exist at the same time.  For example,
+ * attempting to set a protected field from an unrelated class causes an
+ * IllegalAccessException, while passing in a data type that doesn't match
+ * the field causes an IllegalArgumentException.  If code does both at the
+ * same time, we have to choose one or the other.
+ *
+ * The expected order is:
+ *  (1) Check for illegal access. Throw IllegalAccessException.
+ *  (2) Make sure the object actually has the field.  Throw
+ *      IllegalArgumentException.
+ *  (3) Make sure the field matches the expected type, e.g. if we issued
+ *      a "getInteger" call make sure the field is an integer or can be
+ *      converted to an int with a widening conversion.  Throw
+ *      IllegalArgumentException.
+ *  (4) Make sure "obj" is not null.  Throw NullPointerException.
+ *
+ * TODO: we're currently handling #3 after #4, because we don't check the
+ * widening conversion until we're actually extracting the value from the
+ * object (which won't work well if it's a null reference).
+ */
static JValue* getFieldDataAddr(Object* obj, ClassObject* declaringClass,
    int slot, bool isSetOperation, bool noAccessCheck)
{
    Field* field;
    JValue* result;

    field = dvmSlotToField(declaringClass, slot);
    assert(field != NULL);

    /* verify access */
    if (!noAccessCheck) {
        /* reflective writes to final fields are rejected unconditionally */
        if (isSetOperation && dvmIsFinalField(field)) {
            dvmThrowException("Ljava/lang/IllegalAccessException;",
                "field is marked 'final'");
            return NULL;
        }

        /* NOTE(review): presumably walks up past the reflection frames to
         * the user code that invoked Field.get/set — confirm frame depth */
        ClassObject* callerClass =
            dvmGetCaller2Class(dvmThreadSelf()->curFrame);

        /*
         * We need to check two things:
         *  (1) Would an instance of the calling class have access to the field?
         *  (2) If the field is "protected", is the object an instance of the
         *      calling class, or is the field's declaring class in the same
         *      package as the calling class?
         *
         * #1 is basic access control.  #2 ensures that, just because
         * you're a subclass of Foo, you can't mess with protected fields
         * in arbitrary Foo objects from other packages.
         */
        if (!dvmCheckFieldAccess(callerClass, field)) {
            dvmThrowException("Ljava/lang/IllegalAccessException;",
                "access to field not allowed");
            return NULL;
        }
        if (dvmIsProtectedField(field)) {
            bool isInstance, samePackage;

            /* static protected fields have no "obj"; fall through to the
             * same-package test alone */
            if (obj != NULL)
                isInstance = dvmInstanceof(obj->clazz, callerClass);
            else
                isInstance = false;
            samePackage = dvmInSamePackage(declaringClass, callerClass);

            if (!isInstance && !samePackage) {
                dvmThrowException("Ljava/lang/IllegalAccessException;",
                    "access to protected field not allowed");
                return NULL;
            }
        }
    }

    if (dvmIsStaticField(field)) {
        /* init class if necessary, then return ptr to storage in "field" */
        if (!dvmIsClassInitialized(declaringClass)) {
            if (!dvmInitClass(declaringClass)) {
                assert(dvmCheckException(dvmThreadSelf()));
                return NULL;
            }
        }

        result = dvmStaticFieldPtr((StaticField*) field);
    } else {
        /*
         * Verify object is of correct type (i.e. it actually has the
         * expected field in it), then grab a pointer to obj storage.
         * The call to verifyObjectInClass throws an NPE if "obj" is NULL.
         */
        if (!verifyObjectInClass(obj, declaringClass)) {
            assert(dvmCheckException(dvmThreadSelf()));
            if (obj != NULL) {
                LOGD("Wrong type of object for field lookup: %s %s\n",
                    obj->clazz->descriptor, declaringClass->descriptor);
            }
            return NULL;
        }
        result = dvmFieldPtr(obj, ((InstField*) field)->byteOffset);
    }

    return result;
}
+
+/*
+ * public int getFieldModifiers(Class declaringClass, int slot)
+ */
+static void Dalvik_java_lang_reflect_Field_getFieldModifiers(
+    const u4* args, JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ClassObject* declaringClass = (ClassObject*) args[1];
+    int slot = args[2];
+    Field* field;
+
+    field = dvmSlotToField(declaringClass, slot);
+    RETURN_INT(field->accessFlags & JAVA_FLAGS_MASK);
+}
+
+/*
+ * private Object getField(Object o, Class declaringClass, Class type,
+ *     int slot, boolean noAccessCheck)
+ *
+ * Primitive types need to be boxed.
+ */
+static void Dalvik_java_lang_reflect_Field_getField(const u4* args,
+    JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    Object* obj = (Object*) args[1];
+    ClassObject* declaringClass = (ClassObject*) args[2];
+    ClassObject* fieldType = (ClassObject*) args[3];
+    int slot = args[4];
+    bool noAccessCheck = (args[5] != 0);
+    JValue value;
+    const JValue* fieldPtr;
+    DataObject* result;
+
+    //dvmDumpClass(obj->clazz, kDumpClassFullDetail);
+
+    /* get a pointer to the field's data; performs access checks */
+    fieldPtr = getFieldDataAddr(obj, declaringClass, slot, false,noAccessCheck);
+    if (fieldPtr == NULL)
+        RETURN_VOID();
+
+    /* copy 4 or 8 bytes out */
+    if (fieldType->primitiveType == PRIM_LONG ||
+        fieldType->primitiveType == PRIM_DOUBLE)
+    {
+        value.j = fieldPtr->j;
+    } else {
+        value.i = fieldPtr->i;
+    }
+
+    result = dvmWrapPrimitive(value, fieldType);
+    dvmReleaseTrackedAlloc((Object*) result, NULL);
+    RETURN_PTR(result);
+}
+
+/*
+ * private void setField(Object o, Class declaringClass, Class type,
+ *     int slot, boolean noAccessCheck, Object value)
+ *
+ * When assigning into a primitive field we will automatically extract
+ * the value from box types.
+ */
+static void Dalvik_java_lang_reflect_Field_setField(const u4* args,
+    JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    Object* obj = (Object*) args[1];
+    ClassObject* declaringClass = (ClassObject*) args[2];
+    ClassObject* fieldType = (ClassObject*) args[3];
+    int slot = args[4];
+    bool noAccessCheck = (args[5] != 0);
+    Object* valueObj = (Object*) args[6];
+    JValue* fieldPtr;
+    JValue value;
+
+    /* unwrap primitive, or verify object type */
+    if (!dvmUnwrapPrimitive(valueObj, fieldType, &value)) {
+        dvmThrowException("Ljava/lang/IllegalArgumentException;",
+            "invalid value for field");
+        RETURN_VOID();
+    }
+
+    /* get a pointer to the field's data; performs access checks */
+    fieldPtr = getFieldDataAddr(obj, declaringClass, slot, true, noAccessCheck);
+    if (fieldPtr == NULL)
+        RETURN_VOID();
+
+    /* store 4 or 8 bytes */
+    if (fieldType->primitiveType == PRIM_LONG ||
+        fieldType->primitiveType == PRIM_DOUBLE)
+    {
+        fieldPtr->j = value.j;
+    } else {
+        fieldPtr->i = value.i;
+    }
+
+    RETURN_VOID();
+}
+
+/*
+ * Convert a reflection primitive type ordinal (inherited from the previous
+ * VM's reflection classes) to our value.
+ */
+static PrimitiveType convPrimType(int typeNum)
+{
+    static const PrimitiveType conv[PRIM_MAX] = {
+        PRIM_NOT, PRIM_BOOLEAN, PRIM_BYTE, PRIM_CHAR, PRIM_SHORT,
+        PRIM_INT, PRIM_FLOAT, PRIM_LONG, PRIM_DOUBLE
+    };
+    if (typeNum <= 0 || typeNum > 8)
+        return PRIM_NOT;
+    return conv[typeNum];
+}
+
+/*
+ * Primitive field getters, e.g.:
+ * private double getIField(Object o, Class declaringClass,
+ *     Class type, int slot, boolean noAccessCheck, int type_no)
+ *
+ * The "type_no" is defined by the java.lang.reflect.Field class.
+ */
+static void Dalvik_java_lang_reflect_Field_getPrimitiveField(const u4* args,
+    JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    Object* obj = (Object*) args[1];
+    ClassObject* declaringClass = (ClassObject*) args[2];
+    ClassObject* fieldType = (ClassObject*) args[3];
+    int slot = args[4];
+    bool noAccessCheck = (args[5] != 0);
+    int typeNum = args[6];
+    PrimitiveType targetType = convPrimType(typeNum);
+    const JValue* fieldPtr;
+    JValue value;
+
+    if (!dvmIsPrimitiveClass(fieldType)) {
+        dvmThrowException("Ljava/lang/IllegalArgumentException;",
+            "not a primitive field");
+        RETURN_VOID();
+    }
+
+    /* get a pointer to the field's data; performs access checks */
+    fieldPtr = getFieldDataAddr(obj, declaringClass, slot, false,noAccessCheck);
+    if (fieldPtr == NULL)
+        RETURN_VOID();
+
+    /* copy 4 or 8 bytes out */
+    if (fieldType->primitiveType == PRIM_LONG ||
+        fieldType->primitiveType == PRIM_DOUBLE)
+    {
+        value.j = fieldPtr->j;
+    } else {
+        value.i = fieldPtr->i;
+    }
+
+    /* retrieve value, performing a widening conversion if necessary */
+    if (dvmConvertPrimitiveValue(fieldType->primitiveType, targetType,
+        &(value.i), &(pResult->i)) < 0)
+    {
+        dvmThrowException("Ljava/lang/IllegalArgumentException;",
+            "invalid primitive conversion");
+        RETURN_VOID();
+    }
+}
+
+/*
+ * Primitive field setters, e.g.:
+ * private void setIField(Object o, Class declaringClass,
+ *     Class type, int slot, boolean noAccessCheck, int type_no, int value)
+ *
+ * The "type_no" is defined by the java.lang.reflect.Field class.
+ */
+static void Dalvik_java_lang_reflect_Field_setPrimitiveField(const u4* args,
+    JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    Object* obj = (Object*) args[1];
+    ClassObject* declaringClass = (ClassObject*) args[2];
+    ClassObject* fieldType = (ClassObject*) args[3];
+    int slot = args[4];
+    bool noAccessCheck = (args[5] != 0);
+    int typeNum = args[6];
+    const s4* valuePtr = (s4*) &args[7];
+    PrimitiveType srcType = convPrimType(typeNum);
+    JValue* fieldPtr;
+    JValue value;
+
+    if (!dvmIsPrimitiveClass(fieldType)) {
+        dvmThrowException("Ljava/lang/IllegalArgumentException;",
+            "not a primitive field");
+        RETURN_VOID();
+    }
+
+    /* convert the 32/64-bit arg to a JValue matching the field type */
+    if (dvmConvertPrimitiveValue(srcType, fieldType->primitiveType,
+        valuePtr, &(value.i)) < 0)
+    {
+        dvmThrowException("Ljava/lang/IllegalArgumentException;",
+            "invalid primitive conversion");
+        RETURN_VOID();
+    }
+
+    /* get a pointer to the field's data; performs access checks */
+    fieldPtr = getFieldDataAddr(obj, declaringClass, slot, true, noAccessCheck);
+    if (fieldPtr == NULL)
+        RETURN_VOID();
+
+    /* store 4 or 8 bytes */
+    if (fieldType->primitiveType == PRIM_LONG ||
+        fieldType->primitiveType == PRIM_DOUBLE)
+    {
+        fieldPtr->j = value.j;
+    } else {
+        fieldPtr->i = value.i;
+    }
+
+    RETURN_VOID();
+}
+
+/*
+ * public Annotation[] getDeclaredAnnotations(Class declaringClass, int slot)
+ *
+ * Return the annotations declared for this field.
+ */
+static void Dalvik_java_lang_reflect_Field_getDeclaredAnnotations(
+    const u4* args, JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ClassObject* declaringClass = (ClassObject*) args[1];
+    int slot = args[2];
+    Field* field;
+
+    field = dvmSlotToField(declaringClass, slot);
+    assert(field != NULL);
+
+    ArrayObject* annos = dvmGetFieldAnnotations(field);
+    dvmReleaseTrackedAlloc((Object*) annos, NULL);
+    RETURN_PTR(annos);
+}
+
+/*
+ * private Object[] getSignatureAnnotation()
+ *
+ * Returns the signature annotation.
+ */
+static void Dalvik_java_lang_reflect_Field_getSignatureAnnotation(const u4* args,
+    JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ClassObject* declaringClass = (ClassObject*) args[1];
+    int slot = args[2];
+    Field* field;
+
+    field = dvmSlotToField(declaringClass, slot);
+    assert(field != NULL);
+
+    ArrayObject* arr = dvmGetFieldSignatureAnnotation(field);
+    dvmReleaseTrackedAlloc((Object*) arr, NULL);
+    RETURN_PTR(arr);
+}
+
/*
 * Native method registration table for java.lang.reflect.Field.
 * Each entry is { Java method name, JNI-style type signature, C
 * implementation }; the all-NULL entry marks the end of the table.
 * The single-letter infix in get?Field/set?Field is the JNI primitive
 * type code (B, C, D, F, I, J, S, Z); all primitive variants funnel
 * into the shared get/setPrimitiveField implementation.
 */
static const DalvikNativeMethod java_lang_reflect_Field[] = {
    { "getFieldModifiers",  "(Ljava/lang/Class;I)I",
        Dalvik_java_lang_reflect_Field_getFieldModifiers },
    { "getField",           "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZ)Ljava/lang/Object;",
        Dalvik_java_lang_reflect_Field_getField },
    { "getBField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZI)B",
        Dalvik_java_lang_reflect_Field_getPrimitiveField },
    { "getCField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZI)C",
        Dalvik_java_lang_reflect_Field_getPrimitiveField },
    { "getDField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZI)D",
        Dalvik_java_lang_reflect_Field_getPrimitiveField },
    { "getFField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZI)F",
        Dalvik_java_lang_reflect_Field_getPrimitiveField },
    { "getIField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZI)I",
        Dalvik_java_lang_reflect_Field_getPrimitiveField },
    { "getJField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZI)J",
        Dalvik_java_lang_reflect_Field_getPrimitiveField },
    { "getSField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZI)S",
        Dalvik_java_lang_reflect_Field_getPrimitiveField },
    { "getZField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZI)Z",
        Dalvik_java_lang_reflect_Field_getPrimitiveField },
    { "setField",           "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZLjava/lang/Object;)V",
        Dalvik_java_lang_reflect_Field_setField },
    { "setBField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZIB)V",
        Dalvik_java_lang_reflect_Field_setPrimitiveField },
    { "setCField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZIC)V",
        Dalvik_java_lang_reflect_Field_setPrimitiveField },
    { "setDField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZID)V",
        Dalvik_java_lang_reflect_Field_setPrimitiveField },
    { "setFField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZIF)V",
        Dalvik_java_lang_reflect_Field_setPrimitiveField },
    { "setIField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZII)V",
        Dalvik_java_lang_reflect_Field_setPrimitiveField },
    { "setJField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZIJ)V",
        Dalvik_java_lang_reflect_Field_setPrimitiveField },
    { "setSField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZIS)V",
        Dalvik_java_lang_reflect_Field_setPrimitiveField },
    { "setZField",          "(Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Class;IZIZ)V",
        Dalvik_java_lang_reflect_Field_setPrimitiveField },
    { "getDeclaredAnnotations", "(Ljava/lang/Class;I)[Ljava/lang/annotation/Annotation;",
        Dalvik_java_lang_reflect_Field_getDeclaredAnnotations },
    { "getSignatureAnnotation",  "(Ljava/lang/Class;I)[Ljava/lang/Object;",
        Dalvik_java_lang_reflect_Field_getSignatureAnnotation },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.reflect.Method
+ * ===========================================================================
+ */
+
+/*
+ * private int getMethodModifiers(Class decl_class, int slot)
+ *
+ * (Not sure why the access flags weren't stored in the class along with
+ * everything else.  Not sure why this isn't static.)
+ */
+static void Dalvik_java_lang_reflect_Method_getMethodModifiers(const u4* args,
+    JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ClassObject* declaringClass = (ClassObject*) args[1];
+    int slot = args[2];
+    Method* meth;
+
+    meth = dvmSlotToMethod(declaringClass, slot);
+    RETURN_INT(fixMethodFlags(meth->accessFlags));
+}
+
/*
 * private Object invokeNative(Object obj, Object[] args, Class declaringClass,
 *   Class[] parameterTypes, Class returnType, int slot, boolean noAccessCheck)
 *
 * Invoke a static or virtual method via reflection.  On failure an
 * exception is raised and no result is stored.
 */
static void Dalvik_java_lang_reflect_Method_invokeNative(const u4* args,
    JValue* pResult)
{
    // ignore thisPtr in args[0]
    Object* methObj = (Object*) args[1];        // null for static methods
    ArrayObject* argList = (ArrayObject*) args[2];
    ClassObject* declaringClass = (ClassObject*) args[3];
    ArrayObject* params = (ArrayObject*) args[4];
    ClassObject* returnType = (ClassObject*) args[5];
    int slot = args[6];
    bool noAccessCheck = (args[7] != 0);
    const Method* meth;
    Object* result;

    /*
     * "If the underlying method is static, the class that declared the
     * method is initialized if it has not already been initialized."
     */
    meth = dvmSlotToMethod(declaringClass, slot);
    assert(meth != NULL);

    if (dvmIsStaticMethod(meth)) {
        if (!dvmIsClassInitialized(declaringClass)) {
            if (!dvmInitClass(declaringClass))
                goto init_failed;
        }
    } else {
        /* looks like interfaces need this too? */
        if (dvmIsInterfaceClass(declaringClass) &&
            !dvmIsClassInitialized(declaringClass))
        {
            if (!dvmInitClass(declaringClass))
                goto init_failed;
        }

        /* make sure the object is an instance of the expected class */
        if (!verifyObjectInClass(methObj, declaringClass)) {
            assert(dvmCheckException(dvmThreadSelf()));
            RETURN_VOID();
        }

        /* do the virtual table lookup for the method */
        meth = dvmGetVirtualizedMethod(methObj->clazz, meth);
        if (meth == NULL) {
            assert(dvmCheckException(dvmThreadSelf()));
            RETURN_VOID();
        }
    }

    /*
     * If the method has a return value, "result" will be an object or
     * a boxed primitive.
     */
    result = dvmInvokeMethod(methObj, meth, argList, params, returnType,
                noAccessCheck);

    RETURN_PTR(result);

init_failed:
    /*
     * If initialization failed, an exception will be raised.
     */
    LOGD("Method.invoke() on bad class %s failed\n",
        declaringClass->descriptor);
    assert(dvmCheckException(dvmThreadSelf()));
    RETURN_VOID();
}
+
+/*
+ * public Annotation[] getDeclaredAnnotations(Class declaringClass, int slot)
+ *
+ * Return the annotations declared for this method.
+ */
+static void Dalvik_java_lang_reflect_Method_getDeclaredAnnotations(
+    const u4* args, JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ClassObject* declaringClass = (ClassObject*) args[1];
+    int slot = args[2];
+    Method* meth;
+
+    meth = dvmSlotToMethod(declaringClass, slot);
+    assert(meth != NULL);
+
+    ArrayObject* annos = dvmGetMethodAnnotations(meth);
+    dvmReleaseTrackedAlloc((Object*)annos, NULL);
+    RETURN_PTR(annos);
+}
+
+/*
+ * public Annotation[] getParameterAnnotations(Class declaringClass, int slot)
+ *
+ * Return the annotations declared for this method's parameters.
+ */
+static void Dalvik_java_lang_reflect_Method_getParameterAnnotations(
+    const u4* args, JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ClassObject* declaringClass = (ClassObject*) args[1];
+    int slot = args[2];
+    Method* meth;
+
+    meth = dvmSlotToMethod(declaringClass, slot);
+    assert(meth != NULL);
+
+    ArrayObject* annos = dvmGetParameterAnnotations(meth);
+    dvmReleaseTrackedAlloc((Object*)annos, NULL);
+    RETURN_PTR(annos);
+}
+
+/*
+ * private Object getDefaultValue(Class declaringClass, int slot)
+ *
+ * Return the default value for the annotation member represented by
+ * this Method instance.  Returns NULL if none is defined.
+ */
+static void Dalvik_java_lang_reflect_Method_getDefaultValue(const u4* args,
+    JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ClassObject* declaringClass = (ClassObject*) args[1];
+    int slot = args[2];
+    Method* meth;
+
+    /* make sure this is an annotation class member */
+    if (!dvmIsAnnotationClass(declaringClass))
+        RETURN_PTR(NULL);
+
+    meth = dvmSlotToMethod(declaringClass, slot);
+    assert(meth != NULL);
+
+    Object* def = dvmGetAnnotationDefaultValue(meth);
+    dvmReleaseTrackedAlloc(def, NULL);
+    RETURN_PTR(def);
+}
+
+/*
+ * private Object[] getSignatureAnnotation()
+ *
+ * Returns the signature annotation.
+ */
+static void Dalvik_java_lang_reflect_Method_getSignatureAnnotation(
+    const u4* args, JValue* pResult)
+{
+    // ignore thisPtr in args[0]
+    ClassObject* declaringClass = (ClassObject*) args[1];
+    int slot = args[2];
+    Method* meth;
+
+    meth = dvmSlotToMethod(declaringClass, slot);
+    assert(meth != NULL);
+
+    ArrayObject* arr = dvmGetMethodSignatureAnnotation(meth);
+    dvmReleaseTrackedAlloc((Object*) arr, NULL);
+    RETURN_PTR(arr);
+}
+
/*
 * Native method registration table for java.lang.reflect.Method.
 * Each entry is { Java method name, JNI-style type signature, C
 * implementation }; the all-NULL entry marks the end of the table.
 */
static const DalvikNativeMethod java_lang_reflect_Method[] = {
    { "getMethodModifiers", "(Ljava/lang/Class;I)I",
        Dalvik_java_lang_reflect_Method_getMethodModifiers },
    { "invokeNative",       "(Ljava/lang/Object;[Ljava/lang/Object;Ljava/lang/Class;[Ljava/lang/Class;Ljava/lang/Class;IZ)Ljava/lang/Object;",
        Dalvik_java_lang_reflect_Method_invokeNative },
    { "getDeclaredAnnotations", "(Ljava/lang/Class;I)[Ljava/lang/annotation/Annotation;",
        Dalvik_java_lang_reflect_Method_getDeclaredAnnotations },
    { "getParameterAnnotations", "(Ljava/lang/Class;I)[[Ljava/lang/annotation/Annotation;",
        Dalvik_java_lang_reflect_Method_getParameterAnnotations },
    { "getDefaultValue",    "(Ljava/lang/Class;I)Ljava/lang/Object;",
        Dalvik_java_lang_reflect_Method_getDefaultValue },
    { "getSignatureAnnotation",  "(Ljava/lang/Class;I)[Ljava/lang/Object;",
        Dalvik_java_lang_reflect_Method_getSignatureAnnotation },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.lang.reflect.Proxy
+ * ===========================================================================
+ */
+
+/*
+ * static Class generateProxy(String name, Class[] interfaces,
+ *      ClassLoader loader)
+ *
+ * Generate a proxy class with the specified characteristics.  Throws an
+ * exception on error.
+ */
+static void Dalvik_java_lang_reflect_Proxy_generateProxy(const u4* args,
+    JValue* pResult)
+{
+    StringObject* str = (StringObject*) args[0];
+    ArrayObject* interfaces = (ArrayObject*) args[1];
+    Object* loader = (Object*) args[2];
+    ClassObject* result;
+
+    result = dvmGenerateProxyClass(str, interfaces, loader);
+    RETURN_PTR(result);
+}
+
/* native-method registration table; the all-NULL entry terminates it */
static const DalvikNativeMethod java_lang_reflect_Proxy[] = {
    { "generateProxy", "(Ljava/lang/String;[Ljava/lang/Class;Ljava/lang/ClassLoader;)Ljava/lang/Class;",
        Dalvik_java_lang_reflect_Proxy_generateProxy },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.security.AccessController
+ * ===========================================================================
+ */
+
+/*
+ * private static ProtectionDomain[] getStackDomains()
+ *
+ * Return an array of ProtectionDomain objects from the classes of the
+ * methods on the stack.  Ignore reflection frames.  Stop at the first
+ * privileged frame we see.
+ */
+static void Dalvik_java_security_AccessController_getStackDomains(
+    const u4* args, JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+    const Method** methods = NULL;
+    int length;
+
+    /*
+     * Get an array with the stack trace in it.
+     */
+    if (!dvmCreateStackTraceArray(dvmThreadSelf()->curFrame, &methods, &length))
+    {
+        LOGE("Failed to create stack trace array\n");
+        dvmThrowException("Ljava/lang/InternalError;", NULL);
+        RETURN_VOID();
+    }
+
+    //int i;
+    //LOGI("dvmCreateStackTraceArray results:\n");
+    //for (i = 0; i < length; i++)
+    //    LOGI(" %2d: %s.%s\n", i, methods[i]->clazz->name, methods[i]->name);
+
+    /*
+     * Generate a list of ProtectionDomain objects from the frames that
+     * we're interested in.  Skip the first two methods (this method, and
+     * the one that called us), and ignore reflection frames.  Stop on the
+     * frame *after* the first privileged frame we see as we walk up.
+     *
+     * We create a new array, probably over-allocated, and fill in the
+     * stuff we want.  We could also just run the list twice, but the
+     * costs of the per-frame tests could be more expensive than the
+     * second alloc.  (We could also allocate it on the stack using C99
+     * array creation, but it's not guaranteed to fit.)
+     *
+     * The array we return doesn't include null ProtectionDomain objects,
+     * so we skip those here.
+     */
+    Object** subSet = (Object**) malloc((length-2) * sizeof(Object*));
+    if (subSet == NULL) {
+        LOGE("Failed to allocate subSet (length=%d)\n", length);
+        free(methods);
+        dvmThrowException("Ljava/lang/InternalError;", NULL);
+        RETURN_VOID();
+    }
+    int idx, subIdx = 0;
+    for (idx = 2; idx < length; idx++) {
+        const Method* meth = methods[idx];
+        Object* pd;
+
+        if (dvmIsReflectionMethod(meth))
+            continue;
+
+        if (dvmIsPrivilegedMethod(meth)) {
+            /* find nearest non-reflection frame; note we skip priv frame */
+            //LOGI("GSD priv frame at %s.%s\n", meth->clazz->name, meth->name);
+            while (++idx < length && dvmIsReflectionMethod(methods[idx]))
+                ;
+            length = idx;       // stomp length to end loop
+            meth = methods[idx];
+        }
+
+        /* get the pd object from the method's class */
+        assert(gDvm.offJavaLangClass_pd != 0);
+        pd = dvmGetFieldObject((Object*) meth->clazz,
+                gDvm.offJavaLangClass_pd);
+        //LOGI("FOUND '%s' pd=%p\n", meth->clazz->name, pd);
+        if (pd != NULL)
+            subSet[subIdx++] = pd;
+    }
+
+    //LOGI("subSet:\n");
+    //for (i = 0; i < subIdx; i++)
+    //    LOGI("  %2d: %s\n", i, subSet[i]->clazz->name);
+
+    /*
+     * Create an array object to contain "subSet".
+     */
+    ClassObject* pdArrayClass = NULL;
+    ArrayObject* domains = NULL;
+    pdArrayClass = dvmFindArrayClass("[Ljava/security/ProtectionDomain;", NULL);
+    if (pdArrayClass == NULL) {
+        LOGW("Unable to find ProtectionDomain class for array\n");
+        goto bail;
+    }
+    domains = dvmAllocArray(pdArrayClass, subIdx, kObjectArrayRefWidth,
+                ALLOC_DEFAULT);
+    if (domains == NULL) {
+        LOGW("Unable to allocate pd array (%d elems)\n", subIdx);
+        goto bail;
+    }
+
+    /* copy the ProtectionDomain objects out */
+    Object** objects = (Object**) domains->contents;
+    for (idx = 0; idx < subIdx; idx++)
+        *objects++ = subSet[idx];
+
+bail:
+    free(subSet);
+    free(methods);
+    dvmReleaseTrackedAlloc((Object*) domains, NULL);
+    RETURN_PTR(domains);
+}
+
/* native-method registration table; the all-NULL entry terminates it */
static const DalvikNativeMethod java_security_AccessController[] = {
    { "getStackDomains",    "()[Ljava/security/ProtectionDomain;",
        Dalvik_java_security_AccessController_getStackDomains },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      java.util.concurrent.atomic.AtomicLong
+ * ===========================================================================
+ */
+
+/*
+ * private static native boolean VMSupportsCS8();
+ */
+static void Dalvik_java_util_concurrent_atomic_AtomicLong_VMSupportsCS8(
+    const u4* args, JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+    RETURN_BOOLEAN(1);
+}
+
/* native-method registration table; the all-NULL entry terminates it */
static const DalvikNativeMethod java_util_concurrent_atomic_AtomicLong[] = {
    { "VMSupportsCS8", "()Z",
      Dalvik_java_util_concurrent_atomic_AtomicLong_VMSupportsCS8 },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      org.apache.harmony.dalvik.ddmc.DdmServer
+ * ===========================================================================
+ */
+
+/*
+ * private static void nativeSendChunk(int type, byte[] data,
+ *      int offset, int length)
+ *
+ * Send a DDM chunk to the server.
+ */
+static void Dalvik_org_apache_harmony_dalvik_ddmc_DdmServer_nativeSendChunk(
+    const u4* args, JValue* pResult)
+{
+    int type = args[0];
+    ArrayObject* data = (ArrayObject*) args[1];
+    int offset = args[2];
+    int length = args[3];
+
+    assert(offset+length <= (int)data->length);
+
+    dvmDbgDdmSendChunk(type, length, (const u1*)data->contents + offset);
+    RETURN_VOID();
+}
+
/* native-method registration table; the all-NULL entry terminates it */
static const DalvikNativeMethod org_apache_harmony_dalvik_ddmc_DdmServer[] = {
    { "nativeSendChunk",    "(I[BII)V",
        Dalvik_org_apache_harmony_dalvik_ddmc_DdmServer_nativeSendChunk },
    { NULL, NULL, NULL },
};
+
+
+/*
+ * ===========================================================================
+ *      org.apache.harmony.dalvik.ddmc.DdmVmInternal
+ * ===========================================================================
+ */
+
+/*
+ * public static void threadNotify(boolean enable)
+ *
+ * Enable DDM thread notifications.
+ */
+static void Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_threadNotify(
+    const u4* args, JValue* pResult)
+{
+    bool enable = (args[0] != 0);
+
+    //LOGI("ddmThreadNotification: %d\n", enable);
+    dvmDdmSetThreadNotification(enable);
+    RETURN_VOID();
+}
+
+/*
+ * public static byte[] getThreadStats()
+ *
+ * Build and return a byte[] describing the current set of threads.
+ */
+static void Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_getThreadStats(
+    const u4* args, JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+
+    /* The generator put the array in the tracked-allocation table; release
+     * it now that it's being handed back to managed code. */
+    ArrayObject* stats = dvmDdmGenerateThreadStats();
+    dvmReleaseTrackedAlloc((Object*) stats, NULL);
+    RETURN_PTR(stats);
+}
+
+/*
+ * public static boolean heapInfoNotify(int when)
+ *
+ * Enable DDM heap info notifications.  Returns whatever
+ * dvmDdmHandleHpifChunk() reports (true on success).
+ *
+ * (Registered with signature "(I)Z"; the previous comment's
+ * "int heapInfoNotify(int what)" was wrong on both counts.)
+ */
+static void Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_heapInfoNotify(
+    const u4* args, JValue* pResult)
+{
+    int when = args[0];
+    bool ret;
+
+    ret = dvmDdmHandleHpifChunk(when);
+    RETURN_BOOLEAN(ret);
+}
+
+/*
+ * public static boolean heapSegmentNotify(int when, int what, boolean native)
+ *
+ * Enable DDM heap segment notifications.
+ */
+static void
+    Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_heapSegmentNotify(
+    const u4* args, JValue* pResult)
+{
+    int  when     = args[0];        /* 0=never (off), 1=during GC */
+    int  what     = args[1];        /* 0=merged objects, 1=distinct objects */
+    bool isNative = (args[2] != 0); /* false=virtual heap, true=native heap */
+
+    RETURN_BOOLEAN(dvmDdmHandleHpsgNhsgChunk(when, what, isNative));
+}
+
+/*
+ * public static StackTraceElement[] getStackTraceById(int threadId)
+ *
+ * Fetch a stack trace for the given thread as a StackTraceElement array.
+ * Returns NULL on failure, e.g. if the threadId couldn't be found.
+ */
+static void
+    Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_getStackTraceById(
+    const u4* args, JValue* pResult)
+{
+    u4 threadId = args[0];
+
+    ArrayObject* stackTrace = dvmDdmGetStackTraceById(threadId);
+    RETURN_PTR(stackTrace);
+}
+
+/*
+ * public static void enableRecentAllocations(boolean enable)
+ *
+ * Turn recent-allocation tracking on or off.
+ */
+static void
+    Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_enableRecentAllocations(
+    const u4* args, JValue* pResult)
+{
+    if (args[0] != 0) {
+        /* "(void)" marks the deliberate best-effort use of the result. */
+        (void) dvmEnableAllocTracker();
+    } else {
+        (void) dvmDisableAllocTracker();
+    }
+    RETURN_VOID();
+}
+
+/*
+ * public static boolean getRecentAllocationStatus()
+ *
+ * Returns "true" if allocation tracking is enabled.
+ */
+static void
+    Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_getRecentAllocationStatus(
+    const u4* args, JValue* pResult)
+{
+    UNUSED_PARAMETER(args);
+    /* "Enabled" is defined as a non-NULL record buffer -- presumably
+     * allocated by the tracker on enable; see AllocTracker. */
+    RETURN_BOOLEAN(gDvm.allocRecords != NULL);
+}
+
+/*
+ * public static byte[] getRecentAllocations()
+ *
+ * Fill a buffer with data on recent heap allocations.
+ */
+static void
+    Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_getRecentAllocations(
+    const u4* args, JValue* pResult)
+{
+    UNUSED_PARAMETER(args);     /* no arguments; mark per file convention */
+
+    ArrayObject* data;
+
+    /* The array is in the tracked-allocation table; release it now that
+     * it's being handed back to managed code. */
+    data = dvmDdmGetRecentAllocations();
+    dvmReleaseTrackedAlloc((Object*) data, NULL);
+    RETURN_PTR(data);
+}
+
+/* Native method table for org.apache.harmony.dalvik.ddmc.DdmVmInternal. */
+static const DalvikNativeMethod
+    org_apache_harmony_dalvik_ddmc_DdmVmInternal[] = {
+    { "threadNotify",       "(Z)V",
+      Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_threadNotify },
+    { "getThreadStats",     "()[B",
+      Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_getThreadStats },
+    { "heapInfoNotify",     "(I)Z",
+      Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_heapInfoNotify },
+    { "heapSegmentNotify",  "(IIZ)Z",
+      Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_heapSegmentNotify },
+    { "getStackTraceById",  "(I)[Ljava/lang/StackTraceElement;",
+      Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_getStackTraceById },
+    { "enableRecentAllocations", "(Z)V",
+      Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_enableRecentAllocations },
+    { "getRecentAllocationStatus", "()Z",
+      Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_getRecentAllocationStatus },
+    { "getRecentAllocations", "()[B",
+      Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_getRecentAllocations },
+    { NULL, NULL, NULL },   /* sentinel: end of table */
+};
+
+
+/*
+ * ===========================================================================
+ *      sun.misc.Unsafe
+ * ===========================================================================
+ */
+
+/*
+ * private static native long objectFieldOffset0(Field field);
+ *
+ * Return the byte offset of an instance field, for use with the
+ * offset-based accessors below.
+ */
+static void Dalvik_sun_misc_Unsafe_objectFieldOffset0(const u4* args,
+    JValue* pResult)
+{
+    Object* fieldReflectObj = (Object*) args[0];
+    InstField* instField =
+        (InstField*) dvmGetFieldFromReflectObj(fieldReflectObj);
+
+    RETURN_LONG((s8) instField->byteOffset);
+}
+
+/*
+ * private static native int arrayBaseOffset0(Class clazz);
+ *
+ * Byte offset from the start of an array object to its first element.
+ */
+static void Dalvik_sun_misc_Unsafe_arrayBaseOffset0(const u4* args,
+    JValue* pResult)
+{
+    // The base offset is not type-dependent in this vm.
+    UNUSED_PARAMETER(args);
+    RETURN_INT(offsetof(ArrayObject, contents));
+}
+
+/*
+ * private static native int arrayIndexScale0(Class clazz);
+ *
+ * Report the width in bytes of one element of the given array class.
+ */
+static void Dalvik_sun_misc_Unsafe_arrayIndexScale0(const u4* args,
+    JValue* pResult)
+{
+    ClassObject* clazz = (ClassObject*) args[0];
+    int width;
+
+    if (clazz == gDvm.classArrayBoolean || clazz == gDvm.classArrayByte)
+        width = sizeof(u1);
+    else if (clazz == gDvm.classArrayChar || clazz == gDvm.classArrayShort)
+        width = sizeof(u2);
+    else if (clazz == gDvm.classArrayLong || clazz == gDvm.classArrayDouble)
+        width = sizeof(u8);
+    else
+        width = sizeof(u4);     /* everything else: int, float, references */
+
+    RETURN_INT(width);
+}
+
+/*
+ * public native boolean compareAndSwapInt(Object obj, long offset,
+ *         int expectedValue, int newValue);
+ *
+ * Atomically store newValue at obj+offset if that slot currently holds
+ * expectedValue.  Returns true if the swap happened.
+ */
+static void Dalvik_sun_misc_Unsafe_compareAndSwapInt(const u4* args,
+    JValue* pResult)
+{
+    /* args[0] is the Unsafe instance ("this"); it isn't needed. */
+    Object* obj = (Object*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+    s4 expected = args[4];
+    s4 desired = args[5];
+    volatile int32_t* fieldPtr = (volatile int32_t*) (((u1*) obj) + offset);
+
+    /* android_atomic_cmpxchg() returns 0 on success, nonzero on failure. */
+    RETURN_BOOLEAN(android_atomic_cmpxchg(expected, desired, fieldPtr) == 0);
+}
+
+/*
+ * public native boolean compareAndSwapLong(Object obj, long offset,
+ *         long expectedValue, long newValue);
+ *
+ * 64-bit CAS via the quasiatomic helpers.
+ */
+static void Dalvik_sun_misc_Unsafe_compareAndSwapLong(const u4* args,
+    JValue* pResult)
+{
+    // We ignore the this pointer in args[0].
+    Object* obj = (Object*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+    s8 expectedValue = GET_ARG_LONG(args, 4);
+    s8 newValue = GET_ARG_LONG(args, 6);
+    volatile int64_t* address = (volatile int64_t*) (((u1*) obj) + offset);
+
+    // Note: android_quasiatomic_cmpxchg_64() returns 0 on success, not
+    // failure.  (Earlier comment incorrectly named android_atomic_cmpxchg.)
+    int result =
+        android_quasiatomic_cmpxchg_64(expectedValue, newValue, address);
+
+    RETURN_BOOLEAN(result == 0);
+}
+
+/*
+ * public native boolean compareAndSwapObject(Object obj, long offset,
+ *         Object expectedValue, Object newValue);
+ *
+ * Atomically store the reference newValue at obj+offset if that slot
+ * currently holds expectedValue.  Returns true if the swap happened.
+ */
+static void Dalvik_sun_misc_Unsafe_compareAndSwapObject(const u4* args,
+    JValue* pResult)
+{
+    // We ignore the this pointer in args[0].
+    Object* obj = (Object*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+    Object* expectedValue = (Object*) args[4];
+    Object* newValue = (Object*) args[5];
+    /* volatile-qualified for consistency with compareAndSwapInt/Long; the
+     * casts below show references are 32 bits wide in this VM, so the int
+     * CAS primitive covers a reference slot. */
+    volatile int32_t* address = (volatile int32_t*) (((u1*) obj) + offset);
+
+    // Note: android_atomic_cmpxchg() returns 0 on success, not failure.
+    // NOTE(review): no GC write barrier is issued for this reference
+    // store -- confirm the collector doesn't require one here.
+    int result = android_atomic_cmpxchg((int32_t) expectedValue,
+            (int32_t) newValue, address);
+
+    RETURN_BOOLEAN(result == 0);
+}
+
+/*
+ * public native int getIntVolatile(Object obj, long offset);
+ *
+ * Read the int at obj+offset through a volatile pointer.
+ */
+static void Dalvik_sun_misc_Unsafe_getIntVolatile(const u4* args,
+    JValue* pResult)
+{
+    // We ignore the this pointer in args[0].
+    Object* obj = (Object*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+    volatile s4* address = (volatile s4*) (((u1*) obj) + offset);
+
+    RETURN_INT(*address);
+}
+
+/*
+ * public native void putIntVolatile(Object obj, long offset, int newValue);
+ *
+ * Write the int at obj+offset through a volatile pointer.
+ *
+ * NOTE(review): volatile only prevents compiler reordering; no explicit
+ * memory barrier is issued -- confirm this suffices on SMP targets.
+ */
+static void Dalvik_sun_misc_Unsafe_putIntVolatile(const u4* args,
+    JValue* pResult)
+{
+    // We ignore the this pointer in args[0].
+    Object* obj = (Object*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+    s4 value = (s4) args[4];
+    volatile s4* address = (volatile s4*) (((u1*) obj) + offset);
+
+    *address = value;
+    RETURN_VOID();
+}
+
+/*
+ * public native long getLongVolatile(Object obj, long offset);
+ *
+ * 64-bit volatile read; goes through android_quasiatomic_read_64(),
+ * presumably so the two 32-bit halves are read as a unit.
+ */
+static void Dalvik_sun_misc_Unsafe_getLongVolatile(const u4* args,
+    JValue* pResult)
+{
+    // We ignore the this pointer in args[0].
+    Object* obj = (Object*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+    volatile s8* address = (volatile s8*) (((u1*) obj) + offset);
+
+    RETURN_LONG(android_quasiatomic_read_64(address));
+}
+
+/*
+ * public native void putLongVolatile(Object obj, long offset, long newValue);
+ *
+ * 64-bit volatile write; goes through android_quasiatomic_swap_64(),
+ * presumably so the two 32-bit halves are written as a unit.
+ */
+static void Dalvik_sun_misc_Unsafe_putLongVolatile(const u4* args,
+    JValue* pResult)
+{
+    // We ignore the this pointer in args[0].
+    Object* obj = (Object*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+    s8 value = GET_ARG_LONG(args, 4);
+    volatile s8* address = (volatile s8*) (((u1*) obj) + offset);
+
+    android_quasiatomic_swap_64(value, address);
+    RETURN_VOID();
+}
+
+/*
+ * public native Object getObjectVolatile(Object obj, long offset);
+ *
+ * Read the reference at obj+offset through a volatile pointer (a plain
+ * single-word load on this VM).
+ */
+static void Dalvik_sun_misc_Unsafe_getObjectVolatile(const u4* args,
+    JValue* pResult)
+{
+    // We ignore the this pointer in args[0].
+    Object* obj = (Object*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+    volatile Object** address = (volatile Object**) (((u1*) obj) + offset);
+
+    RETURN_PTR((void*) *address);
+}
+
+/*
+ * public native void putObjectVolatile(Object obj, long offset,
+ *         Object newValue);
+ *
+ * Write the reference at obj+offset through a volatile pointer.
+ *
+ * NOTE(review): no GC write barrier is issued for this reference store --
+ * confirm the collector doesn't require one here.
+ */
+static void Dalvik_sun_misc_Unsafe_putObjectVolatile(const u4* args,
+    JValue* pResult)
+{
+    // We ignore the this pointer in args[0].
+    Object* obj = (Object*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+    Object* value = (Object*) args[4];
+    volatile Object** address = (volatile Object**) (((u1*) obj) + offset);
+
+    *address = value;
+    RETURN_VOID();
+}
+            
+/*
+ * public native int getInt(Object obj, long offset);
+ *
+ * Plain (non-volatile) read of the int at obj+offset.
+ */
+static void Dalvik_sun_misc_Unsafe_getInt(const u4* args, JValue* pResult)
+{
+    /* args[0] is the Unsafe instance ("this"); it isn't needed. */
+    u1* base = (u1*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+
+    RETURN_INT(*(s4*) (base + offset));
+}
+
+/*
+ * public native void putInt(Object obj, long offset, int newValue);
+ *
+ * Plain (non-volatile) write of the int at obj+offset.
+ */
+static void Dalvik_sun_misc_Unsafe_putInt(const u4* args, JValue* pResult)
+{
+    /* args[0] is the Unsafe instance ("this"); it isn't needed. */
+    u1* base = (u1*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+    s4 newValue = (s4) args[4];
+
+    *(s4*) (base + offset) = newValue;
+    RETURN_VOID();
+}
+
+/*
+ * public native long getLong(Object obj, long offset);
+ *
+ * Plain (non-volatile, non-atomic) read of the long at obj+offset.
+ */
+static void Dalvik_sun_misc_Unsafe_getLong(const u4* args, JValue* pResult)
+{
+    /* args[0] is the Unsafe instance ("this"); it isn't needed. */
+    u1* base = (u1*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+
+    RETURN_LONG(*(s8*) (base + offset));
+}
+
+/*
+ * public native void putLong(Object obj, long offset, long newValue);
+ *
+ * Plain (non-volatile, non-atomic) write of the long at obj+offset.
+ */
+static void Dalvik_sun_misc_Unsafe_putLong(const u4* args, JValue* pResult)
+{
+    /* args[0] is the Unsafe instance ("this"); it isn't needed. */
+    u1* base = (u1*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+    s8 newValue = GET_ARG_LONG(args, 4);
+
+    *(s8*) (base + offset) = newValue;
+    RETURN_VOID();
+}
+
+/*
+ * public native Object getObject(Object obj, long offset);
+ *
+ * Plain (non-volatile) read of the reference at obj+offset.
+ */
+static void Dalvik_sun_misc_Unsafe_getObject(const u4* args, JValue* pResult)
+{
+    /* args[0] is the Unsafe instance ("this"); it isn't needed. */
+    u1* base = (u1*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+
+    RETURN_PTR(*(Object**) (base + offset));
+}
+
+/*
+ * public native void putObject(Object obj, long offset, Object newValue);
+ *
+ * Plain (non-volatile) write of the reference at obj+offset.
+ *
+ * NOTE(review): no GC write barrier is issued for this reference store --
+ * confirm the collector doesn't require one here.
+ */
+static void Dalvik_sun_misc_Unsafe_putObject(const u4* args, JValue* pResult)
+{
+    // We ignore the this pointer in args[0].
+    Object* obj = (Object*) args[1];
+    s8 offset = GET_ARG_LONG(args, 2);
+    Object* value = (Object*) args[4];
+    Object** address = (Object**) (((u1*) obj) + offset);
+
+    *address = value;
+    RETURN_VOID();
+}
+
+/* Native method table for sun.misc.Unsafe. */
+static const DalvikNativeMethod sun_misc_Unsafe[] = {
+    { "objectFieldOffset0", "(Ljava/lang/reflect/Field;)J",
+      Dalvik_sun_misc_Unsafe_objectFieldOffset0 },
+    { "arrayBaseOffset0", "(Ljava/lang/Class;)I",
+      Dalvik_sun_misc_Unsafe_arrayBaseOffset0 },
+    { "arrayIndexScale0", "(Ljava/lang/Class;)I",
+      Dalvik_sun_misc_Unsafe_arrayIndexScale0 },
+    { "compareAndSwapInt", "(Ljava/lang/Object;JII)Z",
+      Dalvik_sun_misc_Unsafe_compareAndSwapInt },
+    { "compareAndSwapLong", "(Ljava/lang/Object;JJJ)Z",
+      Dalvik_sun_misc_Unsafe_compareAndSwapLong },
+    { "compareAndSwapObject",
+      "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z",
+      Dalvik_sun_misc_Unsafe_compareAndSwapObject },
+    { "getIntVolatile", "(Ljava/lang/Object;J)I",
+      Dalvik_sun_misc_Unsafe_getIntVolatile },
+    { "putIntVolatile", "(Ljava/lang/Object;JI)V",
+      Dalvik_sun_misc_Unsafe_putIntVolatile },
+    { "getLongVolatile", "(Ljava/lang/Object;J)J",
+      Dalvik_sun_misc_Unsafe_getLongVolatile },
+    { "putLongVolatile", "(Ljava/lang/Object;JJ)V",
+      Dalvik_sun_misc_Unsafe_putLongVolatile },
+    { "getObjectVolatile", "(Ljava/lang/Object;J)Ljava/lang/Object;",
+      Dalvik_sun_misc_Unsafe_getObjectVolatile },
+    { "putObjectVolatile", "(Ljava/lang/Object;JLjava/lang/Object;)V",
+      Dalvik_sun_misc_Unsafe_putObjectVolatile },
+    { "getInt", "(Ljava/lang/Object;J)I",
+      Dalvik_sun_misc_Unsafe_getInt },
+    { "putInt", "(Ljava/lang/Object;JI)V",
+      Dalvik_sun_misc_Unsafe_putInt },
+    { "getLong", "(Ljava/lang/Object;J)J",
+      Dalvik_sun_misc_Unsafe_getLong },
+    { "putLong", "(Ljava/lang/Object;JJ)V",
+      Dalvik_sun_misc_Unsafe_putLong },
+    { "getObject", "(Ljava/lang/Object;J)Ljava/lang/Object;",
+      Dalvik_sun_misc_Unsafe_getObject },
+    { "putObject", "(Ljava/lang/Object;JLjava/lang/Object;)V",
+      Dalvik_sun_misc_Unsafe_putObject },
+    { NULL, NULL, NULL },   /* sentinel: end of table */
+};
+
+
+/*
+ * ===========================================================================
+ *      General
+ * ===========================================================================
+ */
+
+/*
+ * Set of classes for which we provide methods.
+ *
+ * The last field, classNameHash, is filled in at startup
+ * (dvmInternalNativeStartup); lookup compares the hash before the
+ * descriptor string.
+ */
+static DalvikNativeClass gDvmNativeMethodSet[] = {
+    { "Ljava/lang/Object;",               java_lang_Object, 0 },
+    { "Ljava/lang/Class;",                java_lang_Class, 0 },
+    { "Ljava/lang/Runtime;",              java_lang_Runtime, 0 },
+    { "Ljava/lang/String;",               java_lang_String, 0 },
+    { "Ljava/lang/System;",               java_lang_System, 0 },
+    { "Ljava/lang/SystemProperties;",     java_lang_SystemProperties, 0 },
+    { "Ljava/lang/Throwable;",            java_lang_Throwable, 0 },
+    { "Ljava/lang/VMClassLoader;",        java_lang_VMClassLoader, 0 },
+    { "Ljava/lang/VMThread;",             java_lang_VMThread, 0 },
+    { "Ljava/lang/reflect/AccessibleObject;",
+                                java_lang_reflect_AccessibleObject, 0 },
+    { "Ljava/lang/reflect/Array;",        java_lang_reflect_Array, 0 },
+    { "Ljava/lang/reflect/Constructor;",  java_lang_reflect_Constructor, 0 },
+    { "Ljava/lang/reflect/Field;",        java_lang_reflect_Field, 0 },
+    { "Ljava/lang/reflect/Method;",       java_lang_reflect_Method, 0 },
+    { "Ljava/lang/reflect/Proxy;",        java_lang_reflect_Proxy, 0 },
+    { "Ljava/security/AccessController;", java_security_AccessController, 0 },
+    { "Ljava/util/concurrent/atomic/AtomicLong;",
+                                java_util_concurrent_atomic_AtomicLong, 0 },
+    { "Ldalvik/system/VMDebug;",          dalvik_system_VMDebug, 0 },
+    { "Ldalvik/system/DexFile;",          dalvik_system_DexFile, 0 },
+    { "Ldalvik/system/VMRuntime;",        dalvik_system_VMRuntime, 0 },
+    { "Ldalvik/system/Zygote;",           dalvik_system_Zygote, 0 },
+    { "Ldalvik/system/VMStack;",          dalvik_system_VMStack, 0 },
+    { "Lorg/apache/harmony/dalvik/ddmc/DdmServer;",
+          org_apache_harmony_dalvik_ddmc_DdmServer, 0 },
+    { "Lorg/apache/harmony/dalvik/ddmc/DdmVmInternal;",
+          org_apache_harmony_dalvik_ddmc_DdmVmInternal, 0 },
+    { "Lorg/apache/harmony/dalvik/NativeTestTarget;",
+          org_apache_harmony_dalvik_NativeTestTarget, 0 },
+    { "Lsun/misc/Unsafe;",                sun_misc_Unsafe, 0 },
+    { NULL, NULL, 0 },      /* sentinel: end of table */
+};
+
+
+/*
+ * One-time setup: precompute the hash of each class descriptor in
+ * gDvmNativeMethodSet (used by dvmLookupInternalNativeMethod to avoid a
+ * strcmp per entry), and create the table that tracks DEX files opened
+ * by user code.
+ *
+ * Returns false if the hash table can't be allocated.
+ */
+bool dvmInternalNativeStartup(void)
+{
+    DalvikNativeClass* classPtr = gDvmNativeMethodSet;
+
+    while (classPtr->classDescriptor != NULL) {
+        classPtr->classDescriptorHash =
+            dvmComputeUtf8Hash(classPtr->classDescriptor);
+        classPtr++;
+    }
+
+    /* freeDexOrJar is the per-entry destructor for the table; presumably
+     * defined earlier in this file -- it releases each entry on free. */
+    gDvm.userDexFiles = dvmHashTableCreate(2, freeDexOrJar);
+    if (gDvm.userDexFiles == NULL)
+        return false;
+
+    return true;
+}
+
+/*
+ * Clean up: release the user DEX file table created by
+ * dvmInternalNativeStartup().  Entries are presumably released via the
+ * freeDexOrJar callback registered with the table -- confirm.
+ */
+void dvmInternalNativeShutdown(void)
+{
+    dvmHashTableFree(gDvm.userDexFiles);
+}
+
+/*
+ * Search the internal native set for a match.
+ *
+ * The class descriptor is compared by precomputed hash first, then by
+ * string; within a matching class, each registered name+signature is
+ * compared against the method.  Returns the native implementation, or
+ * NULL if none is registered.
+ */
+DalvikNativeFunc dvmLookupInternalNativeMethod(const Method* method)
+{
+    const char* classDescriptor = method->clazz->descriptor;
+    u4 hash = dvmComputeUtf8Hash(classDescriptor);
+    const DalvikNativeClass* pClass;
+
+    for (pClass = gDvmNativeMethodSet;
+         pClass->classDescriptor != NULL; pClass++)
+    {
+        if (pClass->classDescriptorHash != hash ||
+            strcmp(pClass->classDescriptor, classDescriptor) != 0)
+        {
+            continue;
+        }
+
+        const DalvikNativeMethod* pMeth;
+        for (pMeth = pClass->methodInfo; pMeth->name != NULL; pMeth++) {
+            if (dvmCompareNameDescriptorAndMethod(pMeth->name,
+                pMeth->signature, method) == 0)
+            {
+                /* match */
+                return pMeth->fnPtr;
+            }
+        }
+    }
+
+    return NULL;
+}
+
+
+/*
+ * Magic "internal native" code stub, inserted into abstract method
+ * definitions when a class is first loaded.  This throws the expected
+ * exception so we don't have to explicitly check for it in the interpreter.
+ */
+void dvmAbstractMethodStub(const u4* args, JValue* pResult)
+{
+    /* Neither the arguments nor the result slot are used; the call always
+     * ends by raising AbstractMethodError. */
+    UNUSED_PARAMETER(args);
+    UNUSED_PARAMETER(pResult);
+
+    LOGI("--- called into dvmAbstractMethodStub\n");
+    dvmThrowException("Ljava/lang/AbstractMethodError;",
+        "abstract method not implemented");
+}
diff --git a/vm/JarFile.c b/vm/JarFile.c
new file mode 100644
index 0000000..ac857a3
--- /dev/null
+++ b/vm/JarFile.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Access the contents of a Jar file.
+ *
+ * This isn't actually concerned with any of the Jar-like elements; it
+ * just wants a zip archive with "classes.dex" inside.  In Android the
+ * most common example is ".apk".
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <zlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+/* Name of the DEX entry we look for inside a jar/apk/zip archive. */
+static const char* kDexInJarName = "classes.dex";
+
+/*
+ * Attempt to open a file whose name is similar to <fileName>,
+ * but with the supplied suffix.  E.g.,
+ * openAlternateSuffix("Home.apk", "dex", O_RDONLY) will attempt
+ * to open "Home.dex".  If the open succeeds, a pointer to a
+ * malloc()ed copy of the opened file name will be put in <*pCachedName>.
+ *
+ * <flags> is passed directly to open(). O_CREAT is not supported.
+ *
+ * Returns the open fd, or -1 with errno set (ENOENT if <fileName> has
+ * no extension, ENOMEM on allocation failure, or open()'s errno).
+ */
+static int openAlternateSuffix(const char *fileName, const char *suffix,
+    int flags, char **pCachedName)
+{
+    /* Locate the final '.' so the suffix can replace whatever follows it. */
+    const char *dot = strrchr(fileName, '.');
+    if (dot == NULL) {
+        errno = ENOENT;
+        return -1;
+    }
+
+    size_t prefixLen = (size_t)(dot - fileName) + 1;    /* includes '.' */
+    size_t suffixLen = strlen(suffix);
+    char *altName = malloc(prefixLen + suffixLen + 1);
+    if (altName == NULL) {
+        errno = ENOMEM;
+        return -1;
+    }
+
+    memcpy(altName, fileName, prefixLen);
+    memcpy(altName + prefixLen, suffix, suffixLen + 1); /* copies the NUL */
+
+    int fd = open(altName, flags);
+    if (fd >= 0) {
+        *pCachedName = altName;
+        return fd;
+    }
+
+    LOGV("Couldn't open %s: %s\n", altName, strerror(errno));
+    free(altName);
+    return -1;
+}
+
+/*
+ * Checks the dependencies of the dex cache file corresponding
+ * to the jar file at the absolute path "fileName".
+ *
+ * Returns DEX_CACHE_OK if an up-to-date optimized DEX is available (or
+ * the file is on the bootclasspath); otherwise one of the DEX_CACHE_*
+ * codes describing what's wrong.
+ */
+DexCacheStatus dvmDexCacheStatus(const char *fileName)
+{
+    ZipArchive archive;
+    char* cachedName = NULL;
+    int fd = -1;    /* initialized so the "bail" path never tests garbage */
+    DexCacheStatus result = DEX_CACHE_ERROR;
+    ZipEntry entry;
+
+    /* Always treat elements of the bootclasspath as up-to-date.
+     * The fact that interpreted code is running at all means that this
+     * should be true.
+     */
+    if (dvmClassPathContains(gDvm.bootClassPath, fileName)) {
+        return DEX_CACHE_OK;
+    }
+
+    //TODO: match dvmJarFileOpen()'s logic.  Not super-important
+    //      (the odex-first logic is only necessary for dexpreopt)
+    //      but it would be nice to be consistent.
+
+    /* Try to find the dex file inside of the archive.
+     */
+    if (dexZipOpenArchive(fileName, &archive) != 0) {
+        return DEX_CACHE_BAD_ARCHIVE;
+    }
+    entry = dexZipFindEntry(&archive, kDexInJarName);
+    if (entry != NULL) {
+        bool newFile = false;
+
+        /*
+         * See if there's an up-to-date copy of the optimized dex
+         * in the cache, but don't create one if there isn't.
+         */
+        LOGV("dvmDexCacheStatus: Checking cache for %s\n", fileName);
+        fd = dvmOpenCachedDexFile(fileName, kDexInJarName,
+                dexGetZipEntryModTime(&archive, entry),
+                dexGetZipEntryCrc32(&archive, entry),
+                /*isBootstrap=*/false, &cachedName, &newFile,
+                /*createIfMissing=*/false);
+        LOGV("dvmOpenCachedDexFile returned fd %d\n", fd);
+        if (fd < 0) {
+            result = DEX_CACHE_STALE;
+            goto bail;
+        }
+
+        /* dvmOpenCachedDexFile locks the file as a side-effect.
+         * Unlock and close it.
+         */
+        if (!dvmUnlockCachedDexFile(fd)) {
+            /* uh oh -- this process needs to exit or we'll wedge the system */
+            LOGE("Unable to unlock DEX file\n");
+            goto bail;
+        }
+
+        /* When createIfMissing is false, dvmOpenCachedDexFile() only
+         * returns a valid fd if the cache file is up-to-date.
+         */
+    } else {
+        /*
+         * There's no dex file in the jar file.  See if there's an
+         * optimized dex file living alongside the jar.
+         */
+        fd = openAlternateSuffix(fileName, "odex", O_RDONLY, &cachedName);
+        if (fd < 0) {
+            LOGI("Zip is good, but no %s inside, and no .odex "
+                    "file in the same directory\n", kDexInJarName);
+            result = DEX_CACHE_BAD_ARCHIVE;
+            goto bail;
+        }
+
+        LOGV("Using alternate file (odex) for %s ...\n", fileName);
+        if (!dvmCheckOptHeaderAndDependencies(fd, false, 0, 0, true, true)) {
+            LOGE("%s odex has stale dependencies\n", fileName);
+            LOGE("odex source not available -- failing\n");
+            result = DEX_CACHE_STALE_ODEX;
+            goto bail;
+        } else {
+            LOGV("%s odex has good dependencies\n", fileName);
+        }
+    }
+    result = DEX_CACHE_OK;
+
+bail:
+    dexZipCloseArchive(&archive);
+    free(cachedName);
+    if (fd >= 0) {
+        close(fd);
+    }
+    return result;
+}
+
+/*
+ * Open a Jar file.  It's okay if it's just a Zip archive without all of
+ * the Jar trimmings, but we do insist on finding "classes.dex" inside
+ * or an appropriately-named ".odex" file alongside.
+ *
+ * If "isBootstrap" is not set, the optimizer/verifier regards this DEX as
+ * being part of a different class loader.
+ *
+ * On success, returns 0 and stores a newly-allocated JarFile in
+ * "*ppJarFile" (caller releases it with dvmJarFileFree()).  Returns -1
+ * on failure, leaving *ppJarFile untouched.
+ */
+int dvmJarFileOpen(const char* fileName, JarFile** ppJarFile, bool isBootstrap)
+{
+    ZipArchive archive;
+    DvmDex* pDvmDex = NULL;
+    char* cachedName = NULL;
+    bool archiveOpen = false;
+    bool locked = false;
+    int fd = -1;
+    int result = -1;
+
+    /* Even if we're not going to look at the archive, we need to
+     * open it so we can stuff it into ppJarFile.
+     */
+    if (dexZipOpenArchive(fileName, &archive) != 0)
+        goto bail;
+    archiveOpen = true;
+
+    /* If we fork/exec into dexopt, don't let it inherit the archive's fd.
+     */
+    dvmSetCloseOnExec(dexZipGetArchiveFd(&archive));
+
+    /* First, look for a ".odex" alongside the jar file.  It will
+     * have the same name/path except for the extension.
+     */
+    fd = openAlternateSuffix(fileName, "odex", O_RDONLY, &cachedName);
+    if (fd >= 0) {
+        LOGV("Using alternate file (odex) for %s ...\n", fileName);
+        if (!dvmCheckOptHeaderAndDependencies(fd, false, 0, 0, true, true)) {
+            LOGE("%s odex has stale dependencies\n", fileName);
+            free(cachedName);
+            cachedName = NULL;  /* was left dangling: if the tryArchive path
+                                 * below bails before reassigning it, "bail"
+                                 * would free() it a second time */
+            close(fd);
+            fd = -1;
+            goto tryArchive;
+        } else {
+            LOGV("%s odex has good dependencies\n", fileName);
+            //TODO: make sure that the .odex actually corresponds
+            //      to the classes.dex inside the archive (if present).
+        }
+    } else {
+        ZipEntry entry;
+
+tryArchive:
+        /* Missing or out-of-date .odex.  Look inside the jar.
+         */
+        entry = dexZipFindEntry(&archive, kDexInJarName);
+        if (entry != NULL) {
+            bool newFile = false;
+
+            /*
+             * We've found the one we want.  See if there's an up-to-date copy
+             * in the cache.
+             *
+             * On return, "fd" will be seeked just past the "opt" header.
+             *
+             * If a stale .odex file is present and classes.dex exists in
+             * the archive, this will *not* return an fd pointing to the
+             * .odex file; the fd will point into dalvik-cache like any
+             * other jar.
+             */
+            fd = dvmOpenCachedDexFile(fileName, kDexInJarName,
+                    dexGetZipEntryModTime(&archive, entry),
+                    dexGetZipEntryCrc32(&archive, entry),
+                    isBootstrap, &cachedName, &newFile,
+                    /*createIfMissing=*/true);
+            if (fd < 0) {
+                LOGI("Unable to open or create cache for %s\n", fileName);
+                goto bail;
+            }
+            locked = true;
+
+            /*
+             * If fd points to a new file (because there was no cached version,
+             * or the cached version was stale), generate the optimized DEX.
+             * The file descriptor returned is still locked, and is positioned
+             * just past the optimization header.
+             */
+            if (newFile) {
+                u8 startWhen, extractWhen, endWhen;
+                bool success;       /* renamed: "result" shadowed the
+                                     * function-level int result */
+                off_t dexOffset;
+
+                dexOffset = lseek(fd, 0, SEEK_CUR);
+                success = (dexOffset > 0);
+
+                if (success) {
+                    startWhen = dvmGetRelativeTimeUsec();
+                    success = dexZipExtractEntryToFile(&archive, entry, fd);
+                    extractWhen = dvmGetRelativeTimeUsec();
+                }
+                if (success) {
+                    success = dvmOptimizeDexFile(fd, dexOffset,
+                                dexGetZipEntryUncompLen(&archive, entry),
+                                fileName,
+                                dexGetZipEntryModTime(&archive, entry),
+                                dexGetZipEntryCrc32(&archive, entry),
+                                isBootstrap);
+                }
+
+                if (!success) {
+                    LOGE("Unable to extract+optimize DEX from '%s'\n",
+                        fileName);
+                    goto bail;
+                }
+
+                endWhen = dvmGetRelativeTimeUsec();
+                LOGD("DEX prep '%s': unzip in %dms, rewrite %dms\n",
+                    fileName,
+                    (int) (extractWhen - startWhen) / 1000,
+                    (int) (endWhen - extractWhen) / 1000);
+            }
+        } else {
+            LOGI("Zip is good, but no %s inside, and no valid .odex "
+                    "file in the same directory\n", kDexInJarName);
+            goto bail;
+        }
+    }
+
+    /*
+     * Map the cached version.  This immediately rewinds the fd, so it
+     * doesn't have to be seeked anywhere in particular.
+     */
+    if (dvmDexFileOpenFromFd(fd, &pDvmDex) != 0) {
+        LOGI("Unable to map %s in %s\n", kDexInJarName, fileName);
+        goto bail;
+    }
+
+    if (locked) {
+        /* unlock the fd */
+        if (!dvmUnlockCachedDexFile(fd)) {
+            /* uh oh -- this process needs to exit or we'll wedge the system */
+            LOGE("Unable to unlock DEX file\n");
+            goto bail;
+        }
+        locked = false;
+    }
+
+    LOGV("Successfully opened '%s' in '%s'\n", kDexInJarName, fileName);
+
+    *ppJarFile = (JarFile*) calloc(1, sizeof(JarFile));
+    if (*ppJarFile == NULL) {
+        /* previously dereferenced without a check; fail cleanly on OOM */
+        LOGE("Unable to allocate JarFile for '%s'\n", fileName);
+        dvmDexFileFree(pDvmDex);
+        goto bail;
+    }
+    (*ppJarFile)->archive = archive;
+    (*ppJarFile)->cacheFileName = cachedName;
+    (*ppJarFile)->pDvmDex = pDvmDex;
+    cachedName = NULL;      // don't free it below
+    result = 0;
+
+bail:
+    /* clean up, closing the open file */
+    if (archiveOpen && result != 0)
+        dexZipCloseArchive(&archive);
+    free(cachedName);
+    if (fd >= 0) {
+        if (locked)
+            (void) dvmUnlockCachedDexFile(fd);
+        close(fd);
+    }
+    return result;
+}
+
+/*
+ * Close a Jar file and free the struct.  Accepts NULL as a no-op.
+ */
+void dvmJarFileFree(JarFile* pJarFile)
+{
+    if (pJarFile != NULL) {
+        /* Release the mapped DEX first, then the archive it came from. */
+        dvmDexFileFree(pJarFile->pDvmDex);
+        dexZipCloseArchive(&pJarFile->archive);
+        free(pJarFile->cacheFileName);
+        free(pJarFile);
+    }
+}
+
diff --git a/vm/JarFile.h b/vm/JarFile.h
new file mode 100644
index 0000000..a73c782
--- /dev/null
+++ b/vm/JarFile.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Decode jar/apk/zip files.
+ */
+#ifndef _DALVIK_JARFILE
+#define _DALVIK_JARFILE
+
+/*
+ * This represents an open, scanned Jar file.  (It's actually for any Zip
+ * archive that happens to hold a Dex file.)
+ */
+typedef struct JarFile {
+    ZipArchive  archive;        /* open Zip archive; closed by dvmJarFileFree */
+    //MemMapping  map;
+    char*       cacheFileName;  /* path of optimized DEX; owned, freed by dvmJarFileFree */
+    DvmDex*     pDvmDex;        /* mapped DEX data for this archive */
+} JarFile;
+
+/*
+ * Open the Zip archive and get a list of the classfile entries.
+ *
+ * On success, returns 0 and sets "*ppJarFile" to a newly-allocated JarFile.
+ * On failure, returns a meaningful error code [currently just -1].
+ */
+int dvmJarFileOpen(const char* fileName, JarFile** ppJarFile, bool isBootstrap);
+
+/*
+ * Free a JarFile structure, along with any associated structures.
+ */
+void dvmJarFileFree(JarFile* pJarFile);
+
+/* pry the DexFile out of a JarFile (simple accessor for the pDvmDex field) */
+INLINE DvmDex* dvmGetJarFileDex(JarFile* pJarFile) {
+    return pJarFile->pDvmDex;
+}
+
+/* get full path of optimized DEX file (accessor; string remains owned by the JarFile) */
+INLINE const char* dvmGetJarFileCacheFileName(JarFile* pJarFile) {
+    return pJarFile->cacheFileName;
+}
+
+/* result codes for dvmDexCacheStatus() */
+typedef enum DexCacheStatus {
+    DEX_CACHE_ERROR = -2,       /* internal error while checking */
+    DEX_CACHE_BAD_ARCHIVE = -1, /* the jar/zip archive itself is bad */
+    DEX_CACHE_OK = 0,           /* cache file is usable as-is */
+    DEX_CACHE_STALE,            /* cached DEX is out of date */
+    DEX_CACHE_STALE_ODEX,       /* associated .odex is out of date */
+} DexCacheStatus;
+
+/*
+ * Checks the dependencies of the dex cache file corresponding
+ * to the jar file at the absolute path "fileName".
+ */
+DexCacheStatus dvmDexCacheStatus(const char *fileName);
+
+#endif /*_DALVIK_JARFILE*/
diff --git a/vm/Jni.c b/vm/Jni.c
new file mode 100644
index 0000000..76810aa
--- /dev/null
+++ b/vm/Jni.c
@@ -0,0 +1,3487 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik implementation of JNI interfaces.
+ */
+#include "Dalvik.h"
+#include "JniInternal.h"
+
+#include <stdlib.h>
+#include <stdarg.h>
+#include <limits.h>
+
+/*
+Native methods and interaction with the GC
+
+All JNI methods must start by changing their thread status to
+THREAD_RUNNING, and finish by changing it back to THREAD_NATIVE before
+returning to native code.  The switch to "running" triggers a thread
+suspension check.
+
+With a rudimentary GC we should be able to skip the status change for
+simple functions, e.g.  IsSameObject, GetJavaVM, GetStringLength, maybe
+even access to fields with primitive types.  Our options are more limited
+with a compacting GC, so we should replace JNI_ENTER with JNI_ENTER_NCGC
+or somesuch on the "lite" functions if we want to try this optimization.
+
+For performance reasons we do as little error-checking as possible here.
+For example, we don't check to make sure the correct type of Object is
+passed in when setting a field, and we don't prevent you from storing
+new values in a "final" field.  Such things are best handled in the
+"check" version.  For actions that are common, dangerous, and must be
+checked at runtime, such as array bounds checks, we do the tests here.
+
+
+General notes on local/global reference tracking
+
+JNI provides explicit control over natively-held references that the VM GC
+needs to know about.  These can be local, in which case they're released
+when the native method returns, or global, which are held until explicitly
+released.
+
+The references can be created and deleted with JNI NewLocalRef /
+NewGlobalRef calls, but this is unusual except perhaps for holding on
+to a Class reference.  Most often they are created transparently by the
+JNI functions.  For example, the paired Get/Release calls guarantee that
+objects survive until explicitly released, so a simple way to implement
+this is to create a global reference on "Get" and delete it on "Release".
+The AllocObject/NewObject functions must create local references, because
+nothing else in the GC root set has a reference to the new objects.
+
+The most common mode of operation is for a method to create zero or
+more local references and return.  Explicit "local delete" operations
+are expected to be exceedingly rare, except when walking through an
+object array, and the Push/PopLocalFrame calls are expected to be used
+infrequently.  For efficient operation, we want to add new local refs
+with a simple store/increment operation; to avoid infinite growth in
+pathological situations, we need to reclaim the space used by deleted
+entries.
+
+The simplest implementation is an expanding append-only array that compacts
+when objects are deleted.  In typical situations, e.g. running through
+an array of objects, we will be deleting one of the most recently added
+entries, so we can minimize the number of elements moved (or avoid having
+to move any).
+
+The spec says, "Local references are only valid in the thread in which
+they are created.  The native code must not pass local references from
+one thread to another."  It should also be noted that, while some calls
+will *create* global references as a side-effect, only the NewGlobalRef
+and NewWeakGlobalRef calls actually *return* global references.
+
+
+Global reference tracking
+
+There should be a small "active" set centered around the most-recently
+added items.  We can use an append-only, compacting array like we do for
+local refs.
+
+Because it's global, access to it has to be synchronized.
+
+The JNI spec does not define any sort of limit, so the list must be able
+to expand.  It may be useful to log significant increases in usage to
+help identify resource leaks.
+
+TODO: we currently use global references on strings and primitive array
+data, because they have the property we need (i.e. the pointer we return
+is guaranteed valid until we explicitly release it).  However, if we have
+a compacting GC and don't want to pin all memory held by all global refs,
+we actually want to treat these differently.  Either we need a way to
+tell the GC that specific global references are pinned, or we have to
+make a copy of the data and return that instead (something JNI supports).
+
+
+Local reference tracking
+
+The table of local references can be stored on the interpreted stack or
+in a parallel data structure (one per thread).
+
+*** Approach #1: use the interpreted stack
+
+The easiest place to tuck it is between the frame ptr and the first saved
+register, which is always in0.  (See the ASCII art in Stack.h.)  We can
+shift the "VM-specific goop" and frame ptr down, effectively inserting
+the JNI local refs in the space normally occupied by local variables.
+
+(Three things are accessed from the frame pointer:
+ (1) framePtr[N] is register vN, used to get at "ins" and "locals".
+ (2) framePtr - sizeof(StackSaveArea) is the VM frame goop.
+ (3) framePtr - sizeof(StackSaveArea) - numOuts is where the "outs" go.
+The only thing that isn't determined by an offset from the current FP
+is the previous frame.  However, tucking things below the previous frame
+can be problematic because the "outs" of the previous frame overlap with
+the "ins" of the current frame.  If the "ins" are altered they must be
+restored before we return.  For a native method call, the easiest and
+safest thing to disrupt is #1, because there are no locals and the "ins"
+are all copied to the native stack.)
+
+We can implement Push/PopLocalFrame with the existing stack frame calls,
+making sure we copy some goop from the previous frame (notably the method
+ptr, so that dvmGetCurrentJNIMethod() doesn't require extra effort).
+
+We can pre-allocate the storage at the time the stack frame is first
+set up, but we have to be careful.  When calling from interpreted code
+the frame ptr points directly at the arguments we're passing, but we can
+offset the args pointer when calling the native bridge.
+
+To manage the local ref collection, we need to be able to find three
+things: (1) the start of the region, (2) the end of the region, and (3)
+the next available entry.  The last is only required for quick adds.
+We currently have two easily-accessible pointers, the current FP and the
+previous frame's FP.  (The "stack pointer" shown in the ASCII art doesn't
+actually exist in the interpreted world.)
+
+We can't use the current FP to find the first "in", because we want to
+insert the variable-sized local refs table between them.  It's awkward
+to use the previous frame's FP because native methods invoked via
+dvmCallMethod() or dvmInvokeMethod() don't have "ins", but native methods
+invoked from interpreted code do.  We can either track the local refs
+table size with a field in the stack frame, or insert unnecessary items
+so that all native stack frames have "ins".
+
+Assuming we can find the region bounds, we still need pointer #3
+for an efficient implementation.  This can be stored in an otherwise
+unused-for-native field in the frame goop.
+
+When we run out of room we have to make more space.  If we start allocating
+locals immediately below in0 and grow downward, we will detect end-of-space
+by running into the current frame's FP.  We then memmove() the goop down
+(memcpy if we guarantee the additional size is larger than the frame).
+This is nice because we only have to move sizeof(StackSaveArea) bytes
+each time.
+
+Stack walking should be okay so long as nothing tries to access the
+"ins" by an offset from the FP.  In theory the "ins" could be read by
+the debugger or SIGQUIT handler looking for "this" or other arguments,
+but in practice this behavior isn't expected to work for native methods,
+so we can simply disallow it.
+
+A conservative GC can just scan the entire stack from top to bottom to find
+all references.  An exact GC will need to understand the actual layout.
+
+*** Approach #2: use a parallel stack
+
+Each Thread/JNIEnv points to a ReferenceTable struct.  The struct
+has a system-heap-allocated array of references and a pointer to the
+next-available entry ("nextEntry").
+
+Each stack frame has a pointer to what it sees as the "top" element in the
+array (we can double-up the "currentPc" field).  This is set to "nextEntry"
+when the frame is pushed on.  As local references are added or removed,
+"nextEntry" is updated.
+
+We implement Push/PopLocalFrame with actual stack frames.  Before a JNI
+frame gets popped, we set "nextEntry" to the "top" pointer of the current
+frame, effectively releasing the references.
+
+The GC will scan all references from the start of the table to the
+"nextEntry" pointer.
+
+*** Comparison
+
+All approaches will return a failure result when they run out of local
+reference space.  For #1 that means blowing out the stack, for #2 it's
+running out of room in the array.
+
+Compared to #1, approach #2:
+ - Needs only one pointer in the stack frame goop.
+ - Makes pre-allocating storage unnecessary.
+ - Doesn't contend with interpreted stack depth for space.  In most
+   cases, if something blows out the local ref storage, it's because the
+   JNI code was misbehaving rather than called from way down.
+ - Allows the GC to do a linear scan per thread in a buffer that is 100%
+   references.  The GC can be slightly less smart when scanning the stack.
+ - Will be easier to work with if we combine native and interpreted stacks.
+
+ - Isn't as clean, especially when popping frames, since we have to do
+   explicit work.  Fortunately we only have to do it when popping native
+   method calls off, so it doesn't add overhead to interpreted code paths.
+ - Is awkward to expand dynamically.  We'll want to pre-allocate the full
+   amount of space; this is fine, since something on the order of 1KB should
+   be plenty.  The JNI spec allows us to limit this.
+ - Requires the GC to scan even more memory.  With the references embedded
+   in the stack we get better locality of reference.
+
+*/
+
+static const struct JNINativeInterface gNativeInterface;        // fwd
+
+
+#ifdef WITH_JNI_STACK_CHECK
+# define COMPUTE_STACK_SUM(_self)   computeStackSum(_self);
+# define CHECK_STACK_SUM(_self)     checkStackSum(_self);
+static void computeStackSum(Thread* self);
+static void checkStackSum(Thread* self);
+#else
+# define COMPUTE_STACK_SUM(_self)   ((void)0)
+# define CHECK_STACK_SUM(_self)     ((void)0)
+#endif
+
+
+/*
+ * ===========================================================================
+ *      JNI call bridge
+ * ===========================================================================
+ */
+
+/*
+ * Bridge to calling a JNI function.  This ideally gets some help from
+ * assembly language code in dvmPlatformInvoke, because the arguments
+ * must be pushed into the native stack as if we were calling a <stdarg.h>
+ * function.
+ *
+ * The number of values in "args" must match method->insSize.
+ *
+ * This is generally just set up by the resolver and then called through.
+ * We don't call here explicitly.  This takes the same arguments as all
+ * of the "internal native" methods.
+ *
+ * Note: for JNI-registered methods, "insns" has been overloaded to hold
+ * the native function pointer (see dvmRegisterJNIMethod).
+ */
+void dvmCallJNIMethod(const u4* args, JValue* pResult, const Method* method,
+    Thread* self)
+{
+    int oldStatus;
+
+    assert(method->insns != NULL);
+
+    //int i;
+    //LOGI("JNI calling %p (%s.%s %s):\n", method->insns,
+    //    method->clazz->descriptor, method->name, method->signature);
+    //for (i = 0; i < method->insSize; i++)
+    //    LOGI("  %d: 0x%08x\n", i, args[i]);
+
+    /* run the native code in THREAD_NATIVE status; restored on the way out */
+    oldStatus = dvmChangeStatus(self, THREAD_NATIVE);
+
+    COMPUTE_STACK_SUM(self);
+    // TODO: should we be converting 'this' to a local ref?
+    /* static methods get the class object where instance methods get "this" */
+    dvmPlatformInvoke(self->jniEnv,
+        dvmIsStaticMethod(method) ? method->clazz : NULL,
+        method->jniArgInfo, method->insSize, args, method->shorty,
+        (void*)method->insns, pResult);
+    CHECK_STACK_SUM(self);
+
+    /* switching back triggers a thread suspension check */
+    dvmChangeStatus(self, oldStatus);
+}
+
+/*
+ * Alternate call bridge for the unusual case of a synchronized native
+ * method: acquire the appropriate monitor, call through the normal JNI
+ * bridge, then release the monitor.
+ *
+ * Static methods lock the class object; instance methods lock "this",
+ * which is always in the first argument slot.
+ */
+void dvmCallSynchronizedJNIMethod(const u4* args, JValue* pResult,
+    const Method* method, Thread* self)
+{
+    assert(dvmIsSynchronizedMethod(method));
+
+    Object* lockObj = dvmIsStaticMethod(method)
+        ? (Object*) method->clazz
+        : (Object*) args[0];
+
+    LOGVV("Calling %s.%s: locking %p (%s)\n",
+        method->clazz->descriptor, method->name,
+        lockObj, lockObj->clazz->descriptor);
+
+    dvmLockObject(self, lockObj);
+    dvmCallJNIMethod(args, pResult, method, self);
+    dvmUnlockObject(self, lockObj);
+}
+
+/*
+ * Extract the return type enum from the "jniArgInfo" field.
+ *
+ * (Masks with DALVIK_JNI_RETURN_MASK and shifts the bits down.)
+ */
+DalvikJniReturnType dvmGetArgInfoReturnType(int jniArgInfo)
+{
+    return (jniArgInfo & DALVIK_JNI_RETURN_MASK) >> DALVIK_JNI_RETURN_SHIFT;
+}
+
+
+/*
+ * ===========================================================================
+ *      Utility functions
+ * ===========================================================================
+ */
+
+/*
+ * Entry/exit processing for all JNI calls.
+ *
+ * If TRUSTED_JNIENV is set, we get to skip the (curiously expensive)
+ * thread-local storage lookup on our Thread*.  If the caller has passed
+ * the wrong JNIEnv in, we're going to be accessing unsynchronized
+ * structures from more than one thread, and things are going to fail
+ * in bizarre ways.  This is only sensible if the native code has been
+ * fully exercised with CheckJNI enabled.
+ */
+#define TRUSTED_JNIENV
+/* declares "_self", verifies the stack checksum, and goes to "running" */
+#ifdef TRUSTED_JNIENV
+# define JNI_ENTER()                                                        \
+        Thread* _self = ((JNIEnvExt*)env)->self;                            \
+        CHECK_STACK_SUM(_self);                                             \
+        dvmChangeStatus(_self, THREAD_RUNNING)
+#else
+# define JNI_ENTER()                                                        \
+        Thread* _self = dvmThreadSelf();                                    \
+        UNUSED_PARAMETER(env);                                              \
+        CHECK_STACK_SUM(_self);                                             \
+        dvmChangeStatus(_self, THREAD_RUNNING)
+#endif
+/* returns to "native" status and recomputes the stack checksum */
+#define JNI_EXIT()                                                          \
+        dvmChangeStatus(_self, THREAD_NATIVE);                              \
+        COMPUTE_STACK_SUM(_self)
+
+/* initial and maximum entry counts for the JNI global reference table */
+#define kGlobalRefsTableInitialSize 512
+#define kGlobalRefsTableMaxSize     51200       /* arbitrary */
+/* spacing between the hi/lo "water mark" logging thresholds */
+#define kGrefWaterInterval          100
+
+/* when true (and jniGrefLimit != 0), global ref usage trends are logged */
+#define kTrackGrefUsage             true
+
+/*
+ * One-time JNI initialization: allocate the global reference table,
+ * create its guard mutex, and set the initial gref usage-tracking
+ * water marks.
+ *
+ * Returns false if the reference table could not be allocated.
+ */
+bool dvmJniStartup(void)
+{
+    bool ok = dvmInitReferenceTable(&gDvm.jniGlobalRefTable,
+                kGlobalRefsTableInitialSize, kGlobalRefsTableMaxSize);
+    if (ok) {
+        dvmInitMutex(&gDvm.jniGlobalRefLock);
+        gDvm.jniGlobalRefLoMark = 0;
+        gDvm.jniGlobalRefHiMark = kGrefWaterInterval * 2;
+    }
+    return ok;
+}
+
+/*
+ * Free the global references table.
+ *
+ * NOTE(review): the jniGlobalRefLock mutex created in dvmJniStartup is
+ * not destroyed here -- presumably fine at VM shutdown, but worth
+ * confirming.
+ */
+void dvmJniShutdown(void)
+{
+    dvmClearReferenceTable(&gDvm.jniGlobalRefTable);
+}
+
+
+/*
+ * Find the JNIEnv associated with the current thread.
+ *
+ * Currently stored in the Thread struct.  Could also just drop this into
+ * thread-local storage.
+ *
+ * Returns NULL if the calling thread doesn't have a Thread struct.
+ */
+JNIEnvExt* dvmGetJNIEnvForThread(void)
+{
+    Thread* self = dvmThreadSelf();
+    return (self == NULL) ? NULL : (JNIEnvExt*) dvmGetThreadJNIEnv(self);
+}
+
+/*
+ * Create a new JNIEnv struct and add it to the VM's list.
+ *
+ * "self" will be NULL for the main thread, since the VM hasn't started
+ * yet; the value will be filled in later.
+ *
+ * Returns NULL if the JNIEnvExt struct could not be allocated.  (The
+ * original code dereferenced the calloc result unconditionally.)
+ */
+JNIEnv* dvmCreateJNIEnv(Thread* self)
+{
+    JavaVMExt* vm = (JavaVMExt*) gDvm.vmList;
+    JNIEnvExt* newEnv;
+
+    assert(vm != NULL);
+
+    newEnv = (JNIEnvExt*) calloc(1, sizeof(JNIEnvExt));
+    if (newEnv == NULL) {
+        LOGE("Unable to allocate JNIEnvExt\n");
+        return NULL;
+    }
+    newEnv->funcTable = &gNativeInterface;
+    newEnv->vm = vm;
+    if (self != NULL) {
+        dvmSetJniEnvThreadId((JNIEnv*) newEnv, self);
+        assert(newEnv->envThreadId != 0);
+    } else {
+        /* make it obvious if we fail to initialize these later */
+        newEnv->envThreadId = 0x77777775;
+        newEnv->self = (Thread*) 0x77777779;
+    }
+    if (vm->useChecked)
+        dvmUseCheckedJniEnv(newEnv);
+
+    dvmLockMutex(&vm->envListLock);
+
+    /* insert at head of list (calloc zeroed next/prev already) */
+    newEnv->next = vm->envList;
+    assert(newEnv->prev == NULL);
+    if (vm->envList != NULL)
+        vm->envList->prev = newEnv;
+    vm->envList = newEnv;
+
+    dvmUnlockMutex(&vm->envListLock);
+
+    return (JNIEnv*) newEnv;
+}
+
+/*
+ * Remove a JNIEnv struct from the VM's list and free it.
+ *
+ * A NULL "env" is a no-op.  (The original read extEnv->vm before the
+ * NULL check, so the documented no-op path dereferenced NULL.)
+ */
+void dvmDestroyJNIEnv(JNIEnv* env)
+{
+    JNIEnvExt* extEnv = (JNIEnvExt*) env;
+    JavaVMExt* vm;
+    Thread* self;
+
+    if (env == NULL)
+        return;
+
+    /* safe to dereference now that env is known non-NULL */
+    vm = extEnv->vm;
+
+    self = dvmThreadSelf();
+    assert(self != NULL);
+
+    dvmLockMutex(&vm->envListLock);
+
+    /* unlink from the doubly-linked list; head needs special handling */
+    if (extEnv == vm->envList) {
+        assert(extEnv->prev == NULL);
+        vm->envList = extEnv->next;
+    } else {
+        assert(extEnv->prev != NULL);
+        extEnv->prev->next = extEnv->next;
+    }
+    if (extEnv->next != NULL)
+        extEnv->next->prev = extEnv->prev;
+
+    dvmUnlockMutex(&vm->envListLock);
+
+    free(env);
+}
+
+
+/*
+ * Retrieve the JNI local-reference ReferenceTable for the current thread
+ * (one table per thread, stored in the Thread struct).
+ *
+ * If we know the code isn't sharing JNIEnv pointers between threads, we
+ * could put this into env and skip the TLS lookup.
+ */
+static inline ReferenceTable* getLocalRefTable(void)
+{
+    return &dvmThreadSelf()->jniLocalRefTable;
+}
+
+/*
+ * Add a local reference for an object to the current stack frame.  When
+ * the native function returns, the reference will be discarded.
+ *
+ * The same object may be added multiple times; add/delete calls pair up.
+ *
+ * This will be called on otherwise unreferenced objects, so we cannot
+ * do GC allocations here, and it's best if we don't grab a mutex.
+ *
+ * Returns the local reference (currently just the pointer that was
+ * passed in), or NULL if "obj" was NULL.  Running out of table space
+ * aborts the VM (equivalent to the JNI spec's FatalError).
+ */
+static jobject addLocalReference(jobject obj)
+{
+    if (obj == NULL)
+        return NULL;
+
+    ReferenceTable* localRefs = getLocalRefTable();
+
+    if (dvmAddToReferenceTable(localRefs, (Object*)obj)) {
+        LOGVV("LREF add %p  (%s.%s)\n", obj,
+            dvmGetCurrentJNIMethod()->clazz->descriptor,
+            dvmGetCurrentJNIMethod()->name);
+    } else {
+        dvmDumpReferenceTable(localRefs, "JNI local");
+        LOGE("Failed adding to JNI local ref table (has %d entries)\n",
+            (int) dvmReferenceTableEntries(localRefs));
+        dvmAbort();     // spec says call FatalError; this is equivalent
+    }
+
+    return obj;
+}
+
+/*
+ * Ensure that at least "capacity" more references can be held in the
+ * local refs table of the current thread.  Returns true if there is
+ * enough room left before the kJniLocalRefMax ceiling.
+ */
+static bool ensureLocalCapacity(int capacity)
+{
+    ReferenceTable* localRefs = getLocalRefTable();
+    int numUsed = localRefs->nextEntry - localRefs->table;
+
+    return kJniLocalRefMax - numUsed >= capacity;
+}
+
+/*
+ * Explicitly delete a reference from the local list.
+ *
+ * Only entries in the topmost local reference frame (at or above "top")
+ * are candidates for removal; see the warning comment below.
+ */
+static void deleteLocalReference(jobject obj)
+{
+    if (obj == NULL)
+        return;
+
+    ReferenceTable* pRef = getLocalRefTable();
+    Thread* self = dvmThreadSelf();
+    /* lower bound for the search: start of the current frame's local refs */
+    Object** top = SAVEAREA_FROM_FP(self->curFrame)->xtra.localRefTop;
+
+    if (!dvmRemoveFromReferenceTable(pRef, top, (Object*) obj)) {
+        /*
+         * Attempting to delete a local reference that is not in the
+         * topmost local reference frame is a no-op.  DeleteLocalRef returns
+         * void and doesn't throw any exceptions, but we should probably
+         * complain about it so the user will notice that things aren't
+         * going quite the way they expect.
+         */
+        LOGW("JNI WARNING: DeleteLocalRef(%p) failed to find entry (valid=%d)\n",
+            obj, dvmIsValidObject((Object*) obj));
+    }
+}
+
+/*
+ * Add a global reference for an object.
+ *
+ * We may add the same object more than once.  Add/remove calls are paired,
+ * so it needs to appear on the list multiple times.
+ *
+ * Returns the global reference (currently just the pointer passed in),
+ * or NULL if "obj" was NULL.  Running out of table space aborts the VM.
+ *
+ * (Cleanup vs. the original: the disabled "if (false && ...)" debug
+ * scaffolding and the unused "bail:" label -- nothing jumped to it --
+ * have been removed.)
+ */
+static jobject addGlobalReference(jobject obj)
+{
+    if (obj == NULL)
+        return NULL;
+
+    dvmLockMutex(&gDvm.jniGlobalRefLock);
+
+    /*
+     * Expanding the table should happen rarely, so I'm not overly
+     * concerned about the performance impact of copying the old list
+     * over.  We shouldn't see one-time activity spikes, so freeing
+     * up storage shouldn't be required.
+     *
+     * Throwing an exception on failure is problematic, because JNI code
+     * may not be expecting an exception, and things sort of cascade.  We
+     * want to have a hard limit to catch leaks during debugging, but this
+     * otherwise needs to expand until memory is consumed.  As a practical
+     * matter, if we have many thousands of global references, chances are
+     * we're either leaking global ref table entries or we're going to
+     * run out of space in the GC heap.
+     */
+    if (!dvmAddToReferenceTable(&gDvm.jniGlobalRefTable, (Object*)obj)) {
+        dvmDumpReferenceTable(&gDvm.jniGlobalRefTable, "JNI global");
+        LOGE("Failed adding to JNI global ref table (%d entries)\n",
+            (int) dvmReferenceTableEntries(&gDvm.jniGlobalRefTable));
+        dvmAbort();
+    }
+
+    LOGVV("GREF add %p  (%s.%s)\n", obj,
+        dvmGetCurrentJNIMethod()->clazz->descriptor,
+        dvmGetCurrentJNIMethod()->name);
+
+    /* GREF usage tracking; should probably be disabled for production env */
+    if (kTrackGrefUsage && gDvm.jniGrefLimit != 0) {
+        int count = dvmReferenceTableEntries(&gDvm.jniGlobalRefTable);
+        if (count > gDvm.jniGlobalRefHiMark) {
+            LOGD("GREF has increased to %d\n", count);
+            gDvm.jniGlobalRefHiMark += kGrefWaterInterval;
+            gDvm.jniGlobalRefLoMark += kGrefWaterInterval;
+
+            /* watch for "excessive" use; not generally appropriate */
+            if (count >= gDvm.jniGrefLimit) {
+                if (gDvm.jniWarnError) {
+                    dvmDumpReferenceTable(&gDvm.jniGlobalRefTable,"JNI global");
+                    LOGE("Excessive JNI global references (%d)\n", count);
+                    dvmAbort();
+                } else {
+                    LOGW("Excessive JNI global references (%d)\n", count);
+                }
+            }
+        }
+    }
+
+    dvmUnlockMutex(&gDvm.jniGlobalRefLock);
+    return obj;
+}
+
+/*
+ * Remove a global reference.  In most cases it's the entry most recently
+ * added, which makes this pretty quick.
+ *
+ * Thought: if it's not the most recent entry, just null it out.  When we
+ * fill up, do a compaction pass before we expand the list.
+ */
+static void deleteGlobalReference(jobject obj)
+{
+    if (obj == NULL)
+        return;
+
+    dvmLockMutex(&gDvm.jniGlobalRefLock);
+
+    bool removed = dvmRemoveFromReferenceTable(&gDvm.jniGlobalRefTable,
+            gDvm.jniGlobalRefTable.table, obj);
+    if (!removed) {
+        LOGW("JNI: DeleteGlobalRef(%p) failed to find entry (valid=%d)\n",
+            obj, dvmIsValidObject((Object*) obj));
+    } else if (kTrackGrefUsage && gDvm.jniGrefLimit != 0) {
+        /* lower the water marks as usage declines */
+        int count = dvmReferenceTableEntries(&gDvm.jniGlobalRefTable);
+        if (count < gDvm.jniGlobalRefLoMark) {
+            LOGD("GREF has decreased to %d\n", count);
+            gDvm.jniGlobalRefHiMark -= kGrefWaterInterval;
+            gDvm.jniGlobalRefLoMark -= kGrefWaterInterval;
+        }
+    }
+
+    dvmUnlockMutex(&gDvm.jniGlobalRefLock);
+}
+
+/*
+ * GC helper function to mark all JNI global references.
+ *
+ * Holds the global ref lock while walking the table so the set of
+ * entries can't change underneath us.
+ */
+void dvmGcMarkJniGlobalRefs()
+{
+    Object** refWalk;
+
+    dvmLockMutex(&gDvm.jniGlobalRefLock);
+
+    for (refWalk = gDvm.jniGlobalRefTable.table;
+         refWalk < gDvm.jniGlobalRefTable.nextEntry;
+         refWalk++)
+    {
+        dvmMarkObjectNonNull(*refWalk);
+    }
+
+    dvmUnlockMutex(&gDvm.jniGlobalRefLock);
+}
+
+
+/*
+ * Determine if "obj" appears in the argument list for the native method.
+ *
+ * We use the "shorty" signature to determine which argument slots hold
+ * reference types.
+ *
+ * Returns true if "obj" matches one of the method's reference arguments
+ * (or "this", or the class object for static methods).
+ *
+ * NOTE(review): the (u4) casts assume object pointers fit in a 32-bit
+ * register slot -- confirm for any 64-bit port.
+ */
+static bool findInArgList(Thread* self, Object* obj)
+{
+    const Method* meth;
+    u4* fp;
+    int i;
+
+    fp = self->curFrame;
+    while (1) {
+        /*
+         * Back up over JNI PushLocalFrame frames.  This works because the
+         * previous frame on the interpreted stack is either a break frame
+         * (if we called here via native code) or an interpreted method (if
+         * we called here via the interpreter).  In both cases the method
+         * pointer won't match.
+         */
+        StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+        meth = saveArea->method;
+        if (meth != SAVEAREA_FROM_FP(saveArea->prevFrame)->method)
+            break;
+        fp = saveArea->prevFrame;
+    }
+
+    LOGVV("+++ scanning %d args in %s (%s)\n",
+        meth->insSize, meth->name, meth->shorty);
+    const char* shorty = meth->shorty +1;       /* skip return type char */
+    for (i = 0; i < meth->insSize; i++) {
+        if (i == 0 && !dvmIsStaticMethod(meth)) {
+            /* first arg is "this" ref, not represented in "shorty" */
+            if (fp[i] == (u4) obj)
+                return true;
+        } else {
+            /* if this is a reference type, see if it matches */
+            switch (*shorty) {
+            case 'L':
+                if (fp[i] == (u4) obj)
+                    return true;
+                break;
+            case 'D':
+            case 'J':
+                /* long/double occupy two register slots; skip the second */
+                i++;
+                break;
+            case '\0':
+                LOGE("Whoops! ran off the end of %s (%d)\n",
+                    meth->shorty, meth->insSize);
+                break;
+            default:
+                /* primitive slot that happens to hold the same bits */
+                if (fp[i] == (u4) obj)
+                    LOGI("NOTE: ref %p match on arg type %c\n", obj, *shorty);
+                break;
+            }
+            shorty++;
+        }
+    }
+
+    /*
+     * For static methods, we also pass a class pointer in.
+     */
+    if (dvmIsStaticMethod(meth)) {
+        //LOGI("+++ checking class pointer in %s\n", meth->name);
+        if ((void*)obj == (void*)meth->clazz)
+            return true;
+    }
+    return false;
+}
+
+/*
+ * Verify that a reference passed in from native code is one that the
+ * code is allowed to have.
+ *
+ * It's okay for native code to pass us a reference that:
+ *  - was just passed in as an argument when invoked by native code
+ *  - was returned to it from JNI (and is now in the JNI local refs table)
+ *  - is present in the JNI global refs table
+ * The first one is a little awkward.  The latter two are just table lookups.
+ *
+ * Used by -Xcheck:jni and GetObjectRefType.
+ *
+ * NOTE: in the current VM, global and local references are identical.  If
+ * something is both global and local, we can't tell them apart, and always
+ * return "local".
+ */
+jobjectRefType dvmGetJNIRefType(Object* obj)
+{
+    ReferenceTable* localRefs = getLocalRefTable();
+    Thread* self = dvmThreadSelf();
+
+    /* check args */
+    if (findInArgList(self, obj))
+        return JNILocalRefType;
+
+    /* check locals */
+    if (dvmFindInReferenceTable(localRefs, localRefs->table, obj) != NULL)
+        return JNILocalRefType;
+
+    /* check globals; hold the lock while scanning the shared table */
+    dvmLockMutex(&gDvm.jniGlobalRefLock);
+    bool isGlobal = dvmFindInReferenceTable(&gDvm.jniGlobalRefTable,
+            gDvm.jniGlobalRefTable.table, obj) != NULL;
+    dvmUnlockMutex(&gDvm.jniGlobalRefLock);
+
+    return isGlobal ? JNIGlobalRefType : JNIInvalidRefType;
+}
+
+/*
+ * Register a method that uses JNI calling conventions.
+ */
+static bool dvmRegisterJNIMethod(ClassObject* clazz, const char* methodName,
+    const char* signature, void* fnPtr)
+{
+    Method* method;
+    bool result = false;
+
+    if (fnPtr == NULL)
+        goto bail;
+
+    method = dvmFindDirectMethodByDescriptor(clazz, methodName, signature);
+    if (method == NULL)
+        method = dvmFindVirtualMethodByDescriptor(clazz, methodName, signature);
+    if (method == NULL) {
+        LOGW("ERROR: Unable to find decl for native %s.%s %s\n",
+            clazz->descriptor, methodName, signature);
+        goto bail;
+    }
+
+    if (!dvmIsNativeMethod(method)) {
+        LOGW("Unable to register: not native: %s.%s %s\n",
+            clazz->descriptor, methodName, signature);
+        goto bail;
+    }
+
+    if (method->nativeFunc != dvmResolveNativeMethod) {
+        LOGW("Warning: %s.%s %s was already registered/resolved?\n",
+            clazz->descriptor, methodName, signature);
+        /* keep going, I guess */
+    }
+
+    /*
+     * Point "nativeFunc" at the JNI bridge, and overload "insns" to
+     * point at the actual function.
+     */
+    if (dvmIsSynchronizedMethod(method))
+        dvmSetNativeFunc(method, dvmCallSynchronizedJNIMethod, fnPtr);
+    else
+        dvmSetNativeFunc(method, dvmCallJNIMethod, fnPtr);
+
+    LOGV("JNI-registered %s.%s %s\n", clazz->descriptor, methodName,
+        signature);
+    result = true;
+
+bail:
+    return result;
+}
+
+/*
+ * Get the method currently being executed by examining the interp stack.
+ */
+const Method* dvmGetCurrentJNIMethod(void)
+{
+    assert(dvmThreadSelf() != NULL);
+
+    void* fp = dvmThreadSelf()->curFrame;
+    const Method* meth = SAVEAREA_FROM_FP(fp)->method;
+
+    assert(meth != NULL);
+    assert(dvmIsNativeMethod(meth));
+    return meth;
+}
+
+
+/*
+ * Track a JNI MonitorEnter in the current thread.
+ *
+ * The goal is to be able to "implicitly" release all JNI-held monitors
+ * when the thread detaches.
+ *
+ * Monitors may be entered multiple times, so we add a new entry for each
+ * enter call.  It would be more efficient to keep a counter.  At present
+ * there's no real motivation to improve this however.
+ */
+static void trackMonitorEnter(Thread* self, Object* obj)
+{
+    static const int kInitialSize = 16;
+    ReferenceTable* refTable = &self->jniMonitorRefTable;
+
+    /* init table on first use */
+    if (refTable->table == NULL) {
+        assert(refTable->maxEntries == 0);
+
+        if (!dvmInitReferenceTable(refTable, kInitialSize, INT_MAX)) {
+            LOGE("Unable to initialize monitor tracking table\n");
+            dvmAbort();
+        }
+    }
+
+    if (!dvmAddToReferenceTable(refTable, obj)) {
+        /* ran out of memory? could throw exception instead */
+        LOGE("Unable to add entry to monitor tracking table\n");
+        dvmAbort();
+    } else {
+        LOGVV("--- added monitor %p\n", obj);
+    }
+}
+
+/*
+ * Track a JNI MonitorExit in the current thread.
+ */
+static void trackMonitorExit(Thread* self, Object* obj)
+{
+    ReferenceTable* refTable = &self->jniMonitorRefTable;
+
+    if (!dvmRemoveFromReferenceTable(refTable, refTable->table, obj)) {
+        LOGE("JNI monitor %p not found in tracking list\n", obj);
+        /* keep going? */
+    } else {
+        LOGVV("--- removed monitor %p\n", obj);
+    }
+}
+
+/*
+ * Release all monitors held by the jniMonitorRefTable list.
+ */
+void dvmReleaseJniMonitors(Thread* self)
+{
+    ReferenceTable* refTable = &self->jniMonitorRefTable;
+    Object** top = refTable->table;
+
+    if (top == NULL)
+        return;
+
+    Object** ptr = refTable->nextEntry;
+    while (--ptr >= top) {
+        if (!dvmUnlockObject(self, *ptr)) {
+            LOGW("Unable to unlock monitor %p at thread detach\n", *ptr);
+        } else {
+            LOGVV("--- detach-releasing monitor %p\n", *ptr);
+        }
+    }
+
+    /* zap it */
+    refTable->nextEntry = refTable->table;
+}
+
+#ifdef WITH_JNI_STACK_CHECK
+/*
+ * Compute a CRC on the entire interpreted stack.
+ *
+ * Would be nice to compute it on "self" as well, but there are parts of
+ * the Thread that can be altered by other threads (e.g. prev/next pointers).
+ */
/*
 * Compute a CRC over the interpreted stack, from the current frame's save
 * area up to the top of the interp stack, and store it in self->stackCrc.
 *
 * stackCrc is zeroed before the computation; presumably defensive, since
 * the Thread struct itself is not part of the summed region — TODO confirm.
 */
static void computeStackSum(Thread* self)
{
    /* region covered: [save area of current frame, interpStackStart) */
    const u1* low = (const u1*)SAVEAREA_FROM_FP(self->curFrame);
    u4 crc = dvmInitCrc32();
    self->stackCrc = 0;
    crc = dvmComputeCrc32(crc, low, self->interpStackStart - low);
    self->stackCrc = crc;
}
+
+/*
+ * Compute a CRC on the entire interpreted stack, and compare it to what
+ * we previously computed.
+ *
+ * We can execute JNI directly from native code without calling in from
+ * interpreted code during VM initialization and immediately after JNI
+ * thread attachment.  Another opportunity exists during JNI_OnLoad.  Rather
+ * than catching these cases we just ignore them here, which is marginally
+ * less accurate but reduces the amount of code we have to touch with #ifdefs.
+ */
/*
 * Recompute the stack CRC and compare against the value saved by
 * computeStackSum().  A mismatch normally aborts the VM.
 *
 * Two benign cases are tolerated (see block comment above): a frame depth
 * of 1 (direct JNI calls during VM init / right after thread attach) and
 * Runtime.nativeLoad (JNI_OnLoad can call back into the VM).
 */
static void checkStackSum(Thread* self)
{
    const u1* low = (const u1*)SAVEAREA_FROM_FP(self->curFrame);
    u4 stackCrc, crc;

    /* clear the saved value before recomputing, mirroring computeStackSum */
    stackCrc = self->stackCrc;
    self->stackCrc = 0;
    crc = dvmInitCrc32();
    crc = dvmComputeCrc32(crc, low, self->interpStackStart - low);
    if (crc != stackCrc) {
        const Method* meth = dvmGetCurrentJNIMethod();
        if (dvmComputeExactFrameDepth(self->curFrame) == 1) {
            /* only one frame: we're executing directly from native code */
            LOGD("JNI: bad stack CRC (0x%08x) -- okay during init\n",
                stackCrc);
        } else if (strcmp(meth->name, "nativeLoad") == 0 &&
                  (strcmp(meth->clazz->descriptor, "Ljava/lang/Runtime;") == 0))
        {
            /* inside Runtime.nativeLoad, i.e. JNI_OnLoad is running */
            LOGD("JNI: bad stack CRC (0x%08x) -- okay during JNI_OnLoad\n",
                stackCrc);
        } else {
            LOGW("JNI: bad stack CRC (%08x vs %08x)\n", crc, stackCrc);
            dvmAbort();
        }
    }
    self->stackCrc = (u4) -1;       /* make logic errors more noticeable */
}
+#endif
+
+
+/*
+ * ===========================================================================
+ *      JNI implementation
+ * ===========================================================================
+ */
+
+/*
+ * Return the version of the native method interface.
+ */
+static jint GetVersion(JNIEnv* env)
+{
+    JNI_ENTER();
+    /*
+     * There is absolutely no need to toggle the mode for correct behavior.
+     * However, it does provide native code with a simple "suspend self
+     * if necessary" call.
+     */
+    JNI_EXIT();
+    return JNI_VERSION_1_6;
+}
+
+/*
+ * Create a new class from a bag of bytes.
+ *
+ * This is not currently supported within Dalvik.
+ */
+static jclass DefineClass(JNIEnv* env, const char *name, jobject loader,
+    const jbyte* buf, jsize bufLen)
+{
+    UNUSED_PARAMETER(name);
+    UNUSED_PARAMETER(loader);
+    UNUSED_PARAMETER(buf);
+    UNUSED_PARAMETER(bufLen);
+
+    JNI_ENTER();
+    LOGW("Rejecting JNI DefineClass request\n");
+    JNI_EXIT();
+    return NULL;
+}
+
+/*
+ * Find a class by name.
+ *
+ * We have to use the "no init" version of FindClass here, because we might
+ * be getting the class prior to registering native methods that will be
+ * used in <clinit>.
+ *
+ * We need to get the class loader associated with the current native
+ * method.  If there is no native method, e.g. we're calling this from native
+ * code right after creating the VM, the spec says we need to use the class
+ * loader returned by "ClassLoader.getBaseClassLoader".  There is no such
+ * method, but it's likely they meant ClassLoader.getSystemClassLoader.
+ * We can't get that until after the VM has initialized though.
+ */
static jclass FindClass(JNIEnv* env, const char* name)
{
    JNI_ENTER();

    const Method* thisMethod;
    ClassObject* clazz;
    Object* loader;
    char* descriptor = NULL;

    /* the current native method determines which class loader to use */
    thisMethod = dvmGetCurrentJNIMethod();
    assert(thisMethod != NULL);

    /* convert "java/lang/String" form to "Ljava/lang/String;" form */
    descriptor = dvmNameToDescriptor(name);
    if (descriptor == NULL) {
        clazz = NULL;
        goto bail;
    }

    //Thread* self = dvmThreadSelf();
    if (_self->classLoaderOverride != NULL) {
        /* hack for JNI_OnLoad: use the loader of the library being loaded */
        assert(strcmp(thisMethod->name, "nativeLoad") == 0);
        loader = _self->classLoaderOverride;
    } else if (thisMethod == gDvm.methFakeNativeEntry) {
        /* start point of invocation interface (no real native frame);
         * NULL loader during VM init means "bootstrap" — TODO confirm */
        if (!gDvm.initializing)
            loader = dvmGetSystemClassLoader();
        else
            loader = NULL;
    } else {
        loader = thisMethod->clazz->classLoader;
    }

    /* "no init" lookup; see block comment above for why */
    clazz = dvmFindClassNoInit(descriptor, loader);
    clazz = addLocalReference(clazz);

bail:
    free(descriptor);

    JNI_EXIT();
    return (jclass)clazz;
}
+
+/*
+ * Return the superclass of a class.
+ */
+static jclass GetSuperclass(JNIEnv* env, jclass clazz)
+{
+    JNI_ENTER();
+    jclass super = (jclass) ((ClassObject*) clazz)->super;
+    super = addLocalReference(super);
+    JNI_EXIT();
+    return super;
+}
+
+/*
+ * Determine whether an object of clazz1 can be safely cast to clazz2.
+ *
+ * Like IsInstanceOf, but with a pair of class objects instead of obj+class.
+ */
+static jboolean IsAssignableFrom(JNIEnv* env, jclass clazz1, jclass clazz2)
+{
+    JNI_ENTER();
+
+    jboolean result;
+    result = dvmInstanceof((ClassObject*) clazz1, (ClassObject*) clazz2);
+
+    JNI_EXIT();
+    return result;
+}
+
+/*
+ * Given a java.lang.reflect.Method or .Constructor, return a methodID.
+ */
+static jmethodID FromReflectedMethod(JNIEnv* env, jobject method)
+{
+    JNI_ENTER();
+    jmethodID methodID;
+    methodID = (jmethodID) dvmGetMethodFromReflectObj((Object*)method);
+    JNI_EXIT();
+    return methodID;
+}
+
+/*
+ * Given a java.lang.reflect.Field, return a fieldID.
+ */
+static jfieldID FromReflectedField(JNIEnv* env, jobject field)
+{
+    JNI_ENTER();
+    jfieldID fieldID = (jfieldID) dvmGetFieldFromReflectObj((Object*)field);
+    JNI_EXIT();
+    return fieldID;
+}
+
+/*
+ * Convert a methodID to a java.lang.reflect.Method or .Constructor.
+ *
+ * (The "isStatic" field does not appear in the spec.)
+ *
+ * Throws OutOfMemory and returns NULL on failure.
+ */
+static jobject ToReflectedMethod(JNIEnv* env, jclass cls, jmethodID methodID,
+    jboolean isStatic)
+{
+    JNI_ENTER();
+    jobject obj;
+    obj = (jobject) dvmCreateReflectObjForMethod((ClassObject*) cls,
+            (Method*) methodID);
+    dvmReleaseTrackedAlloc(obj, NULL);
+    obj = addLocalReference(obj);
+    JNI_EXIT();
+    return obj;
+}
+
+/*
+ * Convert a fieldID to a java.lang.reflect.Field.
+ *
+ * (The "isStatic" field does not appear in the spec.)
+ *
+ * Throws OutOfMemory and returns NULL on failure.
+ */
+static jobject ToReflectedField(JNIEnv* env, jclass cls, jfieldID fieldID,
+    jboolean isStatic)
+{
+    JNI_ENTER();
+    jobject obj;
+    obj = (jobject) dvmCreateReflectObjForField((ClassObject*) cls,
+            (Field*) fieldID);
+    dvmReleaseTrackedAlloc(obj, NULL);
+    obj = addLocalReference(obj);
+    JNI_EXIT();
+    return obj;
+}
+
+
+/*
+ * Take this exception and throw it.
+ */
+static jint Throw(JNIEnv* env, jthrowable obj)
+{
+    JNI_ENTER();
+
+    jint retval;
+
+    if (obj != NULL) {
+        dvmSetException(_self, obj);
+        retval = JNI_OK;
+    } else
+        retval = JNI_ERR;
+
+    JNI_EXIT();
+    return retval;
+}
+
+/*
+ * Constructs an exeption object from the specified class with the message
+ * specified by "message", and throws it.
+ */
static jint ThrowNew(JNIEnv* env, jclass clazz, const char* message)
{
    JNI_ENTER();

    ClassObject* classObj = (ClassObject*) clazz;

    dvmThrowExceptionByClass(classObj, message);

    /* NOTE(review): unconditionally reports success; the JNI spec says a
     * negative value should be returned on failure (e.g. bad class) —
     * presumably dvmThrowExceptionByClass copes internally; verify */
    JNI_EXIT();
    return JNI_OK;
}
+
+/*
+ * If an exception is being thrown, return the exception object.  Otherwise,
+ * return NULL.
+ *
+ * TODO: if there is no pending exception, we should be able to skip the
+ * enter/exit checks.  If we find one, we need to enter and then re-fetch
+ * the exception (in case it got moved by a compacting GC).
+ */
+static jthrowable ExceptionOccurred(JNIEnv* env)
+{
+    JNI_ENTER();
+
+    Object* exception;
+    Object* localException;
+
+    exception = (Object*) dvmGetException(_self);
+    localException = addLocalReference(exception);
+    if (localException == NULL && exception != NULL) {
+        /*
+         * We were unable to add a new local reference, and threw a new
+         * exception.  We can't return "exception", because it's not a
+         * local reference.  So we have to return NULL, indicating that
+         * there was no exception, even though it's pretty much raining
+         * exceptions in here.
+         */
+        LOGW("JNI WARNING: addLocal/exception combo\n");
+    }
+
+    JNI_EXIT();
+    return localException;
+}
+
+/*
+ * Print an exception and stack trace to stderr.
+ */
+static void ExceptionDescribe(JNIEnv* env)
+{
+    JNI_ENTER();
+
+    Object* exception = dvmGetException(_self);
+    if (exception != NULL) {
+        dvmPrintExceptionStackTrace();
+    } else {
+        LOGI("Odd: ExceptionDescribe called, but no exception pending\n");
+    }
+
+    JNI_EXIT();
+}
+
+/*
+ * Clear the exception currently being thrown.
+ *
+ * TODO: we should be able to skip the enter/exit stuff.
+ */
static void ExceptionClear(JNIEnv* env)
{
    JNI_ENTER();
    /* drop the pending exception, if any, from the current thread */
    dvmClearException(_self);
    JNI_EXIT();
}
+
+/*
+ * Kill the VM.  This function does not return.
+ */
+static void FatalError(JNIEnv* env, const char* msg)
+{
+    //dvmChangeStatus(NULL, THREAD_RUNNING);
+    LOGE("JNI posting fatal error: %s\n", msg);
+    dvmAbort();
+}
+
+/*
+ * Push a new JNI frame on the stack, with a new set of locals.
+ *
+ * The new frame must have the same method pointer.  (If for no other
+ * reason than FindClass needs it to get the appropriate class loader.)
+ */
+static jint PushLocalFrame(JNIEnv* env, jint capacity)
+{
+    JNI_ENTER();
+    int result = JNI_OK;
+    if (!ensureLocalCapacity(capacity) ||
+        !dvmPushLocalFrame(_self /*dvmThreadSelf()*/, dvmGetCurrentJNIMethod()))
+    {
+        /* yes, OutOfMemoryError, not StackOverflowError */
+        dvmClearException(_self);
+        dvmThrowException("Ljava/lang/OutOfMemoryError;",
+            "out of stack in JNI PushLocalFrame");
+        result = JNI_ERR;
+    }
+    JNI_EXIT();
+    return result;
+}
+
+/*
+ * Pop the local frame off.  If "result" is not null, add it as a
+ * local reference on the now-current frame.
+ */
+static jobject PopLocalFrame(JNIEnv* env, jobject result)
+{
+    JNI_ENTER();
+    if (!dvmPopLocalFrame(_self /*dvmThreadSelf()*/)) {
+        LOGW("JNI WARNING: too many PopLocalFrame calls\n");
+        dvmClearException(_self);
+        dvmThrowException("Ljava/lang/RuntimeException;",
+            "too many PopLocalFrame calls");
+    }
+    result = addLocalReference(result);
+    JNI_EXIT();
+    return result;
+}
+
+/*
+ * Add a reference to the global list.
+ */
+static jobject NewGlobalRef(JNIEnv* env, jobject obj)
+{
+    JNI_ENTER();
+    jobject retval = addGlobalReference(obj);
+    JNI_EXIT();
+    return retval;
+}
+
+/*
+ * Delete a reference from the global list.
+ */
static void DeleteGlobalRef(JNIEnv* env, jobject globalRef)
{
    JNI_ENTER();
    /* remove the entry from the process-wide global reference table */
    deleteGlobalReference(globalRef);
    JNI_EXIT();
}
+
+
+/*
+ * Add a reference to the local list.
+ */
+static jobject NewLocalRef(JNIEnv* env, jobject ref)
+{
+    JNI_ENTER();
+
+    jobject retval = addLocalReference(ref);
+
+    JNI_EXIT();
+    return retval;
+}
+
+/*
+ * Delete a reference from the local list.
+ */
static void DeleteLocalRef(JNIEnv* env, jobject localRef)
{
    JNI_ENTER();
    /* remove the entry from the current frame's local reference table */
    deleteLocalReference(localRef);
    JNI_EXIT();
}
+
+/*
+ * Ensure that the local references table can hold at least this many
+ * references.
+ */
+static jint EnsureLocalCapacity(JNIEnv *env, jint capacity)
+{
+    JNI_ENTER();
+    bool okay = ensureLocalCapacity(capacity);
+    if (!okay) {
+        dvmThrowException("Ljava/lang/OutOfMemoryError;",
+            "can't ensure local reference capacity");
+    }
+    JNI_EXIT();
+    if (okay)
+        return 0;
+    else
+        return -1;
+}
+
+
+/*
+ * Determine whether two Object references refer to the same underlying object.
+ */
+static jboolean IsSameObject(JNIEnv* env, jobject ref1, jobject ref2)
+{
+    JNI_ENTER();
+    jboolean result = (ref1 == ref2);
+    JNI_EXIT();
+    return result;
+}
+
+/*
+ * Allocate a new object without invoking any constructors.
+ */
+static jobject AllocObject(JNIEnv* env, jclass jclazz)
+{
+    JNI_ENTER();
+
+    ClassObject* clazz = (ClassObject*) jclazz;
+    jobject newObj;
+
+    if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz)) {
+        assert(dvmCheckException(_self));
+        newObj = NULL;
+    } else {
+        newObj = (jobject) dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+        newObj = addLocalReference(newObj);
+    }
+
+    JNI_EXIT();
+    return newObj;
+}
+
+/*
+ * Construct a new object.
+ */
+static jobject NewObject(JNIEnv* env, jclass jclazz, jmethodID methodID, ...)
+{
+    JNI_ENTER();
+
+    ClassObject* clazz = (ClassObject*) jclazz;
+    jobject newObj;
+
+    if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz)) {
+        assert(dvmCheckException(_self));
+        newObj = NULL;
+    } else {
+        newObj = (jobject) dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+        newObj = addLocalReference(newObj);
+        if (newObj != NULL) {
+            JValue unused;
+            va_list args;
+            va_start(args, methodID);
+            dvmCallMethodV(_self, (Method*) methodID, (Object*)newObj, &unused,
+                args);
+            va_end(args);
+        }
+    }
+
+    JNI_EXIT();
+    return newObj;
+}
+static jobject NewObjectV(JNIEnv* env, jclass clazz, jmethodID methodID,
+    va_list args)
+{
+    JNI_ENTER();
+
+    jobject newObj;
+    newObj = (jobject) dvmAllocObject((ClassObject*) clazz, ALLOC_DONT_TRACK);
+    newObj = addLocalReference(newObj);
+    if (newObj != NULL) {
+        JValue unused;
+        dvmCallMethodV(_self, (Method*) methodID, (Object*)newObj, &unused,
+            args);
+    }
+
+    JNI_EXIT();
+    return newObj;
+}
+static jobject NewObjectA(JNIEnv* env, jclass clazz, jmethodID methodID,
+    jvalue* args)
+{
+    JNI_ENTER();
+
+    jobject newObj;
+    newObj = (jobject) dvmAllocObject((ClassObject*) clazz, ALLOC_DONT_TRACK);
+    newObj = addLocalReference(newObj);
+    if (newObj != NULL) {
+        JValue unused;
+        dvmCallMethodA(_self, (Method*) methodID, (Object*)newObj, &unused,
+            args);
+    }
+
+    JNI_EXIT();
+    return newObj;
+}
+
+/*
+ * Returns the class of an object.
+ *
+ * JNI spec says: obj must not be NULL.
+ */
+static jclass GetObjectClass(JNIEnv* env, jobject obj)
+{
+    JNI_ENTER();
+
+    assert(obj != NULL);
+
+    jclass clazz;
+    clazz = (jclass) ((Object*)obj)->clazz;
+    clazz = addLocalReference(clazz);
+
+    JNI_EXIT();
+    return clazz;
+}
+
+/*
+ * Determine whether "obj" is an instance of "clazz".
+ */
+static jboolean IsInstanceOf(JNIEnv* env, jobject obj, jclass clazz)
+{
+    JNI_ENTER();
+
+    jboolean result;
+
+    if (obj == NULL)
+        result = true;
+    else
+        result = dvmInstanceof(((Object*)obj)->clazz, (ClassObject*) clazz);
+
+    JNI_EXIT();
+    return result;
+}
+
+/*
+ * Get a method ID for an instance method.
+ *
+ * JNI defines <init> as an instance method, but Dalvik considers it a
+ * "direct" method, so we have to special-case it here.
+ *
+ * Dalvik also puts all private methods into the "direct" list, so we
+ * really need to just search both lists.
+ */
static jmethodID GetMethodID(JNIEnv* env, jclass jclazz, const char* name,
    const char* sig)
{
    JNI_ENTER();

    ClassObject* clazz = (ClassObject*) jclazz;
    jmethodID id = NULL;

    /* initialize the class first; on failure, return NULL with the
     * init exception pending */
    if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz)) {
        assert(dvmCheckException(_self));
    } else {
        Method* meth;

        /* virtual search walks the hierarchy */
        meth = dvmFindVirtualMethodHierByDescriptor(clazz, name, sig);
        if (meth == NULL) {
            /* search private methods and constructors; non-hierarchical */
            meth = dvmFindDirectMethodByDescriptor(clazz, name, sig);
        }
        /* static methods must go through GetStaticMethodID instead */
        if (meth != NULL && dvmIsStaticMethod(meth)) {
            IF_LOGD() {
                char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
                LOGD("GetMethodID: not returning static method %s.%s %s\n",
                    clazz->descriptor, meth->name, desc);
                free(desc);
            }
            meth = NULL;
        }
        if (meth == NULL) {
            LOGI("Method not found: '%s' '%s' in %s\n",
                name, sig, clazz->descriptor);
            dvmThrowException("Ljava/lang/NoSuchMethodError;", name);
        }

        /*
         * The method's class may not be the same as clazz, but if
         * it isn't this must be a virtual method and the class must
         * be a superclass (and, hence, already initialized).
         */
        if (meth != NULL) {
            assert(dvmIsClassInitialized(meth->clazz) ||
                   dvmIsClassInitializing(meth->clazz));
        }
        id = (jmethodID) meth;
    }
    JNI_EXIT();
    return id;
}
+
+/*
+ * Get a field ID (instance fields).
+ */
+static jfieldID GetFieldID(JNIEnv* env, jclass jclazz,
+    const char* name, const char* sig)
+{
+    JNI_ENTER();
+
+    ClassObject* clazz = (ClassObject*) jclazz;
+    jfieldID id;
+
+    if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz)) {
+        assert(dvmCheckException(_self));
+        id = NULL;
+    } else {
+        id = (jfieldID) dvmFindInstanceFieldHier(clazz, name, sig);
+        if (id == NULL)
+            dvmThrowException("Ljava/lang/NoSuchFieldError;", name);
+    }
+    JNI_EXIT();
+    return id;
+}
+
+/*
+ * Get the method ID for a static method in a class.
+ */
static jmethodID GetStaticMethodID(JNIEnv* env, jclass jclazz,
    const char* name, const char* sig)
{
    JNI_ENTER();

    ClassObject* clazz = (ClassObject*) jclazz;
    jmethodID id;

    /* initialize the class first; on failure, return NULL with the
     * init exception pending */
    if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz)) {
        assert(dvmCheckException(_self));
        id = NULL;
    } else {
        Method* meth;

        /* static methods live in the "direct" list; search the hierarchy */
        meth = dvmFindDirectMethodHierByDescriptor(clazz, name, sig);

        /* make sure it's static, not virtual+private */
        if (meth != NULL && !dvmIsStaticMethod(meth)) {
            IF_LOGD() {
                char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
                LOGD("GetStaticMethodID: "
                    "not returning nonstatic method %s.%s %s\n",
                    clazz->descriptor, meth->name, desc);
                free(desc);
            }
            meth = NULL;
        }

        id = (jmethodID) meth;
        if (id == NULL)
            dvmThrowException("Ljava/lang/NoSuchMethodError;", name);
    }

    JNI_EXIT();
    return id;
}
+
+/*
+ * Get a field ID (static fields).
+ */
+static jfieldID GetStaticFieldID(JNIEnv* env, jclass jclazz,
+    const char* name, const char* sig)
+{
+    JNI_ENTER();
+
+    ClassObject* clazz = (ClassObject*) jclazz;
+    jfieldID id;
+
+    if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz)) {
+        assert(dvmCheckException(_self));
+        id = NULL;
+    } else {
+        id = (jfieldID) dvmFindStaticField(clazz, name, sig);
+        if (id == NULL)
+            dvmThrowException("Ljava/lang/NoSuchFieldError;", name);
+    }
+    JNI_EXIT();
+    return id;
+}
+
/*
 * Get a static field.
 *
 * If we get an object reference, add it to the local refs list.
 *
 * The macro expands to one GetStatic<Type>Field function per primitive
 * type plus Object.  "_isref" is a compile-time constant, so the branch
 * is resolved at compile time; it must only be true when _ctype is
 * jobject (the (u4) casts are only valid for reference values).
 */
#define GET_STATIC_TYPE_FIELD(_ctype, _jname, _isref)                       \
    static _ctype GetStatic##_jname##Field(JNIEnv* env, jclass clazz,       \
        jfieldID fieldID)                                                   \
    {                                                                       \
        UNUSED_PARAMETER(clazz);                                            \
        JNI_ENTER();                                                        \
        StaticField* sfield = (StaticField*) fieldID;                       \
        _ctype value = dvmGetStaticField##_jname(sfield);                   \
        if (_isref)     /* only when _ctype==jobject */                     \
            value = (_ctype)(u4)addLocalReference((jobject)(u4)value);      \
        JNI_EXIT();                                                         \
        return value;                                                       \
    }
GET_STATIC_TYPE_FIELD(jobject, Object, true);
GET_STATIC_TYPE_FIELD(jboolean, Boolean, false);
GET_STATIC_TYPE_FIELD(jbyte, Byte, false);
GET_STATIC_TYPE_FIELD(jchar, Char, false);
GET_STATIC_TYPE_FIELD(jshort, Short, false);
GET_STATIC_TYPE_FIELD(jint, Int, false);
GET_STATIC_TYPE_FIELD(jlong, Long, false);
GET_STATIC_TYPE_FIELD(jfloat, Float, false);
GET_STATIC_TYPE_FIELD(jdouble, Double, false);
+
/*
 * Set a static field.
 *
 * Expands to one SetStatic<Type>Field function per primitive type plus
 * Object.  NOTE(review): the "_jvfld" argument is not referenced in the
 * expansion — presumably kept for symmetry with a jvalue-based variant.
 */
#define SET_STATIC_TYPE_FIELD(_ctype, _jname, _jvfld)                       \
    static void SetStatic##_jname##Field(JNIEnv* env, jclass clazz,         \
        jfieldID fieldID, _ctype value)                                     \
    {                                                                       \
        UNUSED_PARAMETER(clazz);                                            \
        JNI_ENTER();                                                        \
        StaticField* sfield = (StaticField*) fieldID;                       \
        dvmSetStaticField##_jname(sfield, value);                           \
        JNI_EXIT();                                                         \
    }
SET_STATIC_TYPE_FIELD(jobject, Object, l);
SET_STATIC_TYPE_FIELD(jboolean, Boolean, z);
SET_STATIC_TYPE_FIELD(jbyte, Byte, b);
SET_STATIC_TYPE_FIELD(jchar, Char, c);
SET_STATIC_TYPE_FIELD(jshort, Short, s);
SET_STATIC_TYPE_FIELD(jint, Int, i);
SET_STATIC_TYPE_FIELD(jlong, Long, j);
SET_STATIC_TYPE_FIELD(jfloat, Float, f);
SET_STATIC_TYPE_FIELD(jdouble, Double, d);
+
/*
 * Get an instance field.
 *
 * If we get an object reference, add it to the local refs list.
 *
 * Expands to one Get<Type>Field function per primitive type plus Object.
 * "_isref" is a compile-time constant; it must only be true when _ctype
 * is jobject.
 */
#define GET_TYPE_FIELD(_ctype, _jname, _isref)                              \
    static _ctype Get##_jname##Field(JNIEnv* env, jobject obj,              \
        jfieldID fieldID)                                                   \
    {                                                                       \
        JNI_ENTER();                                                        \
        InstField* field = (InstField*) fieldID;                            \
        _ctype value = dvmGetField##_jname((Object*) obj,field->byteOffset);\
        if (_isref)     /* only when _ctype==jobject */                     \
            value = (_ctype)(u4)addLocalReference((jobject)(u4)value);      \
        JNI_EXIT();                                                         \
        return value;                                                       \
    }
GET_TYPE_FIELD(jobject, Object, true);
GET_TYPE_FIELD(jboolean, Boolean, false);
GET_TYPE_FIELD(jbyte, Byte, false);
GET_TYPE_FIELD(jchar, Char, false);
GET_TYPE_FIELD(jshort, Short, false);
GET_TYPE_FIELD(jint, Int, false);
GET_TYPE_FIELD(jlong, Long, false);
GET_TYPE_FIELD(jfloat, Float, false);
GET_TYPE_FIELD(jdouble, Double, false);
+
/*
 * Set an instance field.
 *
 * Expands to one Set<Type>Field function per primitive type plus Object.
 * The field offset comes straight from the InstField, so no type check
 * is performed here.
 */
#define SET_TYPE_FIELD(_ctype, _jname)                                      \
    static void Set##_jname##Field(JNIEnv* env, jobject obj,                \
        jfieldID fieldID, _ctype value)                                     \
    {                                                                       \
        JNI_ENTER();                                                        \
        InstField* field = (InstField*) fieldID;                            \
        dvmSetField##_jname((Object*) obj, field->byteOffset, value);       \
        JNI_EXIT();                                                         \
    }
SET_TYPE_FIELD(jobject, Object);
SET_TYPE_FIELD(jboolean, Boolean);
SET_TYPE_FIELD(jbyte, Byte);
SET_TYPE_FIELD(jchar, Char);
SET_TYPE_FIELD(jshort, Short);
SET_TYPE_FIELD(jint, Int);
SET_TYPE_FIELD(jlong, Long);
SET_TYPE_FIELD(jfloat, Float);
SET_TYPE_FIELD(jdouble, Double);
+
+/*
+ * Make a virtual method call.
+ *
+ * Three versions (..., va_list, jvalue[]) for each return type.  If we're
+ * returning an Object, we have to add it to the local references table.
+ */
+#define CALL_VIRTUAL(_ctype, _jname, _retfail, _retok, _isref)              \
+    static _ctype Call##_jname##Method(JNIEnv* env, jobject obj,            \
+        jmethodID methodID, ...)                                            \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        Object* dobj = (Object*) obj;                                       \
+        const Method* meth;                                                 \
+        va_list args;                                                       \
+        JValue result;                                                      \
+        meth = dvmGetVirtualizedMethod(dobj->clazz, (Method*)methodID);     \
+        if (meth == NULL) {                                                 \
+            JNI_EXIT();                                                     \
+            return _retfail;                                                \
+        }                                                                   \
+        va_start(args, methodID);                                           \
+        dvmCallMethodV(_self, meth, dobj, &result, args);                   \
+        va_end(args);                                                       \
+        if (_isref)                                                         \
+            result.l = addLocalReference(result.l);                         \
+        JNI_EXIT();                                                         \
+        return _retok;                                                      \
+    }                                                                       \
+    static _ctype Call##_jname##MethodV(JNIEnv* env, jobject obj,           \
+        jmethodID methodID, va_list args)                                   \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        Object* dobj = (Object*) obj;                                       \
+        const Method* meth;                                                 \
+        JValue result;                                                      \
+        meth = dvmGetVirtualizedMethod(dobj->clazz, (Method*)methodID);     \
+        if (meth == NULL) {                                                 \
+            JNI_EXIT();                                                     \
+            return _retfail;                                                \
+        }                                                                   \
+        dvmCallMethodV(_self, meth, dobj, &result, args);                   \
+        if (_isref)                                                         \
+            result.l = addLocalReference(result.l);                         \
+        JNI_EXIT();                                                         \
+        return _retok;                                                      \
+    }                                                                       \
+    static _ctype Call##_jname##MethodA(JNIEnv* env, jobject obj,           \
+        jmethodID methodID, jvalue* args)                                   \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        Object* dobj = (Object*) obj;                                       \
+        const Method* meth;                                                 \
+        JValue result;                                                      \
+        meth = dvmGetVirtualizedMethod(dobj->clazz, (Method*)methodID);     \
+        if (meth == NULL) {                                                 \
+            JNI_EXIT();                                                     \
+            return _retfail;                                                \
+        }                                                                   \
+        dvmCallMethodA(_self, meth, dobj, &result, args);                   \
+        if (_isref)                                                         \
+            result.l = addLocalReference(result.l);                         \
+        JNI_EXIT();                                                         \
+        return _retok;                                                      \
+    }
+/* Instantiate the three Call<Type>Method variants for each return type.
+ * Only the Object flavor (_isref=true) wraps its result in a local ref. */
+CALL_VIRTUAL(jobject, Object, NULL, result.l, true);
+CALL_VIRTUAL(jboolean, Boolean, 0, result.z, false);
+CALL_VIRTUAL(jbyte, Byte, 0, result.b, false);
+CALL_VIRTUAL(jchar, Char, 0, result.c, false);
+CALL_VIRTUAL(jshort, Short, 0, result.s, false);
+CALL_VIRTUAL(jint, Int, 0, result.i, false);
+CALL_VIRTUAL(jlong, Long, 0, result.j, false);
+CALL_VIRTUAL(jfloat, Float, 0.0f, result.f, false);
+CALL_VIRTUAL(jdouble, Double, 0.0, result.d, false);
+CALL_VIRTUAL(void, Void, , , false);
+
+/*
+ * Make a "non-virtual" method call.  We're still calling a virtual method,
+ * but this time we're not doing an indirection through the object's vtable.
+ * The "clazz" parameter defines which implementation of a method we want.
+ *
+ * Three versions (..., va_list, jvalue[]) for each return type.
+ */
+/* NOTE(review): in the "..." variant below, va_end() runs after the _isref
+ * result fixup, whereas CALL_VIRTUAL calls va_end() right after the call;
+ * harmless either way, since "args" is not touched after dvmCallMethodV. */
+#define CALL_NONVIRTUAL(_ctype, _jname, _retfail, _retok, _isref)           \
+    static _ctype CallNonvirtual##_jname##Method(JNIEnv* env, jobject obj,  \
+        jclass clazz, jmethodID methodID, ...)                              \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        Object* dobj = (Object*) obj;                                       \
+        const Method* meth;                                                 \
+        va_list args;                                                       \
+        JValue result;                                                      \
+        meth = dvmGetVirtualizedMethod((ClassObject*)clazz,                 \
+                (Method*)methodID);                                         \
+        if (meth == NULL) {                                                 \
+            JNI_EXIT();                                                     \
+            return _retfail;                                                \
+        }                                                                   \
+        va_start(args, methodID);                                           \
+        dvmCallMethodV(_self, meth, dobj, &result, args);                   \
+        if (_isref)                                                         \
+            result.l = addLocalReference(result.l);                         \
+        va_end(args);                                                       \
+        JNI_EXIT();                                                         \
+        return _retok;                                                      \
+    }                                                                       \
+    static _ctype CallNonvirtual##_jname##MethodV(JNIEnv* env, jobject obj, \
+        jclass clazz, jmethodID methodID, va_list args)                     \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        Object* dobj = (Object*) obj;                                       \
+        const Method* meth;                                                 \
+        JValue result;                                                      \
+        meth = dvmGetVirtualizedMethod((ClassObject*)clazz,                 \
+                (Method*)methodID);                                         \
+        if (meth == NULL) {                                                 \
+            JNI_EXIT();                                                     \
+            return _retfail;                                                \
+        }                                                                   \
+        dvmCallMethodV(_self, meth, dobj, &result, args);                   \
+        if (_isref)                                                         \
+            result.l = addLocalReference(result.l);                         \
+        JNI_EXIT();                                                         \
+        return _retok;                                                      \
+    }                                                                       \
+    static _ctype CallNonvirtual##_jname##MethodA(JNIEnv* env, jobject obj, \
+        jclass clazz, jmethodID methodID, jvalue* args)                     \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        Object* dobj = (Object*) obj;                                       \
+        const Method* meth;                                                 \
+        JValue result;                                                      \
+        meth = dvmGetVirtualizedMethod((ClassObject*)clazz,                 \
+                (Method*)methodID);                                         \
+        if (meth == NULL) {                                                 \
+            JNI_EXIT();                                                     \
+            return _retfail;                                                \
+        }                                                                   \
+        dvmCallMethodA(_self, meth, dobj, &result, args);                   \
+        if (_isref)                                                         \
+            result.l = addLocalReference(result.l);                         \
+        JNI_EXIT();                                                         \
+        return _retok;                                                      \
+    }
+/* Instantiate for every return type; Object (_isref=true) boxes the result
+ * as a local reference before it crosses the JNI boundary. */
+CALL_NONVIRTUAL(jobject, Object, NULL, result.l, true);
+CALL_NONVIRTUAL(jboolean, Boolean, 0, result.z, false);
+CALL_NONVIRTUAL(jbyte, Byte, 0, result.b, false);
+CALL_NONVIRTUAL(jchar, Char, 0, result.c, false);
+CALL_NONVIRTUAL(jshort, Short, 0, result.s, false);
+CALL_NONVIRTUAL(jint, Int, 0, result.i, false);
+CALL_NONVIRTUAL(jlong, Long, 0, result.j, false);
+CALL_NONVIRTUAL(jfloat, Float, 0.0f, result.f, false);
+CALL_NONVIRTUAL(jdouble, Double, 0.0, result.d, false);
+CALL_NONVIRTUAL(void, Void, , , false);
+
+
+/*
+ * Call a static method.
+ */
+/* "clazz" is used only by the sanity assert: the Method* passed as methodID
+ * fully identifies the static method, and dvmCallMethod* gets NULL "this". */
+#define CALL_STATIC(_ctype, _jname, _retfail, _retok, _isref)               \
+    static _ctype CallStatic##_jname##Method(JNIEnv* env, jclass clazz,     \
+        jmethodID methodID, ...)                                            \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        JValue result;                                                      \
+        va_list args;                                                       \
+        assert((ClassObject*) clazz == ((Method*)methodID)->clazz);         \
+        va_start(args, methodID);                                           \
+        dvmCallMethodV(_self, (Method*) methodID, NULL, &result, args);     \
+        va_end(args);                                                       \
+        if (_isref)                                                         \
+            result.l = addLocalReference(result.l);                         \
+        JNI_EXIT();                                                         \
+        return _retok;                                                      \
+    }                                                                       \
+    static _ctype CallStatic##_jname##MethodV(JNIEnv* env, jclass clazz,    \
+        jmethodID methodID, va_list args)                                   \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        JValue result;                                                      \
+        assert((ClassObject*) clazz == ((Method*)methodID)->clazz);         \
+        dvmCallMethodV(_self, (Method*) methodID, NULL, &result, args);     \
+        if (_isref)                                                         \
+            result.l = addLocalReference(result.l);                         \
+        JNI_EXIT();                                                         \
+        return _retok;                                                      \
+    }                                                                       \
+    static _ctype CallStatic##_jname##MethodA(JNIEnv* env, jclass clazz,    \
+        jmethodID methodID, jvalue* args)                                   \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        JValue result;                                                      \
+        assert((ClassObject*) clazz == ((Method*)methodID)->clazz);         \
+        dvmCallMethodA(_self, (Method*) methodID, NULL, &result, args);     \
+        if (_isref)                                                         \
+            result.l = addLocalReference(result.l);                         \
+        JNI_EXIT();                                                         \
+        return _retok;                                                      \
+    }
+/* Instantiate for every return type (note: no _retfail path here, since
+ * there is no method lookup that can fail). */
+CALL_STATIC(jobject, Object, NULL, result.l, true);
+CALL_STATIC(jboolean, Boolean, 0, result.z, false);
+CALL_STATIC(jbyte, Byte, 0, result.b, false);
+CALL_STATIC(jchar, Char, 0, result.c, false);
+CALL_STATIC(jshort, Short, 0, result.s, false);
+CALL_STATIC(jint, Int, 0, result.i, false);
+CALL_STATIC(jlong, Long, 0, result.j, false);
+CALL_STATIC(jfloat, Float, 0.0f, result.f, false);
+CALL_STATIC(jdouble, Double, 0.0, result.d, false);
+CALL_STATIC(void, Void, , , false);
+
+/*
+ * Create a new String from Unicode data.
+ *
+ * If "len" is zero, we will return an empty string even if "unicodeChars"
+ * is NULL.  (The JNI spec is vague here.)
+ */
+static jstring NewString(JNIEnv* env, const jchar* unicodeChars, jsize len)
+{
+    JNI_ENTER();
+
+    StringObject* jstr;
+    jstr = dvmCreateStringFromUnicode(unicodeChars, len);
+    if (jstr != NULL) {
+        /* success: un-pin the tracked allocation and hand the caller a
+         * local reference instead; on failure we fall through and return
+         * NULL (presumably with an exception pending -- TODO confirm) */
+        dvmReleaseTrackedAlloc((Object*) jstr, NULL);
+        jstr = addLocalReference((jstring) jstr);
+    }
+
+    JNI_EXIT();
+    return jstr;
+}
+
+/*
+ * Return the length of a String in Unicode character units.
+ */
+static jsize GetStringLength(JNIEnv* env, jstring string)
+{
+    JNI_ENTER();
+
+    /* no NULL check on "string" here; presumably CheckJNI catches that
+     * during development -- NOTE(review) */
+    jsize len = dvmStringLen((StringObject*) string);
+
+    JNI_EXIT();
+    return len;
+}
+
+/*
+ * Get a pointer to the string's character data.
+ *
+ * The result is guaranteed to be valid until ReleaseStringChars is
+ * called, which means we can't just hold a reference to it in the local
+ * refs table.  We have to add it to the global refs.
+ *
+ * Technically, we don't need to hold a reference to the String, but rather
+ * to the Char[] object within the String.
+ *
+ * We could also just allocate some storage and copy the data into it,
+ * but it's a choice between our synchronized global reference table and
+ * libc's synchronized heap allocator.
+ */
+static const jchar* GetStringChars(JNIEnv* env, jstring string,
+    jboolean* isCopy)
+{
+    JNI_ENTER();
+
+    const u2* data = dvmStringChars((StringObject*) string);
+    /* pin the String with a global reference so the chars stay valid
+     * until ReleaseStringChars drops it */
+    addGlobalReference(string);
+
+    /* we never copy: the pointer goes straight into the String's data */
+    if (isCopy != NULL)
+        *isCopy = JNI_FALSE;
+
+    JNI_EXIT();
+    return (jchar*)data;
+}
+
+/*
+ * Release our grip on some characters from a string.
+ */
+static void ReleaseStringChars(JNIEnv* env, jstring string, const jchar* chars)
+{
+    JNI_ENTER();
+    /* drop the global ref taken by GetStringChars; "chars" is unused since
+     * we never hand out a copy */
+    deleteGlobalReference(string);
+    JNI_EXIT();
+}
+
+/*
+ * Create a new java.lang.String object from chars in modified UTF-8 form.
+ *
+ * The spec doesn't say how to handle a NULL string.  Popular desktop VMs
+ * accept it and return a NULL pointer in response.
+ */
+static jstring NewStringUTF(JNIEnv* env, const char* bytes)
+{
+    JNI_ENTER();
+
+    StringObject* newStr;
+    
+    if (bytes == NULL) {
+        /* tolerate NULL input like the desktop VMs (see comment above) */
+        newStr = NULL;
+    } else {
+        newStr = dvmCreateStringFromCstr(bytes, ALLOC_DEFAULT);
+        if (newStr != NULL) {
+            /* un-pin the tracked allocation; a local ref keeps it alive */
+            dvmReleaseTrackedAlloc((Object*)newStr, NULL);
+            newStr = addLocalReference((jstring) newStr);
+        }
+    }
+
+    JNI_EXIT();
+    return (jstring)newStr;
+}
+
+/*
+ * Return the length in bytes of the modified UTF-8 form of the string.
+ */
+static jsize GetStringUTFLength(JNIEnv* env, jstring string)
+{
+    JNI_ENTER();
+
+    /* no NULL check on "string"; presumably caught by CheckJNI -- NOTE */
+    jsize len = dvmStringUtf8ByteLen((StringObject*) string);
+
+    JNI_EXIT();
+    return len;
+}
+
+/*
+ * Convert "string" to modified UTF-8 and return a pointer.  The returned
+ * value must be released with ReleaseStringUTFChars.
+ *
+ * According to the JNI reference, "Returns a pointer to a UTF-8 string,
+ * or NULL if the operation fails. Returns NULL if and only if an invocation
+ * of this function has thrown an exception."
+ *
+ * The behavior here currently follows that of other open-source VMs, which
+ * quietly return NULL if "string" is NULL.  We should consider throwing an
+ * NPE.  (The CheckJNI code blows up if you try to pass in a NULL string,
+ * which should catch this sort of thing during development.)  Certain other
+ * VMs will crash with a segmentation fault.
+ */
+static const char* GetStringUTFChars(JNIEnv* env, jstring string,
+    jboolean* isCopy)
+{
+    JNI_ENTER();
+    char* newStr;
+
+    if (string == NULL) {
+        /* this shouldn't happen; throw NPE? */
+        newStr = NULL;
+    } else {
+        /* always a heap-allocated copy; caller releases it through
+         * ReleaseStringUTFChars, which free()s it */
+        if (isCopy != NULL)
+            *isCopy = JNI_TRUE;
+
+        newStr = dvmCreateCstrFromString((StringObject*) string);
+        if (newStr == NULL) {
+            /* assume memory failure */
+            dvmThrowException("Ljava/lang/OutOfMemoryError;",
+                "native heap string alloc failed");
+        }
+    }
+
+    JNI_EXIT();
+    return newStr;
+}
+
+/*
+ * Release a string created by GetStringUTFChars().
+ */
+static void ReleaseStringUTFChars(JNIEnv* env, jstring string, const char* utf)
+{
+    JNI_ENTER();
+    /* "string" is unused: the UTF-8 data is always a malloc'd copy, so we
+     * just free it (cast discards const) */
+    free((char*)utf);
+    JNI_EXIT();
+}
+
+/*
+ * Return the capacity of the array.
+ */
+static jsize GetArrayLength(JNIEnv* env, jarray array)
+{
+    JNI_ENTER();
+
+    /* no NULL or type check on "array"; presumably CheckJNI validates */
+    jsize length = ((ArrayObject*) array)->length;
+
+    JNI_EXIT();
+    return length;
+}
+
+/*
+ * Construct a new array that holds objects from class "elementClass".
+ */
+static jobjectArray NewObjectArray(JNIEnv* env, jsize length,
+    jclass elementClass, jobject initialElement)
+{
+    JNI_ENTER();
+
+    ClassObject* elemClassObj = (ClassObject*) elementClass;
+    ArrayObject* newObj = NULL;
+
+    if (elemClassObj == NULL) {
+        dvmThrowException("Ljava/lang/NullPointerException;",
+            "JNI NewObjectArray");
+        goto bail;
+    }
+
+    newObj = dvmAllocObjectArray(elemClassObj, length, ALLOC_DEFAULT);
+    if (newObj == NULL) {
+        /* allocator is expected to have thrown (e.g. OOM) */
+        assert(dvmCheckException(_self));
+        goto bail;
+    }
+    dvmReleaseTrackedAlloc((Object*) newObj, NULL);
+
+    /*
+     * Initialize the array.  Trashes "length".
+     *
+     * NOTE(review): initialElement is stored without an instanceof check
+     * against elementClass.
+     */
+    if (initialElement != NULL) {
+        Object** arrayData = (Object**) newObj->contents;
+
+        while (length--)
+            *arrayData++ = (Object*) initialElement;
+    }
+
+    newObj = addLocalReference((jobjectArray) newObj);
+
+bail:
+    JNI_EXIT();
+    return (jobjectArray) newObj;
+}
+
+/*
+ * Get one element of an Object array.
+ *
+ * Add the object to the local references table in case the array goes away.
+ */
+static jobject GetObjectArrayElement(JNIEnv* env, jobjectArray array,
+    jsize index)
+{
+    JNI_ENTER();
+
+    ArrayObject* arrayObj = (ArrayObject*) array;
+    Object* value = NULL;
+
+    assert(array != NULL);
+
+    /* check the array bounds; throws AIOOBE and returns NULL on failure */
+    if (index < 0 || index >= (int) arrayObj->length) {
+        dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;",
+            arrayObj->obj.clazz->descriptor);
+        goto bail;
+    }
+
+    /* local-ref the element so it survives even if the array goes away */
+    value = ((Object**) arrayObj->contents)[index];
+    value = addLocalReference(value);
+
+bail:
+    JNI_EXIT();
+    return (jobject) value;
+}
+
+/*
+ * Set one element of an Object array.
+ */
+static void SetObjectArrayElement(JNIEnv* env, jobjectArray array,
+    jsize index, jobject value)
+{
+    JNI_ENTER();
+
+    ArrayObject* arrayObj = (ArrayObject*) array;
+
+    assert(array != NULL);
+
+    /* check the array bounds */
+    if (index < 0 || index >= (int) arrayObj->length) {
+        dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;",
+            arrayObj->obj.clazz->descriptor);
+        goto bail;
+    }
+
+    //LOGV("JNI: set element %d in array %p to %p\n", index, array, value);
+
+    /* NOTE(review): "value" is stored without checking that it is
+     * assignment-compatible with the array's element type */
+    ((Object**) arrayObj->contents)[index] = (Object*) value;
+
+bail:
+    JNI_EXIT();
+}
+
+/*
+ * Create a new array of primitive elements.
+ */
+/* On allocation failure arrayObj stays NULL and we return NULL, presumably
+ * with an exception pending from dvmAllocPrimitiveArray -- TODO confirm. */
+#define NEW_PRIMITIVE_ARRAY(_artype, _jname, _typechar)                     \
+    static _artype New##_jname##Array(JNIEnv* env, jsize length)            \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        ArrayObject* arrayObj;                                              \
+        arrayObj = dvmAllocPrimitiveArray(_typechar, length,                \
+            ALLOC_DEFAULT);                                                 \
+        if (arrayObj != NULL) {                                             \
+            dvmReleaseTrackedAlloc((Object*) arrayObj, NULL);               \
+            arrayObj = addLocalReference(arrayObj);                         \
+        }                                                                   \
+        JNI_EXIT();                                                         \
+        return (_artype)arrayObj;                                           \
+    }
+NEW_PRIMITIVE_ARRAY(jbooleanArray, Boolean, 'Z');
+NEW_PRIMITIVE_ARRAY(jbyteArray, Byte, 'B');
+NEW_PRIMITIVE_ARRAY(jcharArray, Char, 'C');
+NEW_PRIMITIVE_ARRAY(jshortArray, Short, 'S');
+NEW_PRIMITIVE_ARRAY(jintArray, Int, 'I');
+NEW_PRIMITIVE_ARRAY(jlongArray, Long, 'J');
+NEW_PRIMITIVE_ARRAY(jfloatArray, Float, 'F');
+NEW_PRIMITIVE_ARRAY(jdoubleArray, Double, 'D');
+
+/*
+ * Get a pointer to a C array of primitive elements from an array object
+ * of the matching type.
+ *
+ * We guarantee availability until Release is called, so we have to add
+ * the array object to the global refs table.
+ *
+ * In a compacting GC, we either need to return a copy of the elements
+ * or "pin" the memory.  Otherwise we run the risk of native code using
+ * the buffer as the destination of a blocking read() call that wakes up
+ * during a GC.
+ */
+/* isCopy is always JNI_FALSE: "data" points directly into the array object,
+ * which is pinned via a global reference until the matching Release call. */
+#define GET_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname)                        \
+    static _ctype* Get##_jname##ArrayElements(JNIEnv* env,                  \
+        _ctype##Array array, jboolean* isCopy)                              \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        _ctype* data;                                                       \
+        ArrayObject* arrayObj = (ArrayObject*)array;                        \
+        addGlobalReference(arrayObj);                                       \
+        data = (_ctype*) arrayObj->contents;                                \
+        if (isCopy != NULL)                                                 \
+            *isCopy = JNI_FALSE;                                            \
+        JNI_EXIT();                                                         \
+        return data;                                                        \
+    }
+
+/*
+ * Release the storage locked down by the "get" function.
+ *
+ * The API says, ""'mode' has no effect if 'elems' is not a copy of the
+ * elements in 'array'."  They apparently did not anticipate the need to
+ * create a global reference to avoid GC race conditions.  We actually
+ * want to delete the global reference in all circumstances that would
+ * result in a copied array being freed.  This means anything but a
+ * JNI_COMMIT release.
+ */
+/* Drops the global ref taken by Get<Type>ArrayElements unless this is a
+ * JNI_COMMIT release (see block comment above); "elems" is never used. */
+#define RELEASE_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname)                    \
+    static void Release##_jname##ArrayElements(JNIEnv* env,                 \
+        _ctype##Array array, _ctype* elems, jint mode)                      \
+    {                                                                       \
+        UNUSED_PARAMETER(elems);                                            \
+        JNI_ENTER();                                                        \
+        if (mode != JNI_COMMIT)                                             \
+            deleteGlobalReference(array);                                   \
+        JNI_EXIT();                                                         \
+    }
+
+/*
+ * Copy a section of a primitive array to a buffer.
+ */
+/* NOTE(review): "start + len" could overflow jsize for pathological inputs
+ * before the comparison; negative start/len are rejected explicitly. */
+#define GET_PRIMITIVE_ARRAY_REGION(_ctype, _jname)                          \
+    static void Get##_jname##ArrayRegion(JNIEnv* env,                       \
+        _ctype##Array array, jsize start, jsize len, _ctype* buf)           \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        ArrayObject* arrayObj = (ArrayObject*)array;                        \
+        _ctype* data = (_ctype*) arrayObj->contents;                        \
+        if (start < 0 || len < 0 || start + len > (int) arrayObj->length) { \
+            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+                arrayObj->obj.clazz->descriptor);                           \
+        } else {                                                            \
+            memcpy(buf, data + start, len * sizeof(_ctype));                \
+        }                                                                   \
+        JNI_EXIT();                                                         \
+    }
+
+/*
+ * Copy a section of a buffer into a primitive array.
+ */
+#define SET_PRIMITIVE_ARRAY_REGION(_ctype, _jname)                          \
+    static void Set##_jname##ArrayRegion(JNIEnv* env,                       \
+        _ctype##Array array, jsize start, jsize len, const _ctype* buf)     \
+    {                                                                       \
+        JNI_ENTER();                                                        \
+        ArrayObject* arrayObj = (ArrayObject*)array;                        \
+        _ctype* data = (_ctype*) arrayObj->contents;                        \
+        if (start < 0 || len < 0 || start + len > (int) arrayObj->length) { \
+            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+                arrayObj->obj.clazz->descriptor);                           \
+        } else {                                                            \
+            memcpy(data + start, buf, len * sizeof(_ctype));                \
+        }                                                                   \
+        JNI_EXIT();                                                         \
+    }
+
+/*
+ * 4-in-1:
+ *  Get<Type>ArrayElements
+ *  Release<Type>ArrayElements
+ *  Get<Type>ArrayRegion
+ *  Set<Type>ArrayRegion
+ */
+/* NOTE(review): each expansion leaves a stray ';' after a function
+ * definition (an empty declaration); gcc tolerates it, -pedantic may warn. */
+#define PRIMITIVE_ARRAY_FUNCTIONS(_ctype, _jname)                           \
+    GET_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname);                           \
+    RELEASE_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname);                       \
+    GET_PRIMITIVE_ARRAY_REGION(_ctype, _jname);                             \
+    SET_PRIMITIVE_ARRAY_REGION(_ctype, _jname);
+
+PRIMITIVE_ARRAY_FUNCTIONS(jboolean, Boolean);
+PRIMITIVE_ARRAY_FUNCTIONS(jbyte, Byte);
+PRIMITIVE_ARRAY_FUNCTIONS(jchar, Char);
+PRIMITIVE_ARRAY_FUNCTIONS(jshort, Short);
+PRIMITIVE_ARRAY_FUNCTIONS(jint, Int);
+PRIMITIVE_ARRAY_FUNCTIONS(jlong, Long);
+PRIMITIVE_ARRAY_FUNCTIONS(jfloat, Float);
+PRIMITIVE_ARRAY_FUNCTIONS(jdouble, Double);
+
+/*
+ * Register one or more native functions in one class.
+ */
+static jint RegisterNatives(JNIEnv* env, jclass clazz,
+    const JNINativeMethod* methods, jint nMethods)
+{
+    JNI_ENTER();
+
+    jint retval;
+    int i;
+
+    if (gDvm.verboseJni) {
+        LOGI("[Registering JNI native methods for class %s]\n",
+            ((ClassObject*) clazz)->descriptor);
+    }
+
+    /* register each method; bail on the first failure (methods already
+     * registered in earlier iterations are not rolled back) */
+    for (i = 0; i < nMethods; i++) {
+        if (!dvmRegisterJNIMethod((ClassObject*) clazz,
+                methods[i].name, methods[i].signature, methods[i].fnPtr))
+        {
+            retval = JNI_ERR;
+            goto bail;
+        }
+    }
+    retval = JNI_OK;
+
+bail:
+    JNI_EXIT();
+    return retval;
+}
+
+/*
+ * Un-register a native function.
+ */
+static jint UnregisterNatives(JNIEnv* env, jclass clazz)
+{
+    JNI_ENTER();
+    /*
+     * The JNI docs refer to this as a way to reload/relink native libraries,
+     * and say it "should not be used in normal native code".
+     *
+     * We can implement it if we decide we need it.  Until then this
+     * unconditionally reports failure.
+     */
+    JNI_EXIT();
+    return JNI_ERR;
+}
+
+/*
+ * Lock the monitor.
+ *
+ * We have to track all monitor enters and exits, so that we can undo any
+ * outstanding synchronization before the thread exits.
+ */
+static jint MonitorEnter(JNIEnv* env, jobject obj)
+{
+    JNI_ENTER();
+    dvmLockObject(_self, (Object*) obj);
+    /* record the lock so outstanding synchronization can be undone if the
+     * thread exits while holding it (see comment above) */
+    trackMonitorEnter(_self, (Object*) obj);
+    JNI_EXIT();
+    /* always reports success; dvmLockObject presumably cannot fail here
+     * -- TODO confirm */
+    return JNI_OK;
+}
+
+/*
+ * Unlock the monitor.
+ *
+ * Throws an IllegalMonitorStateException if the current thread
+ * doesn't own the monitor. (dvmUnlockObject() takes care of the throw.)
+ *
+ * According to the 1.6 spec, it's legal to call here with an exception
+ * pending.  If this fails, we'll stomp the original exception.
+ */
+static jint MonitorExit(JNIEnv* env, jobject obj)
+{
+    JNI_ENTER();
+    bool success = dvmUnlockObject(_self, (Object*) obj);
+    /* only un-track the monitor if we actually released it */
+    if (success)
+        trackMonitorExit(_self, (Object*) obj);
+    JNI_EXIT();
+    return success ? JNI_OK : JNI_ERR;
+}
+
+/*
+ * Return the JavaVM interface associated with the current thread.
+ */
+static jint GetJavaVM(JNIEnv* env, JavaVM** vm)
+{
+    JNI_ENTER();
+    //*vm = gDvm.vmList;
+    /* return the VM pointer cached in the per-env JNIEnvExt wrapper */
+    *vm = (JavaVM*) ((JNIEnvExt*)env)->vm;
+    JNI_EXIT();
+    if (*vm == NULL)
+        return JNI_ERR;
+    else
+        return JNI_OK;
+}
+
+/*
+ * Copies "len" Unicode characters, from offset "start".
+ */
+/*
+ * Copies "len" Unicode characters, from offset "start", into "buf".
+ *
+ * Throws StringIndexOutOfBoundsException if the region is out of range.
+ */
+static void GetStringRegion(JNIEnv* env, jstring str, jsize start, jsize len,
+    jchar* buf)
+{
+    JNI_ENTER();
+    StringObject* strObj = (StringObject*) str;
+    /* reject negative start/len explicitly; checking only "start + len"
+     * would let a negative value slip past the bound and make memcpy
+     * read out of range (matches the primitive-array region checks) */
+    if (start < 0 || len < 0 || start + len > dvmStringLen(strObj))
+        dvmThrowException("Ljava/lang/StringIndexOutOfBoundsException;", NULL);
+    else
+        memcpy(buf, dvmStringChars(strObj) + start, len * sizeof(u2));
+    JNI_EXIT();
+}
+
+/*
+ * Translates "len" Unicode characters, from offset "start", into
+ * modified UTF-8 encoding.
+ */
+/*
+ * Translates "len" Unicode characters, from offset "start", into
+ * modified UTF-8 encoding in "buf".
+ *
+ * Throws StringIndexOutOfBoundsException if the region is out of range.
+ */
+static void GetStringUTFRegion(JNIEnv* env, jstring str, jsize start,
+    jsize len, char* buf)
+{
+    JNI_ENTER();
+    StringObject* strObj = (StringObject*) str;
+    /* reject negative start/len explicitly; checking only "start + len"
+     * would let a negative value slip past the bound (matches the
+     * primitive-array region checks) */
+    if (start < 0 || len < 0 || start + len > dvmStringLen(strObj))
+        dvmThrowException("Ljava/lang/StringIndexOutOfBoundsException;", NULL);
+    else
+        dvmCreateCstrFromStringRegion(strObj, start, len, buf);
+    JNI_EXIT();
+}
+
+/*
+ * Get a raw pointer to array data.
+ *
+ * The caller is expected to call "release" before doing any JNI calls
+ * or blocking I/O operations.
+ *
+ * In a compacting GC, we need to pin the memory or block GC.
+ */
+static void* GetPrimitiveArrayCritical(JNIEnv* env, jarray array,
+    jboolean* isCopy)
+{
+    JNI_ENTER();
+    void* data;
+    ArrayObject* arrayObj = (ArrayObject*)array;
+    /* same approach as Get<Type>ArrayElements: pin with a global ref and
+     * hand out a direct pointer; never a copy */
+    addGlobalReference(arrayObj);
+    data = arrayObj->contents;
+    if (isCopy != NULL)
+        *isCopy = JNI_FALSE;
+    JNI_EXIT();
+    return data;
+}
+
+/*
+ * Release an array obtained with GetPrimitiveArrayCritical.
+ */
+static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array,
+    void* carray, jint mode)
+{
+    JNI_ENTER();
+    /* drop the pin unless the caller asked for JNI_COMMIT; "carray" unused */
+    if (mode != JNI_COMMIT)
+        deleteGlobalReference(array);
+    JNI_EXIT();
+}
+
+/*
+ * Like GetStringChars, but with restricted use.
+ */
+static const jchar* GetStringCritical(JNIEnv* env, jstring string,
+    jboolean* isCopy)
+{
+    JNI_ENTER();
+    /* identical to GetStringChars: direct pointer, pinned by a global ref */
+    const u2* data = dvmStringChars((StringObject*) string);
+    addGlobalReference(string);
+
+    if (isCopy != NULL)
+        *isCopy = JNI_FALSE;
+
+    JNI_EXIT();
+    return (jchar*)data;
+}
+
+/*
+ * Like ReleaseStringChars, but with restricted use.
+ */
+static void ReleaseStringCritical(JNIEnv* env, jstring string,
+    const jchar* carray)
+{
+    JNI_ENTER();
+    /* drop the global ref taken by GetStringCritical; "carray" unused */
+    deleteGlobalReference(string);
+    JNI_EXIT();
+}
+
+/*
+ * Create a new weak global reference.
+ */
+static jweak NewWeakGlobalRef(JNIEnv* env, jobject obj)
+{
+    JNI_ENTER();
+    // TODO - implement
+    jobject gref = NULL;
+    LOGE("JNI ERROR: NewWeakGlobalRef not implemented\n");
+    /* dvmAbort presumably does not return; the code below is for form
+     * only -- TODO confirm */
+    dvmAbort();
+    JNI_EXIT();
+    return gref;
+}
+
+/*
+ * Delete the specified weak global reference.
+ */
+static void DeleteWeakGlobalRef(JNIEnv* env, jweak obj)
+{
+    JNI_ENTER();
+    // TODO - implement
+    LOGE("JNI ERROR: DeleteWeakGlobalRef not implemented\n");
+    /* aborts the VM; JNI_EXIT is never reached in practice -- presumably */
+    dvmAbort();
+    JNI_EXIT();
+}
+
+/*
+ * Quick check for pending exceptions.
+ *
+ * TODO: we should be able to skip the enter/exit macros here.
+ */
+static jboolean ExceptionCheck(JNIEnv* env)
+{
+    JNI_ENTER();
+    /* true iff an exception is pending on the current thread */
+    bool result = dvmCheckException(_self);
+    JNI_EXIT();
+    return result;
+}
+
+/*
+ * Returns the type of the object referred to by "obj".  It can be local,
+ * global, or weak global.
+ *
+ * In the current implementation, references can be global and local at
+ * the same time, so while the return value is accurate it may not tell
+ * the whole story.
+ */
+static jobjectRefType GetObjectRefType(JNIEnv* env, jobject obj)
+{
+    JNI_ENTER();
+    jobjectRefType type;
+    
+    /* NULL is not a reference of any kind */
+    if (obj == NULL)
+        type = JNIInvalidRefType;
+    else
+        type = dvmGetJNIRefType(obj);
+    JNI_EXIT();
+    return type;
+}
+
+/*
+ * Allocate and return a new java.nio.ByteBuffer for this block of memory.
+ *
+ * ** IMPORTANT **  This function is not considered to be internal to the
+ * VM.  It may make JNI calls but must not examine or update internal VM
+ * state.  It is not protected by JNI_ENTER/JNI_EXIT.
+ *
+ * "address" may not be NULL.  We only test for that when JNI checks are
+ * enabled.
+ * 
+ * copied from harmony: DirectBufferUtil.c
+ */
+static jobject NewDirectByteBuffer(JNIEnv * env, void* address, jlong capacity)
+{
+    jmethodID newBufferMethod;
+    jclass directBufferClass;
+    jclass platformaddressClass;
+    jobject platformaddress;
+    jmethodID onMethod;
+
+    directBufferClass = (*env)->FindClass(env,
+            "java/nio/ReadWriteDirectByteBuffer");
+    if (!directBufferClass)
+    {
+        return NULL;
+    }
+
+    newBufferMethod = (*env)->GetMethodID(env, directBufferClass, "<init>",
+            "(Lorg/apache/harmony/luni/platform/PlatformAddress;II)V");
+    if (!newBufferMethod)
+    {
+        return NULL;
+    }
+
+    platformaddressClass = (*env)->FindClass(env,
+            "org/apache/harmony/luni/platform/PlatformAddressFactory");
+    if (!platformaddressClass)
+    {
+        return NULL;
+    }
+
+    onMethod = (*env)->GetStaticMethodID(env, platformaddressClass, "on",
+            "(I)Lorg/apache/harmony/luni/platform/PlatformAddress;");
+    if (!onMethod)
+    {
+        return NULL;
+    }
+
+    /* "address" is narrowed to a jint; assumes 32-bit pointers (the
+     * target platforms here) -- TODO confirm if this ever goes 64-bit */
+    platformaddress = (*env)->CallStaticObjectMethod(env, platformaddressClass,
+            onMethod, (jint)address);
+
+    /*
+     * If PlatformAddressFactory.on() threw (or somehow returned nothing),
+     * bail out now; calling NewObject with a pending exception or a NULL
+     * address object would be wrong.
+     */
+    if ((*env)->ExceptionCheck(env) || platformaddress == NULL)
+    {
+        return NULL;
+    }
+
+    return (*env)->NewObject(env, directBufferClass, newBufferMethod,
+            platformaddress, (jint)capacity, (jint)0);
+}
+
+/*
+ * Get the starting address of the buffer for the specified java.nio.Buffer.
+ *
+ * ** IMPORTANT **  This function is not considered to be internal to the
+ * VM.  It may make JNI calls but must not examine or update internal VM
+ * state.  It is not protected by JNI_ENTER/JNI_EXIT.
+ *
+ * copied from harmony: DirectBufferUtil.c
+ */
+static void* GetDirectBufferAddress(JNIEnv * env, jobject buf)
+{
+    jmethodID tempMethod;
+    jclass tempClass;
+    jobject platformAddr;
+    jclass platformAddrClass;
+    jmethodID toLongMethod;
+
+    tempClass = (*env)->FindClass(env,
+            "org/apache/harmony/nio/internal/DirectBuffer");
+    if (!tempClass)
+    {
+        return NULL;
+    }
+
+    /* only direct buffers have a base address */
+    if (JNI_FALSE == (*env)->IsInstanceOf(env, buf, tempClass))
+    {
+        return NULL;
+    }
+
+    tempMethod = (*env)->GetMethodID(env, tempClass, "getBaseAddress",
+             "()Lorg/apache/harmony/luni/platform/PlatformAddress;");
+    if (!tempMethod)
+    {
+        return NULL;
+    }
+
+    platformAddr = (*env)->CallObjectMethod(env, buf, tempMethod);
+
+    /*
+     * If getBaseAddress() threw, or returned no PlatformAddress, we must
+     * not go on to invoke toLong() against a NULL object.
+     */
+    if ((*env)->ExceptionCheck(env) || platformAddr == NULL)
+    {
+        return NULL;
+    }
+
+    platformAddrClass = (*env)->FindClass (env,
+            "org/apache/harmony/luni/platform/PlatformAddress");
+    if (!platformAddrClass)
+    {
+        return NULL;
+    }
+
+    toLongMethod = (*env)->GetMethodID(env, platformAddrClass, "toLong", "()J");
+    if (!toLongMethod)
+    {
+        return NULL;
+    }
+
+    /* narrowed through u4; assumes 32-bit pointers -- TODO confirm */
+    return (void*)(u4)(*env)->CallLongMethod(env, platformAddr, toLongMethod);
+}
+
+/*
+ * Get the capacity of the buffer for the specified java.nio.Buffer.
+ *
+ * ** IMPORTANT **  This function is not considered to be internal to the
+ * VM.  It may make JNI calls but must not examine or update internal VM
+ * state.  It is not protected by JNI_ENTER/JNI_EXIT.
+ *
+ * copied from harmony: DirectBufferUtil.c
+ */
+static jlong GetDirectBufferCapacity(JNIEnv * env, jobject buf)
+{
+    /*
+     * Reject anything that doesn't implement the internal DirectBuffer
+     * interface, then read java.nio.Buffer's "capacity" field directly.
+     * Returns -1 on any lookup failure or non-direct buffer.
+     */
+    jclass dbClass = (*env)->FindClass(env,
+            "org/apache/harmony/nio/internal/DirectBuffer");
+    if (!dbClass)
+        return -1;
+
+    if ((*env)->IsInstanceOf(env, buf, dbClass) == JNI_FALSE)
+        return -1;
+
+    jclass bufClass = (*env)->FindClass(env, "java/nio/Buffer");
+    if (!bufClass)
+        return -1;
+
+    jfieldID capField = (*env)->GetFieldID(env, bufClass, "capacity", "I");
+    if (!capField)
+        return -1;
+
+    return (*env)->GetIntField(env, buf, capField);
+}
+
+
+/*
+ * ===========================================================================
+ *      JNI invocation functions
+ * ===========================================================================
+ */
+
+/*
+ * Handle AttachCurrentThread{AsDaemon}.
+ *
+ * We need to make sure the VM is actually running.  For example, if we start
+ * up, issue an Attach, and the VM exits almost immediately, by the time the
+ * attaching happens the VM could already be shutting down.
+ *
+ * It's hard to avoid a race condition here because we don't want to hold
+ * a lock across the entire operation.  What we can do is temporarily
+ * increment the thread count to prevent a VM exit.
+ *
+ * This could potentially still have problems if a daemon thread calls here
+ * while the VM is shutting down.  dvmThreadSelf() will work, since it just
+ * uses pthread TLS, but dereferencing "vm" could fail.  Such is life when
+ * you shut down a VM while threads are still running inside it.
+ *
+ * Remember that some code may call this as a way to find the per-thread
+ * JNIEnv pointer.  Don't do excess work for that case.
+ */
+static jint attachThread(JavaVM* vm, JNIEnv** p_env, void* thr_args,
+    bool isDaemon)
+{
+    JavaVMAttachArgs* args = (JavaVMAttachArgs*) thr_args;
+    Thread* self;
+    bool result = false;
+
+    /*
+     * Return immediately if we're already one with the VM.
+     */
+    self = dvmThreadSelf();
+    if (self != NULL) {
+        *p_env = self->jniEnv;
+        return JNI_OK;
+    }
+
+    /*
+     * No threads allowed in zygote mode.
+     */
+    if (gDvm.zygote) {
+        return JNI_ERR;
+    }
+
+    /* increment the count to keep the VM from bailing while we run */
+    /* (bumped even for daemon attaches -- it only guards this window,
+     * and is dropped again right after dvmAttachCurrentThread) */
+    dvmLockThreadList(NULL);
+    if (gDvm.nonDaemonThreadCount == 0) {
+        // dead or dying
+        dvmUnlockThreadList();
+        return JNI_ERR;
+    }
+    gDvm.nonDaemonThreadCount++;
+    dvmUnlockThreadList();
+
+    /* tweak the JavaVMAttachArgs as needed */
+    JavaVMAttachArgs argsCopy;
+    if (args == NULL) {
+        /* allow the v1.1 calling convention */
+        argsCopy.version = JNI_VERSION_1_2;
+        argsCopy.name = NULL;
+        argsCopy.group = dvmGetMainThreadGroup();
+    } else {
+        /* assert-only: release builds silently accept older versions */
+        assert(args->version >= JNI_VERSION_1_2);
+
+        argsCopy.version = args->version;
+        argsCopy.name = args->name;
+        if (args->group != NULL)
+            argsCopy.group = args->group;
+        else
+            argsCopy.group = dvmGetMainThreadGroup();
+    }
+
+    result = dvmAttachCurrentThread(&argsCopy, isDaemon);
+
+    /* restore the count */
+    dvmLockThreadList(NULL);
+    gDvm.nonDaemonThreadCount--;
+    dvmUnlockThreadList();
+
+    /*
+     * Change the status to indicate that we're out in native code.  This
+     * call is not guarded with state-change macros, so we have to do it
+     * by hand.
+     */
+    if (result) {
+        /* re-fetch; dvmAttachCurrentThread created the Thread struct */
+        self = dvmThreadSelf();
+        assert(self != NULL);
+        dvmChangeStatus(self, THREAD_NATIVE);
+        *p_env = self->jniEnv;
+        return JNI_OK;
+    } else {
+        return JNI_ERR;
+    }
+}
+
+/*
+ * Attach the current thread to the VM.  If the thread is already attached,
+ * this is a no-op.
+ */
+static jint AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* thr_args)
+{
+    /* "thr_args" is a JavaVMAttachArgs* or NULL; see attachThread() */
+    return attachThread(vm, p_env, thr_args, false);
+}
+
+/*
+ * Like AttachCurrentThread, but set the "daemon" flag.
+ */
+static jint AttachCurrentThreadAsDaemon(JavaVM* vm, JNIEnv** p_env,
+    void* thr_args)
+{
+    /* daemon threads don't keep the VM alive; see attachThread() */
+    return attachThread(vm, p_env, thr_args, true);
+}
+
+/*
+ * Dissociate the current thread from the VM.
+ */
+static jint DetachCurrentThread(JavaVM* vm)
+{
+    Thread* self = dvmThreadSelf();
+
+    if (self == NULL)               /* not attached, can't do anything */
+        return JNI_ERR;
+
+    /* switch to "running" to check for suspension */
+    dvmChangeStatus(self, THREAD_RUNNING);
+
+    /* detach the thread; after this, "self" is no longer valid */
+    dvmDetachCurrentThread();
+
+    /* (no need to change status back -- we have no status) */
+    return JNI_OK;
+}
+
+/*
+ * If current thread is attached to VM, return the associated JNIEnv.
+ * Otherwise, stuff NULL in and return JNI_EDETACHED.
+ *
+ * JVMTI overloads this by specifying a magic value for "version", so we
+ * do want to check that here.
+ */
+static jint GetEnv(JavaVM* vm, void** env, jint version)
+{
+    /* reject anything outside the JNI 1.1..1.6 range */
+    if (version < JNI_VERSION_1_1 || version > JNI_VERSION_1_6)
+        return JNI_EVERSION;
+
+    Thread* self = dvmThreadSelf();
+    if (self == NULL) {
+        *env = NULL;
+    } else {
+        /* TODO: status change is probably unnecessary */
+        dvmChangeStatus(self, THREAD_RUNNING);
+        *env = (void*) dvmGetThreadJNIEnv(self);
+        dvmChangeStatus(self, THREAD_NATIVE);
+    }
+
+    return (*env == NULL) ? JNI_EDETACHED : JNI_OK;
+}
+
+/*
+ * Destroy the VM.  This may be called from any thread.
+ *
+ * If the current thread is attached, wait until the current thread is
+ * the only non-daemon user-level thread.  If the current thread is not
+ * attached, we attach it and do the processing as usual.  (If the attach
+ * fails, it's probably because all the non-daemon threads have already
+ * exited and the VM doesn't want to let us back in.)
+ *
+ * TODO: we don't really deal with the situation where more than one thread
+ * has called here.  One thread wins, the other stays trapped waiting on
+ * the condition variable forever.  Not sure this situation is interesting
+ * in real life.
+ */
+static jint DestroyJavaVM(JavaVM* vm)
+{
+    JavaVMExt* ext = (JavaVMExt*) vm;
+    Thread* self;
+
+    if (ext == NULL)
+        return JNI_ERR;
+
+    LOGD("DestroyJavaVM waiting for non-daemon threads to exit\n");
+
+    /*
+     * Sleep on a condition variable until it's okay to exit.
+     */
+    self = dvmThreadSelf();
+    if (self == NULL) {
+        JNIEnv* tmpEnv;
+        if (AttachCurrentThread(vm, &tmpEnv, NULL) != JNI_OK) {
+            LOGV("+++ Unable to attach for Destroy;"
+                 " assuming VM is shutting down\n");
+            goto shutdown;
+        } else {
+            LOGV("+++ Attached to wait for shutdown in Destroy\n");
+        }
+    }
+    /*
+     * NOTE(review): if we attached just above, "self" is still NULL here.
+     * dvmChangeStatus(NULL, ...) is used elsewhere (JNI_CreateJavaVM), so
+     * NULL presumably means "current thread" -- confirm that
+     * dvmLockThreadList(NULL) below is equally tolerant.
+     */
+    dvmChangeStatus(self, THREAD_VMWAIT);
+
+    dvmLockThreadList(self);
+    gDvm.nonDaemonThreadCount--;    // remove current thread from count
+
+    /* wait for the last non-daemon thread to signal vmExitCond */
+    while (gDvm.nonDaemonThreadCount > 0)
+        pthread_cond_wait(&gDvm.vmExitCond, &gDvm.threadListLock);
+
+    dvmUnlockThreadList();
+    self = NULL;
+
+shutdown:
+    // TODO: call System.exit() to run any registered shutdown hooks
+    // (this may not return -- figure out how this should work)
+
+    LOGI("DestroyJavaVM shutting VM down\n");
+    dvmShutdown();
+
+    // TODO - free resources associated with JNI-attached daemon threads
+    free(ext->envList);
+    free(ext);
+
+    return JNI_OK;
+}
+
+
+/*
+ * ===========================================================================
+ *      Function tables
+ * ===========================================================================
+ */
+
+/* Dalvik's JNI function table; the positional initializers below must
+ * match the member order of struct JNINativeInterface in jni.h. */
+static const struct JNINativeInterface gNativeInterface = {
+    NULL,           /* reserved slot */
+    NULL,           /* reserved slot */
+    NULL,           /* reserved slot */
+    NULL,           /* reserved slot */
+
+    GetVersion,
+
+    DefineClass,
+    FindClass,
+
+    FromReflectedMethod,
+    FromReflectedField,
+    ToReflectedMethod,
+
+    GetSuperclass,
+    IsAssignableFrom,
+
+    ToReflectedField,
+
+    Throw,
+    ThrowNew,
+    ExceptionOccurred,
+    ExceptionDescribe,
+    ExceptionClear,
+    FatalError,
+
+    PushLocalFrame,
+    PopLocalFrame,
+
+    NewGlobalRef,
+    DeleteGlobalRef,
+    DeleteLocalRef,
+    IsSameObject,
+    NewLocalRef,
+    EnsureLocalCapacity,
+
+    AllocObject,
+    NewObject,
+    NewObjectV,
+    NewObjectA,
+
+    GetObjectClass,
+    IsInstanceOf,
+
+    GetMethodID,
+
+    CallObjectMethod,
+    CallObjectMethodV,
+    CallObjectMethodA,
+    CallBooleanMethod,
+    CallBooleanMethodV,
+    CallBooleanMethodA,
+    CallByteMethod,
+    CallByteMethodV,
+    CallByteMethodA,
+    CallCharMethod,
+    CallCharMethodV,
+    CallCharMethodA,
+    CallShortMethod,
+    CallShortMethodV,
+    CallShortMethodA,
+    CallIntMethod,
+    CallIntMethodV,
+    CallIntMethodA,
+    CallLongMethod,
+    CallLongMethodV,
+    CallLongMethodA,
+    CallFloatMethod,
+    CallFloatMethodV,
+    CallFloatMethodA,
+    CallDoubleMethod,
+    CallDoubleMethodV,
+    CallDoubleMethodA,
+    CallVoidMethod,
+    CallVoidMethodV,
+    CallVoidMethodA,
+
+    CallNonvirtualObjectMethod,
+    CallNonvirtualObjectMethodV,
+    CallNonvirtualObjectMethodA,
+    CallNonvirtualBooleanMethod,
+    CallNonvirtualBooleanMethodV,
+    CallNonvirtualBooleanMethodA,
+    CallNonvirtualByteMethod,
+    CallNonvirtualByteMethodV,
+    CallNonvirtualByteMethodA,
+    CallNonvirtualCharMethod,
+    CallNonvirtualCharMethodV,
+    CallNonvirtualCharMethodA,
+    CallNonvirtualShortMethod,
+    CallNonvirtualShortMethodV,
+    CallNonvirtualShortMethodA,
+    CallNonvirtualIntMethod,
+    CallNonvirtualIntMethodV,
+    CallNonvirtualIntMethodA,
+    CallNonvirtualLongMethod,
+    CallNonvirtualLongMethodV,
+    CallNonvirtualLongMethodA,
+    CallNonvirtualFloatMethod,
+    CallNonvirtualFloatMethodV,
+    CallNonvirtualFloatMethodA,
+    CallNonvirtualDoubleMethod,
+    CallNonvirtualDoubleMethodV,
+    CallNonvirtualDoubleMethodA,
+    CallNonvirtualVoidMethod,
+    CallNonvirtualVoidMethodV,
+    CallNonvirtualVoidMethodA,
+
+    GetFieldID,
+
+    GetObjectField,
+    GetBooleanField,
+    GetByteField,
+    GetCharField,
+    GetShortField,
+    GetIntField,
+    GetLongField,
+    GetFloatField,
+    GetDoubleField,
+    SetObjectField,
+    SetBooleanField,
+    SetByteField,
+    SetCharField,
+    SetShortField,
+    SetIntField,
+    SetLongField,
+    SetFloatField,
+    SetDoubleField,
+
+    GetStaticMethodID,
+
+    CallStaticObjectMethod,
+    CallStaticObjectMethodV,
+    CallStaticObjectMethodA,
+    CallStaticBooleanMethod,
+    CallStaticBooleanMethodV,
+    CallStaticBooleanMethodA,
+    CallStaticByteMethod,
+    CallStaticByteMethodV,
+    CallStaticByteMethodA,
+    CallStaticCharMethod,
+    CallStaticCharMethodV,
+    CallStaticCharMethodA,
+    CallStaticShortMethod,
+    CallStaticShortMethodV,
+    CallStaticShortMethodA,
+    CallStaticIntMethod,
+    CallStaticIntMethodV,
+    CallStaticIntMethodA,
+    CallStaticLongMethod,
+    CallStaticLongMethodV,
+    CallStaticLongMethodA,
+    CallStaticFloatMethod,
+    CallStaticFloatMethodV,
+    CallStaticFloatMethodA,
+    CallStaticDoubleMethod,
+    CallStaticDoubleMethodV,
+    CallStaticDoubleMethodA,
+    CallStaticVoidMethod,
+    CallStaticVoidMethodV,
+    CallStaticVoidMethodA,
+
+    GetStaticFieldID,
+
+    GetStaticObjectField,
+    GetStaticBooleanField,
+    GetStaticByteField,
+    GetStaticCharField,
+    GetStaticShortField,
+    GetStaticIntField,
+    GetStaticLongField,
+    GetStaticFloatField,
+    GetStaticDoubleField,
+
+    SetStaticObjectField,
+    SetStaticBooleanField,
+    SetStaticByteField,
+    SetStaticCharField,
+    SetStaticShortField,
+    SetStaticIntField,
+    SetStaticLongField,
+    SetStaticFloatField,
+    SetStaticDoubleField,
+
+    NewString,
+
+    GetStringLength,
+    GetStringChars,
+    ReleaseStringChars,
+
+    NewStringUTF,
+    GetStringUTFLength,
+    GetStringUTFChars,
+    ReleaseStringUTFChars,
+
+    GetArrayLength,
+    NewObjectArray,
+    GetObjectArrayElement,
+    SetObjectArrayElement,
+
+    NewBooleanArray,
+    NewByteArray,
+    NewCharArray,
+    NewShortArray,
+    NewIntArray,
+    NewLongArray,
+    NewFloatArray,
+    NewDoubleArray,
+
+    GetBooleanArrayElements,
+    GetByteArrayElements,
+    GetCharArrayElements,
+    GetShortArrayElements,
+    GetIntArrayElements,
+    GetLongArrayElements,
+    GetFloatArrayElements,
+    GetDoubleArrayElements,
+
+    ReleaseBooleanArrayElements,
+    ReleaseByteArrayElements,
+    ReleaseCharArrayElements,
+    ReleaseShortArrayElements,
+    ReleaseIntArrayElements,
+    ReleaseLongArrayElements,
+    ReleaseFloatArrayElements,
+    ReleaseDoubleArrayElements,
+
+    GetBooleanArrayRegion,
+    GetByteArrayRegion,
+    GetCharArrayRegion,
+    GetShortArrayRegion,
+    GetIntArrayRegion,
+    GetLongArrayRegion,
+    GetFloatArrayRegion,
+    GetDoubleArrayRegion,
+    SetBooleanArrayRegion,
+    SetByteArrayRegion,
+    SetCharArrayRegion,
+    SetShortArrayRegion,
+    SetIntArrayRegion,
+    SetLongArrayRegion,
+    SetFloatArrayRegion,
+    SetDoubleArrayRegion,
+
+    RegisterNatives,
+    UnregisterNatives,
+
+    MonitorEnter,
+    MonitorExit,
+
+    GetJavaVM,
+
+    GetStringRegion,
+    GetStringUTFRegion,
+
+    GetPrimitiveArrayCritical,
+    ReleasePrimitiveArrayCritical,
+
+    GetStringCritical,
+    ReleaseStringCritical,
+
+    NewWeakGlobalRef,
+    DeleteWeakGlobalRef,
+
+    ExceptionCheck,
+
+    NewDirectByteBuffer,
+    GetDirectBufferAddress,
+    GetDirectBufferCapacity,
+
+    GetObjectRefType
+};
+/* Invocation-interface table; order must match struct JNIInvokeInterface. */
+static const struct JNIInvokeInterface gInvokeInterface = {
+    NULL,           /* reserved slot */
+    NULL,           /* reserved slot */
+    NULL,           /* reserved slot */
+
+    DestroyJavaVM,
+    AttachCurrentThread,
+    DetachCurrentThread,
+
+    GetEnv,
+
+    AttachCurrentThreadAsDaemon,
+};
+
+
+/*
+ * ===========================================================================
+ *      VM/Env creation
+ * ===========================================================================
+ */
+
+/*
+ * Not supported.
+ */
+jint JNI_GetDefaultJavaVMInitArgs(void* vm_args)
+{
+    /* Dalvik provides no default init args; callers always get JNI_ERR. */
+    return JNI_ERR;
+}
+
+/*
+ * Return a buffer full of created VMs.
+ *
+ * We always have zero or one.
+ */
+jint JNI_GetCreatedJavaVMs(JavaVM** vmBuf, jsize bufLen, jsize* nVMs)
+{
+    /* This VM supports at most one instance per process. */
+    if (gDvm.vmList == NULL) {
+        *nVMs = 0;
+    } else {
+        *nVMs = 1;
+        if (bufLen > 0)
+            vmBuf[0] = gDvm.vmList;
+    }
+
+    return JNI_OK;
+}
+
+
+/*
+ * Create a new VM instance.
+ *
+ * The current thread becomes the main VM thread.  We return immediately,
+ * which effectively means the caller is executing in a native method.
+ */
+jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args)
+{
+    const JavaVMInitArgs* args = (JavaVMInitArgs*) vm_args;
+    JNIEnvExt* pEnv = NULL;
+    JavaVMExt* pVM = NULL;
+    const char** argv = NULL;
+    int argc = 0;
+    int i, curOpt;
+    int result = JNI_ERR;
+    bool checkJni = false;
+    bool checkJniFatal = true;
+
+    if (args->version < JNI_VERSION_1_2)
+        return JNI_EVERSION;
+
+    // TODO: don't allow creation of multiple VMs -- one per customer for now
+
+    /* zero globals; not strictly necessary the first time a VM is started */
+    memset(&gDvm, 0, sizeof(gDvm));
+
+    /*
+     * Set up the invocation-interface structure.  (The JNIEnv for the
+     * main thread is created later, by dvmCreateJNIEnv.)
+     */
+    pVM = (JavaVMExt*) malloc(sizeof(JavaVMExt));
+    if (pVM == NULL)            /* don't memset/dereference a failed alloc */
+        return JNI_ERR;
+    memset(pVM, 0, sizeof(JavaVMExt));
+    pVM->funcTable = &gInvokeInterface;
+    pVM->envList = pEnv;
+    dvmInitMutex(&pVM->envListLock);
+
+    argv = (const char**) malloc(sizeof(char*) * (args->nOptions));
+    if (argv == NULL) {
+        free(pVM);
+        return JNI_ERR;
+    }
+    memset(argv, 0, sizeof(char*) * (args->nOptions));
+
+    curOpt = 0;
+
+    /*
+     * Convert JNI args to argv.
+     *
+     * We have to pull out vfprintf/exit/abort, because they use the
+     * "extraInfo" field to pass function pointer "hooks" in.  We also
+     * look for the -Xcheck:jni arg here.
+     */
+    for (i = 0; i < args->nOptions; i++) {
+        const char* optStr = args->options[i].optionString;
+
+        if (optStr == NULL) {
+            fprintf(stderr, "ERROR: arg %d string was null\n", i);
+            goto bail;
+        } else if (strcmp(optStr, "vfprintf") == 0) {
+            gDvm.vfprintfHook = args->options[i].extraInfo;
+        } else if (strcmp(optStr, "exit") == 0) {
+            gDvm.exitHook = args->options[i].extraInfo;
+        } else if (strcmp(optStr, "abort") == 0) {
+            gDvm.abortHook = args->options[i].extraInfo;
+        } else if (strcmp(optStr, "-Xcheck:jni") == 0) {
+            checkJni = true;
+        } else if (strcmp(optStr, "-Xcheck:jni-warnonly") == 0) {
+            checkJni = true;
+            checkJniFatal = false;
+        } else {
+            /* regular option */
+            argv[curOpt++] = optStr;
+        }
+    }
+    argc = curOpt;
+
+    if (checkJni) {
+        dvmUseCheckedJniVm(pVM);
+        pVM->useChecked = true;
+    }
+
+    /* set this up before initializing VM, so it can create some JNIEnvs */
+    gDvm.vmList = (JavaVM*) pVM;
+
+    /*
+     * Create an env for main thread.  We need to have something set up
+     * here because some of the class initialization we do when starting
+     * up the VM will call into native code.
+     */
+    pEnv = (JNIEnvExt*) dvmCreateJNIEnv(NULL);
+
+    /* initialize VM */
+    gDvm.initializing = true;
+    if (dvmStartup(argc, argv, args->ignoreUnrecognized, (JNIEnv*)pEnv) != 0) {
+        gDvm.vmList = NULL;     /* don't leave a pointer to freed memory */
+        free(pEnv);
+        free(pVM);
+        goto bail;
+    }
+
+    /*
+     * NOTE(review): this stores the "fatal" flag in a field named
+     * jniWarnError -- the naming looks inverted; confirm the intended
+     * semantics before relying on it.
+     */
+    gDvm.jniWarnError = checkJniFatal;
+
+    /*
+     * Success!  Return stuff to caller.
+     */
+    dvmChangeStatus(NULL, THREAD_NATIVE);
+    *p_env = (JNIEnv*) pEnv;
+    *p_vm = (JavaVM*) pVM;
+    result = JNI_OK;
+
+bail:
+    gDvm.initializing = false;
+    if (result == JNI_OK)
+        LOGV("JNI_CreateJavaVM succeeded\n");
+    else
+        LOGW("JNI_CreateJavaVM failed\n");
+    free(argv);
+    return result;
+}
+
diff --git a/vm/JniInternal.h b/vm/JniInternal.h
new file mode 100644
index 0000000..65c0cba
--- /dev/null
+++ b/vm/JniInternal.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * JNI innards, common to the regular and "checked" interfaces.
+ */
+#ifndef _DALVIK_JNIINTERNAL
+#define _DALVIK_JNIINTERNAL
+
+#include "jni.h"
+
+/* system init/shutdown */
+bool dvmJniStartup(void);
+void dvmJniShutdown(void);
+
+/*
+ * Our data structures for JNIEnv and JavaVM.
+ *
+ * Native code thinks it has a pointer to a pointer.  We know better.
+ */
+struct JavaVMExt;
+
+typedef struct JNIEnvExt {
+    const struct JNINativeInterface* funcTable;     /* must be first */
+
+    /* presumably the unchecked table, kept so "checked" mode can chain
+     * through to the real implementations -- TODO confirm */
+    const struct JNINativeInterface* baseFuncTable;
+
+    /* pointer to the VM we are a part of */
+    struct JavaVMExt* vm;
+
+    u4      envThreadId;    /* owning thread's id (see dvmSetJniEnvThreadId) */
+    Thread* self;           /* owning Thread, set alongside envThreadId */
+    int     critical;       /* NOTE(review): likely critical-section nesting
+                             * depth -- confirm against Get*Critical impls */
+
+    /* links for the per-VM env list anchored at JavaVMExt.envList */
+    struct JNIEnvExt* prev;
+    struct JNIEnvExt* next;
+} JNIEnvExt;
+
+typedef struct JavaVMExt {
+    const struct JNIInvokeInterface* funcTable;     /* must be first */
+
+    /* presumably the unchecked invoke table saved by checked-JNI mode --
+     * TODO confirm */
+    const struct JNIInvokeInterface* baseFuncTable;
+
+    /* if multiple VMs are desired, add doubly-linked list stuff here */
+
+    /* true when -Xcheck:jni wrapped the tables (set in JNI_CreateJavaVM) */
+    bool    useChecked;
+
+    /* head of list of JNIEnvs associated with this VM */
+    JNIEnvExt*      envList;
+    pthread_mutex_t envListLock;
+} JavaVMExt;
+
+/*
+ * Native function return type; used by dvmPlatformInvoke().
+ */
+typedef enum DalvikJniReturnType {
+    DALVIK_JNI_RETURN_VOID = 0,     /* must be zero */
+    DALVIK_JNI_RETURN_FLOAT,        /* 32-bit float result */
+    DALVIK_JNI_RETURN_DOUBLE,       /* 64-bit float result */
+    DALVIK_JNI_RETURN_S8,           /* 64-bit integer result */
+    DALVIK_JNI_RETURN_S4            /* 32-bit (or smaller) integer result */
+} DalvikJniReturnType;
+
+#define DALVIK_JNI_NO_ARG_INFO  0x80000000
+#define DALVIK_JNI_RETURN_MASK  0x70000000
+#define DALVIK_JNI_RETURN_SHIFT 28
+#define DALVIK_JNI_COUNT_MASK   0x0f000000
+#define DALVIK_JNI_COUNT_SHIFT  24
+
+
+/*
+ * Pop the JNI local stack when we return from a native method.  "saveArea"
+ * points to the StackSaveArea for the method we're leaving.
+ */
+INLINE void dvmPopJniLocals(Thread* self, StackSaveArea* saveArea)
+{
+    /* log only if the native method actually created local refs */
+    if (saveArea->xtra.localRefTop != self->jniLocalRefTable.nextEntry) {
+        LOGVV("LREF: popped %d entries (%d remain)\n",
+            (int)(self->jniLocalRefTable.nextEntry-saveArea->xtra.localRefTop),
+            (int)(saveArea->xtra.localRefTop - self->jniLocalRefTable.table));
+    }
+    /* roll the local-ref table back to its position at method entry */
+    self->jniLocalRefTable.nextEntry = saveArea->xtra.localRefTop;
+}
+
+/*
+ * Set the envThreadId field.
+ */
+INLINE void dvmSetJniEnvThreadId(JNIEnv* pEnv, Thread* self)
+{
+    /* Record the owning thread in the env structure. */
+    JNIEnvExt* ext = (JNIEnvExt*) pEnv;
+    ext->envThreadId = self->threadId;
+    ext->self = self;
+}
+
+/*
+ * JNI call bridges.  Not usually called directly.
+ */
+void dvmCallJNIMethod(const u4* args, JValue* pResult, const Method* method,
+    Thread* self);
+void dvmCallSynchronizedJNIMethod(const u4* args, JValue* pResult,
+    const Method* method, Thread* self);
+
+/*
+ * Enable the "checked" versions.
+ */
+void dvmUseCheckedJniEnv(JNIEnvExt* pEnv);
+void dvmUseCheckedJniVm(JavaVMExt* pVm);
+
+/*
+ * Verify that a reference passed in from native code is valid.  Returns
+ * an indication of local/global/invalid.
+ */
+jobjectRefType dvmGetJNIRefType(Object* obj);
+
+/*
+ * Get the last method called on the interp stack.  This is the method
+ * "responsible" for calling into JNI.
+ */
+const Method* dvmGetCurrentJNIMethod(void);
+
+/*
+ * Create/destroy a JNIEnv for the current thread.
+ */
+JNIEnv* dvmCreateJNIEnv(Thread* self);
+void dvmDestroyJNIEnv(JNIEnv* env);
+
+/*
+ * Find the JNIEnv associated with the current thread.
+ */
+JNIEnvExt* dvmGetJNIEnvForThread(void);
+
+/*
+ * Extract the return type enum from the "jniArgInfo" value.
+ */
+DalvikJniReturnType dvmGetArgInfoReturnType(int jniArgInfo);
+
+/*
+ * Release all MonitorEnter-acquired locks that are still held.  Called at
+ * DetachCurrentThread time.
+ */
+void dvmReleaseJniMonitors(Thread* self);
+
+#endif /*_DALVIK_JNIINTERNAL*/
diff --git a/vm/LinearAlloc.c b/vm/LinearAlloc.c
new file mode 100644
index 0000000..77802ee
--- /dev/null
+++ b/vm/LinearAlloc.c
@@ -0,0 +1,688 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Linear memory allocation, tied to class loaders.
+ */
+#include "Dalvik.h"
+
+#include <sys/mman.h>
+#include <limits.h>
+#include <errno.h>
+
+//#define DISABLE_LINEAR_ALLOC
+
+// Use ashmem to name the LinearAlloc section
+#define USE_ASHMEM 1
+
+#ifdef USE_ASHMEM
+#include <cutils/ashmem.h>
+#endif /* USE_ASHMEM */
+
+/*
+Overview
+
+This is intended to be a simple, fast allocator for "write-once" storage.
+The expectation is that this will hold small allocations that don't change,
+such as parts of classes (vtables, fields, methods, interfaces).  Because
+the lifetime of these items is tied to classes, which in turn are tied
+to class loaders, we associate the storage with a ClassLoader object.
+
+[ We don't yet support class unloading, and our ClassLoader implementation
+is in flux, so for now we just have a single global region and the
+"classLoader" argument is ignored. ]
+
+By storing the data here, rather than on the system heap, we reduce heap
+clutter, speed class loading, reduce the memory footprint (reduced heap
+structure overhead), and most importantly we increase the number of pages
+that remain shared between processes launched in "Zygote mode".
+
+The 4 bytes preceding each block contain the block length.  This allows us
+to support "free" and "realloc" calls in a limited way.  We don't free
+storage once it has been allocated, but in some circumstances it could be
+useful to erase storage to garbage values after a "free" or "realloc".
+(Bad idea if we're trying to share pages.)  We need to align to 8-byte
+boundaries for some architectures, so we have a 50-50 chance of getting
+this for free in a given block.
+
+A NULL value for the "classLoader" argument refers to the bootstrap class
+loader, which is never unloaded (until the VM shuts down).
+
+Because the memory is not expected to be updated, we can use mprotect to
+guard the pages on debug builds.  Handy when tracking down corruption.
+*/
+
+/* alignment for allocations; must be power of 2, and currently >= hdr_xtra */
+#define BLOCK_ALIGN         8
+
+/* default length of memory segment (worst case is probably "dexopt") */
+#define DEFAULT_MAX_LENGTH  (4*1024*1024)
+
+/* leave enough space for a length word */
+#define HEADER_EXTRA        4
+
+/* overload the length word */
+#define LENGTHFLAG_FREE    0x80000000
+#define LENGTHFLAG_RW      0x40000000
+#define LENGTHFLAG_MASK    (~(LENGTHFLAG_FREE|LENGTHFLAG_RW))
+
+/* in case limits.h doesn't have it; must be a power of 2 */
+#ifndef PAGESIZE
+# define PAGESIZE           4096
+#endif
+
+
+/* fwd */
+static void checkAllFree(Object* classLoader);
+
+
+/*
+ * Map a class loader to its linear alloc region.  Until per-loader
+ * regions exist, every loader shares the bootstrap loader's region.
+ */
+static inline LinearAllocHdr* getHeader(Object* classLoader)
+{
+    (void) classLoader;     /* unused until per-loader regions arrive */
+    return gDvm.pBootLoaderAlloc;
+}
+
+/*
+ * Back up from a user pointer to its hidden block header (currently just
+ * a single length word stored immediately before the block).
+ */
+static inline u4* getBlockHeader(void* mem)
+{
+    u4* words = (u4*) mem;
+    return words - 1;
+}
+
+/*
+ * Create a new linear allocation block.
+ *
+ * Maps a DEFAULT_MAX_LENGTH region (named via ashmem when available),
+ * protects everything but the first usable page PROT_NONE, and returns
+ * a header describing the region.  Returns NULL on failure.
+ */
+LinearAllocHdr* dvmLinearAllocCreate(Object* classLoader)
+{
+#ifdef DISABLE_LINEAR_ALLOC
+    return (LinearAllocHdr*) 0x12345;
+#endif
+    LinearAllocHdr* pHdr;
+
+    pHdr = (LinearAllocHdr*) malloc(sizeof(*pHdr));
+    if (pHdr == NULL)           /* don't dereference a failed malloc */
+        return NULL;
+
+    /*
+     * "curOffset" points to the location of the next pre-block header,
+     * which means we have to advance to the next BLOCK_ALIGN address and
+     * back up.
+     *
+     * Note we leave the first page empty (see below), and start the
+     * first entry on the second page at an offset that ensures the next
+     * chunk of data will be properly aligned.
+     */
+    assert(BLOCK_ALIGN >= HEADER_EXTRA);
+    pHdr->curOffset = pHdr->firstOffset = (BLOCK_ALIGN-HEADER_EXTRA) + PAGESIZE;
+    pHdr->mapLength = DEFAULT_MAX_LENGTH;
+    pHdr->writeRefCount = NULL;     /* only allocated for ENFORCE_READ_ONLY */
+
+#ifdef USE_ASHMEM
+    int fd;
+
+    fd = ashmem_create_region("dalvik-LinearAlloc", DEFAULT_MAX_LENGTH);
+    if (fd < 0) {
+        LOGE("ashmem LinearAlloc failed: %s\n", strerror(errno));
+        free(pHdr);
+        return NULL;
+    }
+
+    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
+        MAP_PRIVATE, fd, 0);
+    if (pHdr->mapAddr == MAP_FAILED) {
+        LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
+            strerror(errno));
+        close(fd);
+        free(pHdr);
+        return NULL;
+    }
+
+    close(fd);
+#else /*USE_ASHMEM*/
+    // MAP_ANON is listed as "deprecated" on Linux, 
+    // but MAP_ANONYMOUS is not defined under Mac OS X.
+    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
+        MAP_PRIVATE | MAP_ANON, -1, 0);
+    if (pHdr->mapAddr == MAP_FAILED) {
+        LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
+            strerror(errno));
+        free(pHdr);
+        return NULL;
+    }
+#endif /*USE_ASHMEM*/
+
+    /* region expected to begin on a page boundary */
+    assert(((int) pHdr->mapAddr & (PAGESIZE-1)) == 0);
+
+    /* the system should initialize newly-mapped memory to zero */
+    assert(*(u4*) (pHdr->mapAddr + pHdr->curOffset) == 0);
+
+    /*
+     * Disable access to all except starting page.  We will enable pages
+     * as we use them.  This helps prevent bad pointers from working.  The
+     * pages start out PROT_NONE, become read/write while we access them,
+     * then go to read-only after we finish our changes.
+     *
+     * We have to make the first page readable because we have 4 pad bytes,
+     * followed by 4 length bytes, giving an initial offset of 8.  The
+     * generic code below assumes that there could have been a previous
+     * allocation that wrote into those 4 pad bytes, therefore the page
+     * must have been marked readable by the previous allocation.
+     *
+     * We insert an extra page in here to force a break in the memory map
+     * so we can see ourselves more easily in "showmap".  Otherwise this
+     * stuff blends into the neighboring pages.  [TODO: do we still need
+     * the extra page now that we have ashmem?]
+     */
+    if (mprotect(pHdr->mapAddr, pHdr->mapLength, PROT_NONE) != 0) {
+        LOGW("LinearAlloc init mprotect failed: %s\n", strerror(errno));
+        munmap(pHdr->mapAddr, pHdr->mapLength);     /* don't leak the map */
+        free(pHdr);
+        return NULL;
+    }
+    if (mprotect(pHdr->mapAddr + PAGESIZE, PAGESIZE,
+            ENFORCE_READ_ONLY ? PROT_READ : PROT_READ|PROT_WRITE) != 0)
+    {
+        LOGW("LinearAlloc init mprotect #2 failed: %s\n", strerror(errno));
+        munmap(pHdr->mapAddr, pHdr->mapLength);     /* don't leak the map */
+        free(pHdr);
+        return NULL;
+    }
+
+    if (ENFORCE_READ_ONLY) {
+        /* allocate the per-page ref count */
+        int numPages = (pHdr->mapLength+PAGESIZE-1) / PAGESIZE;
+        pHdr->writeRefCount = calloc(numPages, sizeof(short));
+        if (pHdr->writeRefCount == NULL) {
+            munmap(pHdr->mapAddr, pHdr->mapLength);
+            free(pHdr);
+            return NULL;
+        }
+    }
+
+    dvmInitMutex(&pHdr->lock);
+
+    LOGV("LinearAlloc: created region at %p-%p\n",
+        pHdr->mapAddr, pHdr->mapAddr + pHdr->mapLength-1);
+
+    return pHdr;
+}
+
+/*
+ * Destroy a linear allocation area.
+ *
+ * We do a trivial "has everything been freed?" check before unmapping the
+ * memory and freeing the LinearAllocHdr.
+ */
+void dvmLinearAllocDestroy(Object* classLoader)
+{
+#ifdef DISABLE_LINEAR_ALLOC
+    return;
+#endif
+    LinearAllocHdr* pRegion = getHeader(classLoader);
+
+    if (pRegion != NULL) {
+        checkAllFree(classLoader);
+
+        //dvmLinearAllocDump(classLoader);
+
+        LOGV("Unmapping linear allocator base=%p\n", pRegion->mapAddr);
+        LOGD("LinearAlloc %p used %d of %d (%d%%)\n",
+            classLoader, pRegion->curOffset, pRegion->mapLength,
+            (pRegion->curOffset * 100) / pRegion->mapLength);
+
+        if (munmap(pRegion->mapAddr, pRegion->mapLength) != 0) {
+            LOGW("LinearAlloc munmap(%p, %d) failed: %s\n",
+                pRegion->mapAddr, pRegion->mapLength, strerror(errno));
+        }
+        free(pRegion);
+    }
+}
+
+/*
+ * Allocate "size" bytes of storage, associated with a particular class
+ * loader.
+ *
+ * "classLoader" is currently ignored; everything goes into the bootstrap
+ * loader's region (see getHeader()).
+ *
+ * It's okay for size to be zero.
+ *
+ * We always leave "curOffset" pointing at the next place where we will
+ * store the header that precedes the returned storage.
+ *
+ * This aborts the VM on failure, so it's not necessary to check for a
+ * NULL return value.
+ */
+void* dvmLinearAlloc(Object* classLoader, size_t size)
+{
+    LinearAllocHdr* pHdr = getHeader(classLoader);
+    int startOffset, nextOffset;
+    int lastGoodOff, firstWriteOff, lastWriteOff;
+
+#ifdef DISABLE_LINEAR_ALLOC
+    /* stub configuration: just use the system heap */
+    return calloc(1, size);
+#endif
+
+    LOGVV("--- LinearAlloc(%p, %d)\n", classLoader, size);
+
+    /*
+     * What we'd like to do is just determine the new end-of-alloc size
+     * and atomic-swap the updated value in.  The trouble is that, the
+     * first time we reach a new page, we need to call mprotect() to
+     * make the page available, and we don't want to call mprotect() on
+     * every allocation.  The troubled situation is:
+     *  - thread A allocs across a page boundary, but gets preempted
+     *    before mprotect() completes
+     *  - thread B allocs within the new page, and doesn't call mprotect()
+     */
+    dvmLockMutex(&pHdr->lock);
+
+    /* curOffset always points at the next (unwritten) block header */
+    startOffset = pHdr->curOffset;
+    assert(((startOffset + HEADER_EXTRA) & (BLOCK_ALIGN-1)) == 0);
+
+    /*
+     * Compute the new offset.  The old offset points at the address where
+     * we will store the hidden block header, so we advance past that,
+     * add the size of data they want, add another header's worth so we
+     * know we have room for that, and round up to BLOCK_ALIGN.  That's
+     * the next location where we'll put user data.  We then subtract the
+     * chunk header size off so we're back to the header pointer.
+     *
+     * Examples:
+     *   old=12 size=3 new=((12+(4*2)+3+7) & ~7)-4 = 24-4 --> 20
+     *   old=12 size=5 new=((12+(4*2)+5+7) & ~7)-4 = 32-4 --> 28
+     */
+    nextOffset = ((startOffset + HEADER_EXTRA*2 + size + (BLOCK_ALIGN-1))
+                    & ~(BLOCK_ALIGN-1)) - HEADER_EXTRA;
+    LOGVV("--- old=%d size=%d new=%d\n", startOffset, size, nextOffset);
+
+    if (nextOffset > pHdr->mapLength) {
+        /*
+         * We don't have to abort here.  We could fall back on the system
+         * malloc(), and have our "free" call figure out what to do.  Only
+         * works if the users of these functions actually free everything
+         * they allocate.
+         */
+        LOGE("LinearAlloc exceeded capacity, last=%d\n", (int) size);
+        dvmAbort();
+    }
+
+    /*
+     * Round up "size" to encompass the entire region, including the 0-7
+     * pad bytes before the next chunk header.  This way we get maximum
+     * utility out of "realloc", and when we're doing ENFORCE_READ_ONLY
+     * stuff we always treat the full extent.
+     */
+    size = nextOffset - (startOffset + HEADER_EXTRA);
+    LOGVV("--- (size now %d)\n", size);
+
+    /*
+     * See if we are starting on or have crossed into a new page.  If so,
+     * call mprotect on the page(s) we're about to write to.  We have to
+     * page-align the start address, but don't have to make the length a
+     * PAGESIZE multiple (but we do it anyway).
+     *
+     * Note that "startOffset" is not the last *allocated* byte, but rather
+     * the offset of the first *unallocated* byte (which we are about to
+     * write the chunk header to).  "nextOffset" is similar.
+     *
+     * If ENFORCE_READ_ONLY is enabled, we have to call mprotect even if
+     * we've written to this page before, because it might be read-only.
+     */
+    lastGoodOff = (startOffset-1) & ~(PAGESIZE-1);
+    firstWriteOff = startOffset & ~(PAGESIZE-1);
+    lastWriteOff = (nextOffset-1) & ~(PAGESIZE-1);
+    LOGVV("---  lastGood=0x%04x firstWrite=0x%04x lastWrite=0x%04x\n",
+        lastGoodOff, firstWriteOff, lastWriteOff);
+    if (lastGoodOff != lastWriteOff || ENFORCE_READ_ONLY) {
+        int cc, start, len;
+
+        start = firstWriteOff;
+        assert(start <= nextOffset);
+        len = (lastWriteOff - firstWriteOff) + PAGESIZE;
+
+        LOGVV("---    calling mprotect(start=%d len=%d RW)\n", start, len);
+        cc = mprotect(pHdr->mapAddr + start, len, PROT_READ | PROT_WRITE);
+        if (cc != 0) {
+            LOGE("LinearAlloc mprotect (+%d %d) failed: %s\n",
+                start, len, strerror(errno));
+            /* we're going to fail soon, might as well do it now */
+            dvmAbort();
+        }
+    }
+
+    /* update the ref counts on the now-writable pages */
+    if (ENFORCE_READ_ONLY) {
+        int i, start, end;
+
+        start = firstWriteOff / PAGESIZE;
+        end = lastWriteOff / PAGESIZE;
+
+        LOGVV("---  marking pages %d-%d RW (alloc %d at %p)\n",
+            start, end, size, pHdr->mapAddr + startOffset + HEADER_EXTRA);
+        for (i = start; i <= end; i++)
+            pHdr->writeRefCount[i]++;
+    }
+
+    /* stow the size in the header (RW flag set when enforcing read-only) */
+    if (ENFORCE_READ_ONLY)
+        *(u4*)(pHdr->mapAddr + startOffset) = size | LENGTHFLAG_RW;
+    else
+        *(u4*)(pHdr->mapAddr + startOffset) = size;
+
+    /*
+     * Update data structure.
+     */
+    pHdr->curOffset = nextOffset;
+
+    dvmUnlockMutex(&pHdr->lock);
+    return pHdr->mapAddr + startOffset + HEADER_EXTRA;
+}
+
+/*
+ * Helper function, replaces strdup().  Copies "str" (including its NUL
+ * terminator) into linear-alloc storage for "classLoader".
+ */
+char* dvmLinearStrdup(Object* classLoader, const char* str)
+{
+#ifdef DISABLE_LINEAR_ALLOC
+    return strdup(str);
+#endif
+    size_t byteCount = strlen(str) + 1;     /* include the terminator */
+    char* copy = (char*) dvmLinearAlloc(classLoader, byteCount);
+    memcpy(copy, str, byteCount);
+    if (ENFORCE_READ_ONLY)
+        dvmLinearSetReadOnly(classLoader, copy);
+    return copy;
+}
+
+/*
+ * "Reallocate" a piece of memory.
+ *
+ * If the new size is <= the old size, we return the original pointer
+ * without doing anything.
+ *
+ * If the new size is > the old size, we allocate new storage, copy the
+ * old stuff over, and mark the new stuff as free.
+ */
+void* dvmLinearRealloc(Object* classLoader, void* mem, size_t newSize)
+{
+#ifdef DISABLE_LINEAR_ALLOC
+    return realloc(mem, newSize);
+#endif
+    LinearAllocHdr* pHdr = getHeader(classLoader);
+
+    /* make sure we have the right region (and mem != NULL) */
+    assert(mem != NULL);
+    assert(mem >= (void*) pHdr->mapAddr &&
+           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));
+
+    const u4* pLen = getBlockHeader(mem);
+    /*
+     * Strip the FREE/RW status bits; the raw header word is not a pure
+     * length, and using it unmasked would break the size comparison and
+     * the memcpy below whenever a flag bit is set.
+     */
+    u4 oldLen = *pLen & LENGTHFLAG_MASK;
+    LOGV("--- LinearRealloc(%d) old=%d\n", newSize, oldLen);
+
+    /* handle size reduction case */
+    if (oldLen >= newSize) {
+        if (ENFORCE_READ_ONLY)
+            dvmLinearSetReadWrite(classLoader, mem);
+        return mem;
+    }
+
+    void* newMem;
+
+    newMem = dvmLinearAlloc(classLoader, newSize);
+    assert(newMem != NULL);
+    memcpy(newMem, mem, oldLen);
+    dvmLinearFree(classLoader, mem);
+
+    return newMem;
+}
+
+
+/*
+ * Update the read/write status of one or more pages.
+ *
+ * "direction" is +1 to add a writer reference (making a page writable
+ * when its count goes 0 -> 1) and -1 to drop one (re-protecting the page
+ * read-only when its count returns to 0).  The block's RW flag bit in the
+ * header word is toggled on the first page's pass; a double set or double
+ * clear aborts the VM.
+ *
+ * Only meaningful when ENFORCE_READ_ONLY is true, since writeRefCount is
+ * only allocated in that configuration.
+ */
+static void updatePages(Object* classLoader, void* mem, int direction)
+{
+    LinearAllocHdr* pHdr = getHeader(classLoader);
+    dvmLockMutex(&pHdr->lock);
+
+    /* make sure we have the right region */
+    assert(mem >= (void*) pHdr->mapAddr &&
+           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));
+
+    u4* pLen = getBlockHeader(mem);
+    u4 len = *pLen & LENGTHFLAG_MASK;
+    int firstPage, lastPage;
+
+    /* first page holds the hidden header; last holds the final data byte */
+    firstPage = ((u1*)pLen - (u1*)pHdr->mapAddr) / PAGESIZE;
+    lastPage = ((u1*)mem - (u1*)pHdr->mapAddr + (len-1)) / PAGESIZE;
+    LOGVV("--- updating pages %d-%d (%d)\n", firstPage, lastPage, direction);
+
+    int i, cc;
+
+    /*
+     * Update individual pages.  We could do some sort of "lazy update" to
+     * combine mprotect calls, but that's almost certainly more trouble
+     * than it's worth.
+     */
+    for (i = firstPage; i <= lastPage; i++) {
+        if (direction < 0) {
+            /*
+             * Trying to mark read-only.
+             */
+            if (i == firstPage) {
+                /* the RW flag must have been set by the matching "+1" call */
+                if ((*pLen & LENGTHFLAG_RW) == 0) {
+                    LOGW("Double RO on %p\n", mem);
+                    dvmAbort();
+                } else
+                    *pLen &= ~LENGTHFLAG_RW;
+            }
+
+            if (pHdr->writeRefCount[i] == 0) {
+                LOGE("Can't make page %d any less writable\n", i);
+                dvmAbort();
+            }
+            pHdr->writeRefCount[i]--;
+            if (pHdr->writeRefCount[i] == 0) {
+                /* last writer gone; re-protect the page */
+                LOGVV("---  prot page %d RO\n", i);
+                cc = mprotect(pHdr->mapAddr + PAGESIZE * i, PAGESIZE, PROT_READ);
+                assert(cc == 0);
+            }
+        } else {
+            /*
+             * Trying to mark writable.
+             */
+            if (pHdr->writeRefCount[i] >= 32767) {
+                /* writeRefCount is a short; don't let it overflow */
+                LOGE("Can't make page %d any more writable\n", i);
+                dvmAbort();
+            }
+            if (pHdr->writeRefCount[i] == 0) {
+                /* first writer; open the page up */
+                LOGVV("---  prot page %d RW\n", i);
+                cc = mprotect(pHdr->mapAddr + PAGESIZE * i, PAGESIZE,
+                        PROT_READ | PROT_WRITE);
+                assert(cc == 0);
+            }
+            pHdr->writeRefCount[i]++;
+
+            if (i == firstPage) {
+                if ((*pLen & LENGTHFLAG_RW) != 0) {
+                    LOGW("Double RW on %p\n", mem);
+                    dvmAbort();
+                } else
+                    *pLen |= LENGTHFLAG_RW;
+            }
+        }
+    }
+
+    dvmUnlockMutex(&pHdr->lock);
+}
+
+/*
+ * Try to mark the pages in which a chunk of memory lives as read-only.
+ * Whether or not the pages actually change state depends on how many
+ * others are trying to access the same pages.
+ *
+ * Only call here if ENFORCE_READ_ONLY is true.
+ */
+void dvmLinearSetReadOnly(Object* classLoader, void* mem)
+{
+#ifndef DISABLE_LINEAR_ALLOC
+    updatePages(classLoader, mem, -1);
+#endif
+}
+
+/*
+ * Make the pages on which "mem" sits read-write.
+ *
+ * This covers the header as well as the data itself.  (We could add a
+ * "header-only" mode for dvmLinearFree.)
+ *
+ * Only call here if ENFORCE_READ_ONLY is true.
+ */
+void dvmLinearSetReadWrite(Object* classLoader, void* mem)
+{
+#ifndef DISABLE_LINEAR_ALLOC
+    updatePages(classLoader, mem, 1);
+#endif
+}
+
+/*
+ * Mark an allocation as free.
+ *
+ * Storage is not actually reclaimed; we just set the FREE flag in the
+ * hidden block header.  Passing NULL is a no-op.
+ */
+void dvmLinearFree(Object* classLoader, void* mem)
+{
+#ifdef DISABLE_LINEAR_ALLOC
+    free(mem);
+#else
+    if (mem == NULL)
+        return;
+
+    LinearAllocHdr* pRegion = getHeader(classLoader);
+
+    /* make sure we have the right region */
+    assert(mem >= (void*) pRegion->mapAddr &&
+           mem < (void*) (pRegion->mapAddr + pRegion->curOffset));
+
+    if (ENFORCE_READ_ONLY)
+        dvmLinearSetReadWrite(classLoader, mem);
+
+    u4* pHeaderWord = getBlockHeader(mem);
+    *pHeaderWord |= LENGTHFLAG_FREE;
+
+    if (ENFORCE_READ_ONLY)
+        dvmLinearSetReadOnly(classLoader, mem);
+#endif
+}
+
+/*
+ * For debugging, dump the contents of a linear alloc area.
+ *
+ * Each block is listed with its address, page number, and length; a '*'
+ * marks blocks whose FREE flag is set, and "[RW]" marks blocks whose
+ * pages are currently writable.
+ *
+ * We grab the lock so that the header contents and list output are
+ * consistent.
+ */
+void dvmLinearAllocDump(Object* classLoader)
+{
+#ifdef DISABLE_LINEAR_ALLOC
+    return;
+#endif
+    LinearAllocHdr* pHdr = getHeader(classLoader);
+
+    dvmLockMutex(&pHdr->lock);
+
+    LOGI("LinearAlloc classLoader=%p\n", classLoader);
+    LOGI("  mapAddr=%p mapLength=%d firstOffset=%d\n",
+        pHdr->mapAddr, pHdr->mapLength, pHdr->firstOffset);
+    LOGI("  curOffset=%d\n", pHdr->curOffset);
+
+    int off = pHdr->firstOffset;
+    u4 rawLen, fullLen;
+
+    /* walk the block chain; each step advances by the aligned full extent */
+    while (off < pHdr->curOffset) {
+        rawLen = *(u4*) (pHdr->mapAddr + off);
+        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
+                    & ~(BLOCK_ALIGN-1));
+
+        LOGI("  %p (%3d): %clen=%d%s\n", pHdr->mapAddr + off + HEADER_EXTRA,
+            (int) ((off + HEADER_EXTRA) / PAGESIZE),
+            (rawLen & LENGTHFLAG_FREE) != 0 ? '*' : ' ',
+            rawLen & LENGTHFLAG_MASK,
+            (rawLen & LENGTHFLAG_RW) != 0 ? " [RW]" : "");
+
+        off += fullLen;
+    }
+
+    if (ENFORCE_READ_ONLY) {
+        /* note: this part goes to stdout via printf, not the log */
+        LOGI("writeRefCount map:\n");
+
+        int numPages = (pHdr->mapLength+PAGESIZE-1) / PAGESIZE;
+        int zstart = 0;
+        int i;
+
+        /* print runs of zero-count pages compactly, nonzero counts singly */
+        for (i = 0; i < numPages; i++) {
+            int count = pHdr->writeRefCount[i];
+
+            if (count != 0) {
+                if (zstart < i-1)
+                    printf(" %d-%d: zero\n", zstart, i-1);
+                else if (zstart == i-1)
+                    printf(" %d: zero\n", zstart);
+                zstart = i+1;
+                printf(" %d: %d\n", i, count);
+            }
+        }
+        if (zstart < i)
+            printf(" %d-%d: zero\n", zstart, i-1);
+    }
+
+    LOGD("LinearAlloc %p using %d of %d (%d%%)\n",
+        classLoader, pHdr->curOffset, pHdr->mapLength,
+        (pHdr->curOffset * 100) / pHdr->mapLength);
+
+    dvmUnlockMutex(&pHdr->lock);
+}
+
+/*
+ * Verify that all blocks are freed.
+ *
+ * This should only be done as we're shutting down, but there could be a
+ * daemon thread that's still trying to do something, so we grab the locks.
+ */
+static void checkAllFree(Object* classLoader)
+{
+#ifndef DISABLE_LINEAR_ALLOC
+    LinearAllocHdr* pRegion = getHeader(classLoader);
+    int offset;
+
+    dvmLockMutex(&pRegion->lock);
+
+    /* walk every block; complain about any whose FREE flag isn't set */
+    for (offset = pRegion->firstOffset; offset < pRegion->curOffset; ) {
+        u4 rawLength = *(u4*) (pRegion->mapAddr + offset);
+        u4 blockExtent = ((HEADER_EXTRA*2 + (rawLength & LENGTHFLAG_MASK))
+                    & ~(BLOCK_ALIGN-1));
+
+        if ((rawLength & LENGTHFLAG_FREE) == 0) {
+            LOGW("LinearAlloc %p not freed: %p len=%d\n", classLoader,
+                pRegion->mapAddr + offset + HEADER_EXTRA,
+                rawLength & LENGTHFLAG_MASK);
+        }
+
+        offset += blockExtent;
+    }
+
+    dvmUnlockMutex(&pRegion->lock);
+#endif
+}
+
diff --git a/vm/LinearAlloc.h b/vm/LinearAlloc.h
new file mode 100644
index 0000000..9c1d096
--- /dev/null
+++ b/vm/LinearAlloc.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Simple linear memory allocator.
+ */
+#ifndef _DALVIK_LINEARALLOC
+#define _DALVIK_LINEARALLOC
+
+/*
+ * If this is set, we create additional data structures and make many
+ * additional mprotect() calls.
+ * (this breaks the debugger because the debugBreakpointCount cannot be updated)
+ */
+#define ENFORCE_READ_ONLY   false
+
+/*
+ * Linear allocation state.  We could tuck this into the start of the
+ * allocated region, but that would prevent us from sharing the rest of
+ * that first page.
+ */
+typedef struct LinearAllocHdr {
+    int     curOffset;          /* offset where next data goes (updated
+                                   under "lock") */
+    pthread_mutex_t lock;       /* controls updates to this struct */
+
+    char*   mapAddr;            /* start of mmap()ed region */
+    int     mapLength;          /* length of region */
+    int     firstOffset;        /* for chasing through; offset of the first
+                                   block header */
+
+    short*  writeRefCount;      /* per-page writer counts; only allocated by
+                                   dvmLinearAllocCreate when
+                                   ENFORCE_READ_ONLY is true (otherwise
+                                   unused -- NOTE(review): left
+                                   uninitialized in that case; verify) */
+} LinearAllocHdr;
+
+
+/*
+ * Create a new alloc region.
+ */
+LinearAllocHdr* dvmLinearAllocCreate(Object* classLoader);
+
+/*
+ * Destroy a region.
+ */
+void dvmLinearAllocDestroy(Object* classLoader);
+
+/*
+ * Allocate a chunk of memory.  The memory will be zeroed out.
+ *
+ * For ENFORCE_READ_ONLY, call dvmLinearReadOnly on the result.
+ */
+void* dvmLinearAlloc(Object* classLoader, size_t size);
+
+/*
+ * Reallocate a chunk.  The original storage is not released, but may be
+ * erased to aid debugging.
+ *
+ * For ENFORCE_READ_ONLY, call dvmLinearReadOnly on the result.  Also, the
+ * caller should probably mark the "mem" argument read-only before calling.
+ */
+void* dvmLinearRealloc(Object* classLoader, void* mem, size_t newSize);
+
+/* don't call these directly */
+void dvmLinearSetReadOnly(Object* classLoader, void* mem);
+void dvmLinearSetReadWrite(Object* classLoader, void* mem);
+
+/*
+ * Mark a chunk of memory from Alloc or Realloc as read-only.  This must
+ * be done after all changes to the block of memory have been made.  This
+ * actually operates on a page granularity.
+ */
+INLINE void dvmLinearReadOnly(Object* classLoader, void* mem)
+{
+    if (!ENFORCE_READ_ONLY)
+        return;
+    if (mem != NULL)
+        dvmLinearSetReadOnly(classLoader, mem);
+}
+
+/*
+ * Make a chunk of memory writable again.
+ */
+INLINE void dvmLinearReadWrite(Object* classLoader, void* mem)
+{
+    if (!ENFORCE_READ_ONLY)
+        return;
+    if (mem != NULL)
+        dvmLinearSetReadWrite(classLoader, mem);
+}
+
+/*
+ * Free a chunk.  Does not increase available storage, but the freed area
+ * may be erased to aid debugging.
+ */
+void dvmLinearFree(Object* classLoader, void* mem);
+
+/*
+ * Helper function; allocates new storage and copies "str" into it.
+ *
+ * For ENFORCE_READ_ONLY, do *not* call dvmLinearReadOnly on the result.
+ * This is done automatically.
+ */
+char* dvmLinearStrdup(Object* classLoader, const char* str);
+
+/*
+ * Dump the contents of a linear alloc area.
+ */
+void dvmLinearAllocDump(Object* classLoader);
+
+#endif /*_DALVIK_LINEARALLOC*/
diff --git a/vm/Misc.c b/vm/Misc.c
new file mode 100644
index 0000000..7eee877
--- /dev/null
+++ b/vm/Misc.c
@@ -0,0 +1,576 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Miscellaneous utility functions.
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <ctype.h>
+#include <time.h>
+#include <sys/time.h>
+#include <fcntl.h>
+
+
+/*
+ * Print a hex dump in this format:
+ *
+01234567: 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff  0123456789abcdef\n
+ *
+ * If "mode" is kHexDumpLocal, we start at offset zero, and show a full
+ * 16 bytes on the first line.  If it's kHexDumpMem, we make this look
+ * like a memory dump, using the actual address, outputting a partial line
+ * if "vaddr" isn't aligned on a 16-byte boundary.
+ *
+ * "priority" and "tag" determine the values passed to the log calls.
+ *
+ * Does not use printf() or other string-formatting calls.
+ */
+void dvmPrintHexDumpEx(int priority, const char* tag, const void* vaddr,
+    size_t length, HexDumpMode mode)
+{
+    static const char gHexDigit[] = "0123456789abcdef";
+    const unsigned char* addr = vaddr;
+    char out[77];           /* exact fit: 8 offset + ": " + hex + ascii + \n */
+    unsigned int offset;    /* offset to show while printing */
+    char* hex;              /* cursor into the hex-digit columns */
+    char* asc;              /* cursor into the ASCII columns */
+    int gap;                /* leading bytes to skip on the first line */
+    //int trickle = 0;
+
+    if (mode == kHexDumpLocal)
+        offset = 0;
+    else
+        offset = (int) addr;
+
+    /* pre-fill the line template with spaces, ':', newline, terminator */
+    memset(out, ' ', sizeof(out)-1);
+    out[8] = ':';
+    out[sizeof(out)-2] = '\n';
+    out[sizeof(out)-1] = '\0';
+
+    gap = (int) offset & 0x0f;
+    while (length) {
+        unsigned int lineOffset = offset & ~0x0f;
+        int i, count;
+
+        hex = out;
+        asc = out + 59;     /* ASCII column starts at index 59 of the line */
+
+        /* emit the 8-digit line offset, most significant nibble first */
+        for (i = 0; i < 8; i++) {
+            *hex++ = gHexDigit[lineOffset >> 28];
+            lineOffset <<= 4;
+        }
+        hex++;              /* skip the ':' */
+        hex++;              /* skip the space after it */
+
+        count = ((int)length > 16-gap) ? 16-gap : (int)length; /* cap length */
+        assert(count != 0);
+        assert(count+gap <= 16);
+
+        if (gap) {
+            /* only on first line */
+            hex += gap * 3;
+            asc += gap;
+        }
+
+        /* two hex digits plus a space per byte, printable-or-dot in ASCII */
+        for (i = gap ; i < count+gap; i++) {
+            *hex++ = gHexDigit[*addr >> 4];
+            *hex++ = gHexDigit[*addr & 0x0f];
+            hex++;
+            if (*addr >= 0x20 && *addr < 0x7f /*isprint(*addr)*/)
+                *asc++ = *addr;
+            else
+                *asc++ = '.';
+            addr++;
+        }
+        for ( ; i < 16; i++) {
+            /* erase extra stuff; only happens on last line */
+            *hex++ = ' ';
+            *hex++ = ' ';
+            hex++;
+            *asc++ = ' ';
+        }
+
+        LOG_PRI(priority, tag, "%s", out);
+#if 0 //def HAVE_ANDROID_OS
+        /*
+         * We can overrun logcat easily by writing at full speed.  On the
+         * other hand, we can make Eclipse time out if we're showing
+         * packet dumps while debugging JDWP.
+         */
+        {
+            if (trickle++ == 8) {
+                trickle = 0;
+                usleep(20000);
+            }
+        }
+#endif
+
+        gap = 0;
+        length -= count;
+        offset += count;
+    }
+}
+
+
+/*
+ * Fill out a DebugOutputTarget, suitable for printing to the log.
+ * "priority" and "tag" are stashed for later use by the log call.
+ */
+void dvmCreateLogOutputTarget(DebugOutputTarget* target, int priority,
+    const char* tag)
+{
+    assert(target != NULL);
+    assert(tag != NULL);
+
+    target->which = kDebugTargetLog;
+    target->data.log.tag = tag;
+    target->data.log.priority = priority;
+}
+
+/*
+ * Fill out a DebugOutputTarget suitable for printing to a file pointer.
+ * The caller retains ownership of "fp".
+ */
+void dvmCreateFileOutputTarget(DebugOutputTarget* target, FILE* fp)
+{
+    assert(fp != NULL);
+    assert(target != NULL);
+
+    target->data.file.fp = fp;
+    target->which = kDebugTargetFile;
+}
+
+/*
+ * Free "target" and any associated data.
+ *
+ * (Currently no target variant owns additional storage, so only the
+ * struct itself is released.  free(NULL) is a harmless no-op.)
+ */
+void dvmFreeOutputTarget(DebugOutputTarget* target)
+{
+    free(target);
+}
+
+/*
+ * Print a printf-style debug message, routed to either a file or the log
+ * depending on how "target" was configured.
+ */
+void dvmPrintDebugMessage(const DebugOutputTarget* target, const char* format,
+    ...)
+{
+    va_list args;
+
+    va_start(args, format);
+
+    if (target->which == kDebugTargetLog) {
+        LOG_PRI_VA(target->data.log.priority, target->data.log.tag,
+            format, args);
+    } else if (target->which == kDebugTargetFile) {
+        vfprintf(target->data.file.fp, format, args);
+    } else {
+        LOGE("unexpected 'which' %d\n", target->which);
+    }
+
+    va_end(args);
+}
+
+
+/*
+ * Allocate a bit vector with enough space to hold at least the specified
+ * number of bits.  All bits start out set (i.e. "free"/available).
+ *
+ * Returns NULL if either allocation fails.
+ */
+BitVector* dvmAllocBitVector(int startBits, bool expandable)
+{
+    BitVector* bv;
+    int count;
+
+    assert(sizeof(bv->storage[0]) == 4);        /* assuming 32-bit units */
+    assert(startBits > 0);
+
+    bv = (BitVector*) malloc(sizeof(BitVector));
+    if (bv == NULL)                 /* don't dereference a failed malloc */
+        return NULL;
+
+    /* round the bit count up to whole 32-bit words */
+    count = (startBits + 31) >> 5;
+
+    bv->storageSize = count;
+    bv->expandable = expandable;
+    bv->storage = (u4*) malloc(count * sizeof(u4));
+    if (bv->storage == NULL) {
+        free(bv);
+        return NULL;
+    }
+    memset(bv->storage, 0xff, count * sizeof(u4));
+    return bv;
+}
+
+/*
+ * Free a BitVector.
+ */
+void dvmFreeBitVector(BitVector* pBits)
+{
+    if (pBits == NULL)
+        return;
+
+    free(pBits->storage);
+    free(pBits);
+}
+
+/*
+ * "Allocate" the first-available bit in the bitmap.
+ *
+ * This is not synchronized.  The caller is expected to hold some sort of
+ * lock that prevents multiple threads from executing simultaneously in
+ * dvmAllocBit/dvmFreeBit.
+ *
+ * The bitmap indicates which resources are free, so we use '1' to indicate
+ * available and '0' to indicate allocated.
+ */
+int dvmAllocBit(BitVector* pBits)
+{
+    int word, bit;
+
+retry:
+    for (word = 0; word < pBits->storageSize; word++) {
+        if (pBits->storage[word] != 0) {
+            /*
+             * There are unallocated bits in this word.  Return the first.
+             */
+            bit = ffs(pBits->storage[word]) -1;
+            assert(bit >= 0 && bit < 32);
+            pBits->storage[word] &= ~(1 << bit);
+            return (word << 5) | bit;
+        }
+    }
+
+    /*
+     * Ran out of space, allocate more if we're allowed to.
+     */
+    if (!pBits->expandable)
+        return -1;
+
+    pBits->storage = realloc(pBits->storage,
+                    (pBits->storageSize + kBitVectorGrowth) * sizeof(u4));
+    memset(&pBits->storage[pBits->storageSize], 0xff,
+        kBitVectorGrowth * sizeof(u4));
+    pBits->storageSize += kBitVectorGrowth;
+    goto retry;
+}
+
+/*
+ * Mark the specified bit as "free".
+ */
+void dvmFreeBit(BitVector* pBits, int num)
+{
+    assert(num >= 0 && num < (int) pBits->storageSize * (int)sizeof(u4) * 8);
+
+    pBits->storage[num >> 5] |= 1 << (num & 0x1f);
+}
+
+
+/*
+ * Return a newly-allocated string in which all occurrences of '.' have
+ * been changed to '/'.  If we find a '/' in the original string, NULL
+ * is returned to avoid ambiguity.
+ */
+char* dvmDotToSlash(const char* str)
+{
+    char* newStr = strdup(str);
+    char* cp = newStr;
+
+    while (*cp != '\0') {
+        if (*cp == '/') {
+            assert(false);
+            return NULL;
+        }
+        if (*cp == '.')
+            *cp = '/';
+        cp++;
+    }
+
+    return newStr;
+}
+
+/*
+ * Return a newly-allocated string for the "dot version" of the class
+ * name for the given type descriptor. That is, the initial "L" and
+ * final ";" (if any) have been removed and all occurrences of '/'
+ * have been changed to '.'.
+ */
/*
 * Return a newly-allocated string for the "dot version" of the class
 * name for the given type descriptor.  That is, the initial "L" and
 * final ";" (if any) are removed and all occurrences of '/' become '.'.
 *
 * Returns NULL on allocation failure (previously the malloc result was
 * dereferenced unchecked).  Caller owns the result.
 */
char* dvmDescriptorToDot(const char* str)
{
    size_t at = strlen(str);
    char* newStr;

    if ((at >= 2) && (str[0] == 'L') && (str[at - 1] == ';')) {
        at -= 2; /* Two fewer chars to copy. */
        str++; /* Skip the 'L'. */
    }

    newStr = malloc(at + 1); /* Add one for the '\0'. */
    if (newStr == NULL)
        return NULL;
    newStr[at] = '\0';

    /* copy back-to-front, translating '/' to '.' as we go */
    while (at > 0) {
        at--;
        newStr[at] = (str[at] == '/') ? '.' : str[at];
    }

    return newStr;
}
+
+/*
+ * Return a newly-allocated string for the type descriptor
+ * corresponding to the "dot version" of the given class name. That
+ * is, non-array names are surrounded by "L" and ";", and all
+ * occurrences of '.' are changed to '/'.
+ */
/*
 * Return a newly-allocated string for the type descriptor corresponding
 * to the "dot version" of the given class name.  Non-array names are
 * wrapped in "L" and ";", and every '.' becomes '/'.  Array names
 * (leading '[') get only the character translation.
 *
 * Returns NULL on allocation failure.  Caller owns the result.
 */
char* dvmDotToDescriptor(const char* str)
{
    int wrap = (str[0] != '[');    /* arrays are already descriptor-shaped */
    size_t total = strlen(str) + (wrap ? 2 : 0);
    const char* in;
    char* result;
    char* out;

    result = malloc(total + 1); /* + 1 for the '\0' */
    if (result == NULL)
        return NULL;

    out = result;
    if (wrap)
        *out++ = 'L';
    for (in = str; *in != '\0'; in++)
        *out++ = (*in == '.') ? '/' : *in;
    if (wrap)
        *out++ = ';';
    *out = '\0';

    return result;
}
+
+/*
+ * Return a newly-allocated string for the internal-form class name for
+ * the given type descriptor. That is, the initial "L" and final ";" (if
+ * any) have been removed.
+ */
+char* dvmDescriptorToName(const char* str)
+{
+    if (str[0] == 'L') {
+        size_t length = strlen(str) - 1;
+        char* newStr = malloc(length);
+
+        if (newStr == NULL) {
+            return NULL;
+        }
+        
+        strlcpy(newStr, str + 1, length);
+        return newStr;
+    }
+
+    return strdup(str);
+}
+
+/*
+ * Return a newly-allocated string for the type descriptor for the given
+ * internal-form class name. That is, a non-array class name will get
+ * surrounded by "L" and ";", while array names are left as-is.
+ */
+char* dvmNameToDescriptor(const char* str)
+{
+    if (str[0] != '[') {
+        size_t length = strlen(str);
+        char* descriptor = malloc(length + 3);
+
+        if (descriptor == NULL) {
+            return NULL;
+        }
+        
+        descriptor[0] = 'L';
+        strcpy(descriptor + 1, str);
+        descriptor[length + 1] = ';';
+        descriptor[length + 2] = '\0';
+
+        return descriptor;
+    }
+
+    return strdup(str);
+}
+
+/*
+ * Get a notion of the current time, in nanoseconds.  This is meant for
+ * computing durations (e.g. "operation X took 52nsec"), so the result
+ * should not be used to get the current date/time.
+ */
u8 dvmGetRelativeTimeNsec(void)
{
#ifdef HAVE_POSIX_CLOCKS
    /* monotonic clock: immune to wall-clock adjustments (NTP, settimeofday) */
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    return (u8)now.tv_sec*1000000000LL + now.tv_nsec;
#else
    /* fallback is wall-clock based; deltas can jump if the clock is reset */
    struct timeval now;
    gettimeofday(&now, NULL);
    return (u8)now.tv_sec*1000000000LL + now.tv_usec * 1000LL;
#endif
}
+
+/*
+ * Get the per-thread CPU time, in nanoseconds.
+ *
+ * Only useful for time deltas.
+ */
u8 dvmGetThreadCpuTimeNsec(void)
{
#ifdef HAVE_POSIX_CLOCKS
    /* per-thread CPU clock: advances only while this thread is running */
    struct timespec now;
    clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
    return (u8)now.tv_sec*1000000000LL + now.tv_nsec;
#else
    /* no thread CPU clock available on this platform */
    return (u8) -1;
#endif
}
+
+/*
+ * Get the per-thread CPU time, in nanoseconds, for the specified thread.
+ */
u8 dvmGetOtherThreadCpuTimeNsec(pthread_t thread)
{
    /* deliberately disabled ("#if 0"): always reports "unavailable";
     * the pthread_getcpuclockid path below is kept for future use */
#if 0 /*def HAVE_POSIX_CLOCKS*/
    int clockId;

    if (pthread_getcpuclockid(thread, &clockId) != 0)
        return (u8) -1;

    struct timespec now;
    clock_gettime(clockId, &now);
    return (u8)now.tv_sec*1000000000LL + now.tv_nsec;
#else
    return (u8) -1;
#endif
}
+
+
+/*
+ * Call this repeatedly, with successively higher values for "iteration",
+ * to sleep for a period of time not to exceed "maxTotalSleep".
+ *
+ * For example, when called with iteration==0 we will sleep for a very
+ * brief time.  On the next call we will sleep for a longer time.  When
+ * the sum total of all sleeps reaches "maxTotalSleep", this returns false.
+ *
+ * The initial start time value for "relStartTime" MUST come from the
+ * dvmGetRelativeTimeUsec call.  On the device this must come from the
+ * monotonic clock source, not the wall clock.
+ *
+ * Returns "false" if we were unable to sleep because our time was up.
+ */
+bool dvmIterativeSleep(int iteration, int maxTotalSleep, u8 relStartTime)
+{
+    const int minSleep = 10000;
+    u8 curTime;
+    int curDelay;
+
+    /*
+     * Get current time, and see if we've already exceeded the limit.
+     */
+    curTime = dvmGetRelativeTimeUsec();
+    if (curTime >= relStartTime + maxTotalSleep) {
+        LOGVV("exsl: sleep exceeded (start=%llu max=%d now=%llu)\n",
+            relStartTime, maxTotalSleep, curTime);
+        return false;
+    }
+
+    /*
+     * Compute current delay.  We're bounded by "maxTotalSleep", so no
+     * real risk of overflow assuming "usleep" isn't returning early.
+     * (Besides, 2^30 usec is about 18 minutes by itself.)
+     *
+     * For iteration==0 we just call sched_yield(), so the first sleep
+     * at iteration==1 is actually (minSleep * 2).
+     */
+    curDelay = minSleep;
+    while (iteration-- > 0)
+        curDelay *= 2;
+    assert(curDelay > 0);
+
+    if (curTime + curDelay >= relStartTime + maxTotalSleep) {
+        LOGVV("exsl: reduced delay from %d to %d\n",
+            curDelay, (int) ((relStartTime + maxTotalSleep) - curTime));
+        curDelay = (int) ((relStartTime + maxTotalSleep) - curTime);
+    }
+
+    if (iteration == 0) {
+        LOGVV("exsl: yield\n");
+        sched_yield();
+    } else {
+        LOGVV("exsl: sleep for %d\n", curDelay);
+        usleep(curDelay);
+    }
+    return true;
+}
+
+
+/*
+ * Set the "close on exec" flag so we don't expose our file descriptors
+ * to processes launched by us.
+ */
/*
 * Set the "close on exec" flag so we don't expose our file descriptors
 * to processes launched by us.  Returns "true" on success.
 */
bool dvmSetCloseOnExec(int fd)
{
    int oldFlags;

    /*
     * FD_CLOEXEC is currently the only defined fd flag, but fetch the
     * existing value anyway so we preserve any future bits.
     */
    oldFlags = fcntl(fd, F_GETFD);
    if (oldFlags >= 0 && fcntl(fd, F_SETFD, oldFlags | FD_CLOEXEC) >= 0)
        return true;

    if (oldFlags < 0)
        LOGW("Unable to get fd flags for fd %d\n", fd);
    else
        LOGW("Unable to set close-on-exec for fd %d\n", fd);
    return false;
}
+
#if (!HAVE_STRLCPY)
/*
 * Implementation of strlcpy() for platforms that don't already have it.
 *
 * Copies up to size-1 bytes of "src" into "dst" and always
 * NUL-terminates (when size > 0).  Returns strlen(src), so callers can
 * detect truncation by comparing the result against "size".
 */
size_t strlcpy(char *dst, const char *src, size_t size) {
    size_t srcLength = strlen(src);

    if (size != 0) {
        size_t copyLength =
            (srcLength < size - 1) ? srcLength : size - 1;
        memcpy(dst, src, copyLength);
        dst[copyLength] = '\0';
    }

    return srcLength;
}
#endif
diff --git a/vm/Misc.h b/vm/Misc.h
new file mode 100644
index 0000000..95edc0f
--- /dev/null
+++ b/vm/Misc.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Miscellaneous utility functions.
+ */
+#ifndef _DALVIK_MISC
+#define _DALVIK_MISC
+
+#include "Inlines.h"
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/time.h>
+
+/*
+ * Used to shut up the compiler when a parameter isn't used.
+ */
+#define UNUSED_PARAMETER(p)     (void)(p)
+
+/*
+ * Floating point conversion functions.  These are necessary to avoid
+ * strict-aliasing problems ("dereferencing type-punned pointer will break
+ * strict-aliasing rules").  According to the gcc info page, this usage
+ * is allowed, even with "-fstrict-aliasing".
+ *
+ * The code generated by gcc-4.1.1 appears to be much better than a
+ * type cast dereference ("int foo = *(int*)&myfloat") when the conversion
+ * function is inlined.  It also allows us to take advantage of the
+ * optimizations that strict aliasing rules allow.
+ */
INLINE float dvmU4ToFloat(u4 val) {
    /* union-based type pun: reinterprets the 32-bit pattern as a float
     * without violating strict-aliasing rules (see comment above) */
    union { u4 in; float out; } conv;
    conv.in = val;
    return conv.out;
}
INLINE u4 dvmFloatToU4(float val) {
    /* inverse of dvmU4ToFloat: float bit pattern out as a raw u4 */
    union { float in; u4 out; } conv;
    conv.in = val;
    return conv.out;
}
+#if 0
+INLINE float dvmU8ToFloat(u8 val) {
+    union { u8 in; float out; } conv;
+    conv.in = val;
+    return conv.out;
+}
+INLINE u8 dvmFloatToU8(float val) {
+    union { float in; u8 out; } conv;
+    conv.in = val;
+    return conv.out;
+}
+INLINE double dvmU8ToDouble(u8 val) {
+    union { u8 in; double out; } conv;
+    conv.in = val;
+    return conv.out;
+}
+INLINE u8 dvmDoubleToU8(double val) {
+    union { double in; u8 out; } conv;
+    conv.in = val;
+    return conv.out;
+}
+#endif
+
+/*
+ * Print a hex dump to the log file.
+ *
+ * "local" mode prints a hex dump starting from offset 0 (roughly equivalent
+ * to "xxd -g1").
+ *
+ * "mem" mode shows the actual memory address, and will offset the start
+ * so that the low nibble of the address is always zero.
+ *
+ * If "tag" is NULL the default tag ("dalvikvm") will be used.
+ */
+typedef enum { kHexDumpLocal, kHexDumpMem } HexDumpMode;
+void dvmPrintHexDumpEx(int priority, const char* tag, const void* vaddr,
+    size_t length, HexDumpMode mode);
+
+/*
+ * Print a hex dump, at INFO level.
+ */
INLINE void dvmPrintHexDump(const void* vaddr, size_t length) {
    /* convenience wrapper: INFO priority, default tag, offsets from zero */
    dvmPrintHexDumpEx(ANDROID_LOG_INFO, LOG_TAG,
        vaddr, length, kHexDumpLocal);
}
+
+/*
+ * Print a hex dump at VERBOSE level. This does nothing in non-debug builds.
+ */
INLINE void dvmPrintHexDumpDbg(const void* vaddr, size_t length,const char* tag)
{
#if !LOG_NDEBUG
    /* compiled away entirely when LOG_NDEBUG is set (non-debug builds) */
    dvmPrintHexDumpEx(ANDROID_LOG_VERBOSE, (tag != NULL) ? tag : LOG_TAG,
        vaddr, length, kHexDumpLocal);
#endif
}
+
+/*
+ * We pass one of these around when we want code to be able to write debug
+ * info to either the log or to a file (or stdout/stderr).
+ */
typedef struct DebugOutputTarget {
    /* where to?  selects which member of "data" below is valid */
    enum {
        kDebugTargetUnknown = 0,
        kDebugTargetLog,
        kDebugTargetFile,
    } which;

    /* additional bits */
    union {
        struct {
            int priority;
            const char* tag;    /* borrowed; must outlive the target */
        } log;                  /* valid when which == kDebugTargetLog */
        struct {
            FILE* fp;           /* borrowed; caller closes */
        } file;                 /* valid when which == kDebugTargetFile */
    } data;
} DebugOutputTarget;
+
+/*
+ * Fill in a DebugOutputTarget struct.
+ */
+void dvmCreateLogOutputTarget(DebugOutputTarget* target, int priority,
+    const char* tag);
+void dvmCreateFileOutputTarget(DebugOutputTarget* target, FILE* fp);
+
+/*
+ * Print a debug message.
+ */
+void dvmPrintDebugMessage(const DebugOutputTarget* target, const char* format,
+    ...);
+
+
+/*
+ * Expanding bitmap, used for tracking resources.  Bits are numbered starting
+ * from zero.
+ */
typedef struct BitVector {
    bool    expandable;     /* expand bitmap if we run out? */
    int     storageSize;    /* current size, in 32-bit words */
    u4*     storage;        /* bit storage; 1 = available, 0 = allocated */
} BitVector;
+
+/* allocate a bit vector with enough space to hold "startBits" bits */
+BitVector* dvmAllocBitVector(int startBits, bool expandable);
+void dvmFreeBitVector(BitVector* pBits);
+
+/*
+ * Set/clear a single bit; assumes external synchronization.
+ *
+ * We always allocate the first possible bit.  If we run out of space in
+ * the bitmap, and it's not marked expandable, dvmAllocBit returns -1.
+ */
+int dvmAllocBit(BitVector* pBits);
+void dvmFreeBit(BitVector* pBits, int num);
+
+#define kBitVectorGrowth    4   /* increase by 4 u4s when limit hit */
+
+
+/*
+ * Return a newly-allocated string in which all occurrences of '.' have
+ * been changed to '/'.  If we find a '/' in the original string, NULL
+ * is returned to avoid ambiguity.
+ */
+char* dvmDotToSlash(const char* str);
+
+/*
+ * Return a newly-allocated string for the "dot version" of the class
+ * name for the given type descriptor. That is, the initial "L" and
+ * final ";" (if any) have been removed and all occurrences of '/'
+ * have been changed to '.'.
+ */
+char* dvmDescriptorToDot(const char* str);
+
+/*
+ * Return a newly-allocated string for the type descriptor
+ * corresponding to the "dot version" of the given class name. That
+ * is, non-array names are surrounded by "L" and ";", and all
+ * occurrences of '.' have been changed to '/'.
+ */
+char* dvmDotToDescriptor(const char* str);
+
+/*
+ * Return a newly-allocated string for the internal-form class name for
+ * the given type descriptor. That is, the initial "L" and final ";" (if
+ * any) have been removed.
+ */
+char* dvmDescriptorToName(const char* str);
+
+/*
+ * Return a newly-allocated string for the type descriptor for the given
+ * internal-form class name. That is, a non-array class name will get
+ * surrounded by "L" and ";", while array names are left as-is.
+ */
+char* dvmNameToDescriptor(const char* str);
+
+/*
+ * Get the current time, in nanoseconds.  This is "relative" time, meaning
+ * it could be wall-clock time or a monotonic counter, and is only suitable
+ * for computing time deltas.
+ */
+u8 dvmGetRelativeTimeNsec(void);
+
+/*
+ * Get the current time, in microseconds.  This is "relative" time, meaning
+ * it could be wall-clock time or a monotonic counter, and is only suitable
+ * for computing time deltas.
+ */
INLINE u8 dvmGetRelativeTimeUsec(void) {
    /* truncating nsec -> usec conversion */
    return dvmGetRelativeTimeNsec() / 1000;
}
+
+/*
+ * Get the current per-thread CPU time.  This clock increases monotonically
+ * when the thread is running, but not when it's sleeping or blocked on a
+ * synchronization object.
+ *
+ * The absolute value of the clock may not be useful, so this should only
+ * be used for time deltas.
+ *
+ * If the thread CPU clock is not available, this always returns (u8)-1.
+ */
+u8 dvmGetThreadCpuTimeNsec(void);
+
+/*
+ * Per-thread CPU time, in micros.
+ */
INLINE u8 dvmGetThreadCpuTimeUsec(void) {
    /* truncating nsec -> usec conversion; (u8)-1 "unavailable" stays huge */
    return dvmGetThreadCpuTimeNsec() / 1000;
}
+
+/*
+ * Like dvmGetThreadCpuTimeNsec, but for a different thread.
+ */
+u8 dvmGetOtherThreadCpuTimeNsec(pthread_t thread);
INLINE u8 dvmGetOtherThreadCpuTimeUsec(pthread_t thread) {
    /* truncating nsec -> usec conversion of the other-thread CPU clock */
    return dvmGetOtherThreadCpuTimeNsec(thread) / 1000;
}
+
+/*
+ * Sleep for increasingly longer periods, until "maxTotalSleep" microseconds
+ * have elapsed.  Pass in the start time, which must be a value returned by
+ * dvmGetRelativeTimeUsec().
+ *
+ * Returns "false" if we were unable to sleep because our time is up.
+ */
+bool dvmIterativeSleep(int iteration, int maxTotalSleep, u8 relStartTime);
+
+/*
+ * Set the "close on exec" flag on a file descriptor.
+ */
+bool dvmSetCloseOnExec(int fd);
+
+#if (!HAVE_STRLCPY)
+/* Implementation of strlcpy() for platforms that don't already have it. */
+size_t strlcpy(char *dst, const char *src, size_t size);
+#endif
+
+#endif /*_DALVIK_MISC*/
diff --git a/vm/Native.c b/vm/Native.c
new file mode 100644
index 0000000..7a153d6
--- /dev/null
+++ b/vm/Native.c
@@ -0,0 +1,704 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Native method resolution.
+ *
+ * Currently the "Dalvik native" methods are only used for internal methods.
+ * Someday we may want to export the interface as a faster but riskier
+ * alternative to JNI.
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+#include <dlfcn.h>
+
+static void freeSharedLibEntry(void* ptr);
+static void* lookupSharedLibMethod(const Method* method);
+
+
+/*
+ * Initialize the native code loader.
+ */
+bool dvmNativeStartup(void)
+{
+    gDvm.nativeLibs = dvmHashTableCreate(4, freeSharedLibEntry);
+    if (gDvm.nativeLibs == NULL)
+        return false;
+
+    return true;
+}
+
+/*
+ * Free up our tables.
+ */
void dvmNativeShutdown(void)
{
    /* entries are released via the freeSharedLibEntry callback
     * registered in dvmNativeStartup */
    dvmHashTableFree(gDvm.nativeLibs);
    gDvm.nativeLibs = NULL;
}
+
+
+/*
+ * Resolve a native method and invoke it.
+ *
+ * This is executed as if it were a native bridge or function.  If the
+ * resolution succeeds, method->insns is replaced, and we don't go through
+ * here again.
+ *
+ * Initializes method's class if necessary.
+ *
+ * An exception is thrown on resolution failure.
+ */
void dvmResolveNativeMethod(const u4* args, JValue* pResult,
    const Method* method, Thread* self)
{
    ClassObject* clazz = method->clazz;
    void* func;

    /*
     * If this is a static method, it could be called before the class
     * has been initialized.
     */
    if (dvmIsStaticMethod(method)) {
        if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz)) {
            /* class init failed; the pending exception is the result */
            assert(dvmCheckException(dvmThreadSelf()));
            return;
        }
    } else {
        assert(dvmIsClassInitialized(clazz) ||
               dvmIsClassInitializing(clazz));
    }

    /* start with our internal-native methods */
    func = dvmLookupInternalNativeMethod(method);
    if (func != NULL) {
        /* resolution always gets the same answer, so no race here */
        IF_LOGVV() {
            char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
            LOGVV("+++ resolved native %s.%s %s, invoking\n",
                clazz->descriptor, method->name, desc);
            free(desc);
        }
        if (dvmIsSynchronizedMethod(method)) {
            LOGE("ERROR: internal-native can't be declared 'synchronized'\n");
            LOGE("Failing on %s.%s\n", method->clazz->descriptor, method->name);
            dvmAbort();     // harsh, but this is VM-internal problem
        }
        /* cache the bridge so future calls skip this resolver entirely */
        DalvikBridgeFunc dfunc = (DalvikBridgeFunc) func;
        dvmSetNativeFunc(method, dfunc, NULL);
        assert(method->insns == NULL);
        /* invoke directly this first time */
        dfunc(args, pResult, method, self);
        return;
    }

    /* now scan any DLLs we have loaded for JNI signatures */
    func = lookupSharedLibMethod(method);
    if (func != NULL) {
        /* cache the appropriate JNI call bridge for future invocations */
        if (dvmIsSynchronizedMethod(method))
            dvmSetNativeFunc(method, dvmCallSynchronizedJNIMethod, func);
        else
            dvmSetNativeFunc(method, dvmCallJNIMethod, func);
        dvmCallJNIMethod(args, pResult, method, self);
        return;
    }

    IF_LOGW() {
        char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
        LOGW("No implementation found for native %s.%s %s\n",
            clazz->descriptor, method->name, desc);
        free(desc);
    }

    /* no internal-native and no JNI symbol: throw to the caller */
    dvmThrowException("Ljava/lang/UnsatisfiedLinkError;", method->name);
}
+
+
+/*
+ * ===========================================================================
+ *      Native shared library support
+ * ===========================================================================
+ */
+
+// TODO? if a ClassLoader is unloaded, we need to unload all DLLs that
+// are associated with it.  (Or not -- can't determine if native code
+// is still using parts of it.)
+
+/*
+ * We add one of these to the hash table for every library we load.  The
+ * hash is on the "pathName" field.
+ */
typedef struct SharedLib {
    char*       pathName;       /* absolute path to library; owned by this
                                 * entry, freed in freeSharedLibEntry */
    void*       handle;         /* from dlopen */
    Object*     classLoader;    /* ClassLoader we are associated with */
} SharedLib;
+
+/*
+ * (This is a dvmHashTableLookup callback.)
+ *
+ * Find an entry that matches the string.
+ */
+static int hashcmpNameStr(const void* ventry, const void* vname)
+{
+    const SharedLib* pLib = (const SharedLib*) ventry;
+    const char* name = (const char*) vname;
+
+    return strcmp(pLib->pathName, name);
+}
+
+/*
+ * (This is a dvmHashTableLookup callback.)
+ *
+ * Find an entry that matches the new entry.
+ */
static int hashcmpSharedLib(const void* ventry, const void* vnewEntry)
{
    const SharedLib* pLib = (const SharedLib*) ventry;
    const SharedLib* pNewLib = (const SharedLib*) vnewEntry;

    /* NOTE(review): this logs on every comparison during insertion --
     * looks like debug scaffolding; consider LOGVV or removing */
    LOGD("--- comparing %p '%s' %p '%s'\n",
        pLib, pLib->pathName, pNewLib, pNewLib->pathName);
    return strcmp(pLib->pathName, pNewLib->pathName);
}
+
+/*
+ * Check to see if an entry with the same pathname already exists.
+ */
+static const SharedLib* findSharedLibEntry(const char* pathName)
+{
+    u4 hash = dvmComputeUtf8Hash(pathName);
+    void* ent;
+
+    ent = dvmHashTableLookup(gDvm.nativeLibs, hash, (void*)pathName,
+                hashcmpNameStr, false);
+    return ent;
+}
+
+/*
+ * Add the new entry to the table.
+ *
+ * Returns "true" on success, "false" if the entry already exists.
+ */
+static bool addSharedLibEntry(SharedLib* pLib)
+{
+    u4 hash = dvmComputeUtf8Hash(pLib->pathName);
+    void* ent;
+
+    /*
+     * Do the lookup with the "add" flag set.  If we add it, we will get
+     * our own pointer back.  If somebody beat us to the punch, we'll get
+     * their pointer back instead.
+     */
+    ent = dvmHashTableLookup(gDvm.nativeLibs, hash, pLib, hashcmpSharedLib,
+                true);
+    return (ent == pLib);
+}
+
+/*
+ * Free up an entry.  (This is a dvmHashTableFree callback.)
+ */
static void freeSharedLibEntry(void* ptr)
{
    SharedLib* pLib = (SharedLib*) ptr;

    /*
     * Calling dlclose() here is somewhat dangerous, because it's possible
     * that a thread outside the VM is still accessing the code we loaded.
     * The "if (false)" deliberately leaks the dlopen handle for safety.
     */
    if (false)
        dlclose(pLib->handle);
    free(pLib->pathName);
    free(pLib);
}
+
+/*
+ * Convert library name to system-dependent form, e.g. "jpeg" becomes
+ * "libjpeg.so".
+ *
+ * (Should we have this take buffer+len and avoid the alloc?  It gets
+ * called very rarely.)
+ */
+char* dvmCreateSystemLibraryName(char* libName)
+{
+    char buf[256];
+    int len;
+
+    len = snprintf(buf, sizeof(buf), OS_SHARED_LIB_FORMAT_STR, libName);
+    if (len >= (int) sizeof(buf))
+        return NULL;
+    else
+        return strdup(buf);
+}
+
+
+#if 0
+/*
+ * Find a library, given the lib's system-dependent name (e.g. "libjpeg.so").
+ *
+ * We need to search through the path defined by the java.library.path
+ * property.
+ *
+ * Returns NULL if the library was not found.
+ */
static char* findLibrary(const char* libSysName)
{
    char* javaLibraryPath = NULL;
    char* testName = NULL;
    char* start;
    char* cp;
    bool done;

    /* NOTE(review): assumes dvmGetProperty returns a heap string we may
     * mutate and must free -- confirm against its definition */
    javaLibraryPath = dvmGetProperty("java.library.path");
    if (javaLibraryPath == NULL)
        goto bail;

    LOGVV("+++ path is '%s'\n", javaLibraryPath);

    /* walk the ':'-separated path list, testing each dir for the lib;
     * the separators are overwritten in our private copy */
    start = cp = javaLibraryPath;
    while (cp != NULL) {
        char pathBuf[256];
        int len;

        cp = strchr(start, ':');
        if (cp != NULL)
            *cp = '\0';

        len = snprintf(pathBuf, sizeof(pathBuf), "%s/%s", start, libSysName);
        if (len >= (int) sizeof(pathBuf)) {
            LOGW("Path overflowed %d bytes: '%s' / '%s'\n",
                len, start, libSysName);
            /* keep going, next one might fit */
        } else {
            LOGVV("+++  trying '%s'\n", pathBuf);
            /* first readable candidate wins */
            if (access(pathBuf, R_OK) == 0) {
                testName = strdup(pathBuf);
                break;
            }
        }

        start = cp +1;
    }

bail:
    free(javaLibraryPath);
    return testName;
}
+
+/*
+ * Load a native shared library, given the system-independent piece of
+ * the library name.
+ *
+ * Throws an exception on failure.
+ */
void dvmLoadNativeLibrary(StringObject* libNameObj, Object* classLoader)
{
    char* libName = NULL;
    char* libSysName = NULL;
    char* libPath = NULL;

    /*
     * If "classLoader" isn't NULL, call the class loader's "findLibrary"
     * method with the lib name.  If it returns a non-NULL result, we use
     * that as the pathname.
     */
    if (classLoader != NULL) {
        Method* findLibrary;
        Object* findLibResult;

        findLibrary = dvmFindVirtualMethodByDescriptor(classLoader->clazz,
            "findLibrary", "(Ljava/lang/String;)Ljava/lang/String;");
        if (findLibrary == NULL) {
            LOGW("Could not find findLibrary() in %s\n",
                classLoader->clazz->name);
            dvmThrowException("Ljava/lang/UnsatisfiedLinkError;",
                "findLibrary");
            goto bail;
        }

        /* NOTE(review): this block is "#if 0"-disabled; dvmCallMethod /
         * dvmCheckException usage below doesn't match the call shapes
         * used elsewhere -- revisit before re-enabling */
        findLibResult = (Object*)(u4) dvmCallMethod(findLibrary, classLoader,
                                            libNameObj);
        if (dvmCheckException()) {
            LOGV("returning early on exception\n");
            goto bail;
        }
        if (findLibResult != NULL) {
            /* success! */
            /* NOTE(review): converts libNameObj, not findLibResult -- the
             * loader's returned path appears to be ignored; verify */
            libPath = dvmCreateCstrFromString(libNameObj);
            LOGI("Found library through CL: '%s'\n", libPath);
            dvmLoadNativeCode(libPath, classLoader);
            goto bail;
        }
    }

    libName = dvmCreateCstrFromString(libNameObj);
    if (libName == NULL)
        goto bail;
    libSysName = dvmCreateSystemLibraryName(libName);
    if (libSysName == NULL)
        goto bail;

    /* search java.library.path for the platform-decorated name */
    libPath = findLibrary(libSysName);
    if (libPath != NULL) {
        LOGD("Found library through path: '%s'\n", libPath);
        dvmLoadNativeCode(libPath, classLoader);
    } else {
        LOGW("Unable to locate shared lib matching '%s'\n", libSysName);
        dvmThrowException("Ljava/lang/UnsatisfiedLinkError;", libName);
    }

bail:
    free(libName);
    free(libSysName);
    free(libPath);
}
+#endif
+
+typedef int (*OnLoadFunc)(JavaVM*, void*);
+
+/*
+ * Load native code from the specified absolute pathname.  Per the spec,
+ * if we've already loaded a library with the specified pathname, we
+ * return without doing anything.
+ *
+ * TODO? for better results we should absolutify the pathname.  For fully
+ * correct results we should stat to get the inode and compare that.  The
+ * existing implementation is fine so long as everybody is using
+ * System.loadLibrary.
+ *
+ * The library will be associated with the specified class loader.  The JNI
+ * spec says we can't load the same library into more than one class loader.
+ *
+ * Returns "true" on success.
+ */
+bool dvmLoadNativeCode(const char* pathName, Object* classLoader)
+{
+    const SharedLib* pEntry;
+    void* handle;
+
+    LOGD("Trying to load lib %s %p\n", pathName, classLoader);
+
+    /*
+     * See if we've already loaded it.  If we have, and the class loader
+     * matches, return successfully without doing anything.
+     */
+    pEntry = findSharedLibEntry(pathName);
+    if (pEntry != NULL) {
+        if (pEntry->classLoader != classLoader) {
+            LOGW("Shared lib '%s' already opened by CL %p; can't open in %p\n",
+                pathName, pEntry->classLoader, classLoader);
+            return false;
+        }
+        LOGD("Shared lib '%s' already loaded in same CL %p\n",
+            pathName, classLoader);
+        return true;
+    }
+
+    /*
+     * Open the shared library.  Because we're using a full path, the system
+     * doesn't have to search through LD_LIBRARY_PATH.  (It may do so to
+     * resolve this library's dependencies though.)
+     *
+     * Failures here are expected when java.library.path has several entries.
+     *
+     * The current android-arm dynamic linker implementation tends to
+     * return "Cannot find library" from dlerror() regardless of the actual
+     * problem.  A more useful diagnostic may be sent to stdout/stderr,
+     * but often that's not visible.  Some things to try:
+     *   - make sure the library exists on the device
+     *   - verify that the right path is being opened (the debug log message
+     *     above can help with that)
+     *   - check to see if the library is valid
+     *   - check config/prelink-linux-arm.map to ensure that the library
+     *     is listed and is not being overrun by the previous entry (if
+     *     loading suddenly stops working, this is a good one to check)
+     */
+    handle = dlopen(pathName, RTLD_LAZY);
+    if (handle == NULL) {
+        LOGI("Unable to dlopen(%s): %s\n", pathName, dlerror());
+        return false;
+    }
+
+    SharedLib* pNewEntry;
+    pNewEntry = (SharedLib*) malloc(sizeof(SharedLib));
+    pNewEntry->pathName = strdup(pathName);
+    pNewEntry->handle = handle;
+    pNewEntry->classLoader = classLoader;
+    if (!addSharedLibEntry(pNewEntry)) {
+        LOGI("WOW: we lost a race to add a shared lib (%s %p)\n",
+            pathName, classLoader);
+        /* free up our entry, and just return happy that one exists */
+        freeSharedLibEntry(pNewEntry);
+    } else {
+        LOGD("Added shared lib %s %p\n", pathName, classLoader);
+
+        void* vonLoad;
+        int version;
+
+        vonLoad = dlsym(handle, "JNI_OnLoad");
+        if (vonLoad == NULL) {
+            LOGD("No JNI_OnLoad found in %s %p\n", pathName, classLoader);
+        } else {
+            /*
+             * Call JNI_OnLoad.  We have to override the current class
+             * loader, which will always be "null" since the stuff at the
+             * top of the stack is around Runtime.loadLibrary().
+             */
+            OnLoadFunc func = vonLoad;
+            Thread* self = dvmThreadSelf();
+            Object* prevOverride = self->classLoaderOverride;
+
+            self->classLoaderOverride = classLoader;
+            dvmChangeStatus(NULL, THREAD_NATIVE);
+            version = (*func)(gDvm.vmList, NULL);
+            dvmChangeStatus(NULL, THREAD_RUNNING);
+            self->classLoaderOverride = prevOverride;
+
+            if (version != JNI_VERSION_1_2 && version != JNI_VERSION_1_4 &&
+                version != JNI_VERSION_1_6)
+            {
+                LOGW("JNI_OnLoad returned bad version (%d) in %s %p\n",
+                    version, pathName, classLoader);
+                // TODO: dlclose, remove hash table entry
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+
+/*
+ * ===========================================================================
+ *      Signature-based method lookup
+ * ===========================================================================
+ */
+
+/*
+ * Create the pre-mangled form of the class+method string, e.g.
+ * "Java/java/lang/String/charAt" for Ljava/lang/String; + charAt.
+ * (mangleString() is applied to the result afterward.)
+ *
+ * Returns a newly-allocated string, and sets "*pLen" to the length.
+ * Returns NULL on allocation failure.
+ */
+static char* createJniNameString(const char* classDescriptor,
+    const char* methodName, int* pLen)
+{
+    size_t descLen = strlen(classDescriptor);
+    char* buf;
+
+    /* "Java/" + descriptor minus 'L' and ';' + '/' + method name */
+    *pLen = 4 + descLen + strlen(methodName);
+
+    buf = malloc(*pLen + 1);
+    if (buf == NULL)
+        return NULL;
+
+    /*
+     * Skip the leading "L" of the descriptor; the trailing ";" lands at
+     * index 5 + (descLen - 2) and is patched into a "/" afterward.
+     */
+    sprintf(buf, "Java/%s%s", classDescriptor + 1, methodName);
+    buf[5 + (descLen - 2)] = '/';
+
+    return buf;
+}
+
+/*
+ * Returns a newly-allocated, mangled copy of "str".
+ *
+ * "str" is a "modified UTF-8" string.  We convert it to UTF-16 first to
+ * make life simpler.
+ *
+ * Escapes applied: '_' -> "_1", ';' -> "_2", '[' -> "_3", '/' -> "_",
+ * and any UTF-16 unit above 127 becomes "_0xxxx" (four lowercase hex
+ * digits).  Two passes: first to size the output, then to fill it.
+ *
+ * Returns NULL if either allocation fails.
+ */
+static char* mangleString(const char* str, int len)
+{
+    u2* utf16 = NULL;
+    char* mangle = NULL;
+    int charLen;
+
+    //LOGI("mangling '%s' %d\n", str, len);
+
+    assert(str[len] == '\0');
+
+    charLen = dvmUtf8Len(str);  /* #of UTF-16 units after conversion */
+    utf16 = (u2*) malloc(sizeof(u2) * charLen);
+    if (utf16 == NULL)
+        goto bail;
+
+    dvmConvertUtf8ToUtf16(utf16, str);
+
+    /*
+     * Compute the length of the mangled string.
+     */
+    int i, mangleLen = 0;
+
+    for (i = 0; i < charLen; i++) {
+        u2 ch = utf16[i];
+
+        if (ch > 127) {
+            mangleLen += 6;     /* "_0xxxx" */
+        } else {
+            switch (ch) {
+            case '_':
+            case ';':
+            case '[':
+                mangleLen += 2; /* two-char escape */
+                break;
+            default:
+                mangleLen++;    /* copied as one char (incl. '/' -> '_') */
+                break;
+            }
+        }
+    }
+
+    char* cp;
+
+    mangle = (char*) malloc(mangleLen +1);
+    if (mangle == NULL)
+        goto bail;
+
+    for (i = 0, cp = mangle; i < charLen; i++) {
+        u2 ch = utf16[i];
+
+        if (ch > 127) {
+            sprintf(cp, "_0%04x", ch);
+            cp += 6;
+        } else {
+            switch (ch) {
+            case '_':
+                *cp++ = '_';
+                *cp++ = '1';
+                break;
+            case ';':
+                *cp++ = '_';
+                *cp++ = '2';
+                break;
+            case '[':
+                *cp++ = '_';
+                *cp++ = '3';
+                break;
+            case '/':
+                *cp++ = '_';
+                break;
+            default:
+                *cp++ = (char) ch;
+                break;
+            }
+        }
+    }
+
+    *cp = '\0';
+
+bail:
+    free(utf16);    /* free(NULL) is harmless */
+    return mangle;  /* NULL if an allocation failed */
+}
+
+/*
+ * Create the mangled form of the parameter types.
+ *
+ * Returns a newly-allocated string (caller frees), or NULL if
+ * mangleString() fails to allocate.
+ */
+static char* createMangledSignature(const DexProto* proto)
+{
+    DexStringCache sigCache;
+    const char* interim;
+    char* result;
+
+    dexStringCacheInit(&sigCache);
+    interim = dexProtoGetParameterDescriptors(proto, &sigCache);
+    result = mangleString(interim, strlen(interim));
+    dexStringCacheRelease(&sigCache);  /* releases "interim" storage */
+
+    return result;
+}
+
+/*
+ * (This is a dvmHashForeach callback.)
+ *
+ * Search for a matching method in this shared library.
+ *
+ * We try dlsym() on the short name (class + method) first, then on the
+ * long name with the mangled parameter signature appended.  Returns the
+ * function pointer cast to an int (nonzero stops the foreach), or 0 to
+ * keep scanning -- including, silently, on allocation failure.
+ *
+ * NOTE(review): the pointer-to-int cast assumes pointers fit in an int
+ * (32-bit); revisit for 64-bit targets.
+ */
+static int findMethodInLib(void* vlib, void* vmethod)
+{
+    const SharedLib* pLib = (const SharedLib*) vlib;
+    const Method* meth = (const Method*) vmethod;
+    char* preMangleCM = NULL;
+    char* mangleCM = NULL;
+    char* mangleSig = NULL;
+    char* mangleCMSig = NULL;
+    void* func = NULL;
+    int len;
+
+    if (meth->clazz->classLoader != pLib->classLoader) {
+        LOGD("+++ not scanning '%s' for '%s' (wrong CL)\n",
+            pLib->pathName, meth->name);
+        return 0;
+    } else
+        LOGV("+++ scanning '%s' for '%s'\n", pLib->pathName, meth->name);
+
+    /*
+     * First, we try it without the signature.
+     */
+    preMangleCM =
+        createJniNameString(meth->clazz->descriptor, meth->name, &len);
+    if (preMangleCM == NULL)
+        goto bail;
+
+    mangleCM = mangleString(preMangleCM, len);
+    if (mangleCM == NULL)
+        goto bail;
+
+    LOGV("+++ calling dlsym(%s)\n", mangleCM);
+    func = dlsym(pLib->handle, mangleCM);
+    if (func == NULL) {
+        mangleSig =
+            createMangledSignature(&meth->prototype);
+        if (mangleSig == NULL)
+            goto bail;
+
+        mangleCMSig = (char*) malloc(strlen(mangleCM) + strlen(mangleSig) +3);
+        if (mangleCMSig == NULL)
+            goto bail;
+
+        sprintf(mangleCMSig, "%s__%s", mangleCM, mangleSig);
+
+        LOGV("+++ calling dlsym(%s)\n", mangleCMSig);
+        func = dlsym(pLib->handle, mangleCMSig);
+        if (func != NULL) {
+            LOGV("Found '%s' with dlsym\n", mangleCMSig);
+        }
+    } else {
+        LOGV("Found '%s' with dlsym\n", mangleCM);
+    }
+
+bail:
+    free(preMangleCM);  /* all four are NULL-safe to free */
+    free(mangleCM);
+    free(mangleSig);
+    free(mangleCMSig);
+    return (int) func;
+}
+
+/*
+ * See if the requested method lives in any of the currently-loaded
+ * shared libraries.  We do this by checking each of them for the expected
+ * method signature.
+ *
+ * Returns the native function pointer (via findMethodInLib's nonzero
+ * result), or NULL if no loaded library provides the symbol.  Aborts if
+ * called before the library table has been created.
+ */
+static void* lookupSharedLibMethod(const Method* method)
+{
+    if (gDvm.nativeLibs == NULL) {
+        LOGE("Unexpected init state: nativeLibs not ready\n");
+        dvmAbort();
+    }
+    return (void*) dvmHashForeach(gDvm.nativeLibs, findMethodInLib,
+        (void*) method);
+}
+
diff --git a/vm/Native.h b/vm/Native.h
new file mode 100644
index 0000000..b11a2ea
--- /dev/null
+++ b/vm/Native.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik's native call interface.
+ *
+ * You should follow the JNI function naming conventions, but prefix with
+ * "Dalvik_" instead of "Java_".
+ */
+#ifndef _DALVIK_NATIVE
+#define _DALVIK_NATIVE
+
+/*
+ * Method description; equivalent to a JNI struct.
+ */
+typedef struct DalvikNativeMethod {
+    const char* name;         /* method name */
+    const char* signature;    /* method descriptor string */
+    DalvikNativeFunc  fnPtr;  /* implementation */
+} DalvikNativeMethod;
+
+/*
+ * All methods for one class.  The last "methodInfo" has a NULL "name".
+ */
+typedef struct DalvikNativeClass {
+    const char* classDescriptor;
+    const DalvikNativeMethod* methodInfo;
+    u4          classDescriptorHash;          /* initialized at runtime */
+} DalvikNativeClass;
+
+
+/* init/shutdown */
+bool dvmNativeStartup(void);
+void dvmNativeShutdown(void);
+
+
+/*
+ * Convert argc/argv into a function call.  This is platform-specific.
+ */
+void dvmPlatformInvoke(void* pEnv, ClassObject* clazz, int argInfo, int argc,
+    const u4* argv, const char* signature, void* func, JValue* pResult);
+
+/*
+ * Convert a short library name ("jpeg") to a system-dependent name
+ * ("libjpeg.so").  Returns a newly-allocated string.
+ */
+char* dvmCreateSystemLibraryName(char* libName);
+//void dvmLoadNativeLibrary(StringObject* libNameObj, Object* classLoader);
+bool dvmLoadNativeCode(const char* fileName, Object* classLoader);
+
+
+/*
+ * Some setup for internal native functions.
+ */
+bool dvmInternalNativeStartup(void);
+void dvmInternalNativeShutdown(void);
+
+DalvikNativeFunc dvmLookupInternalNativeMethod(const Method* method);
+
+/* exception-throwing stub for abstract methods (DalvikNativeFunc) */
+void dvmAbstractMethodStub(const u4* args, JValue* pResult);
+
+/*
+ * Resolve a native method.  This uses the same prototype as a
+ * DalvikBridgeFunc, because it takes the place of the actual function
+ * until the first time that it's invoked.
+ *
+ * Causes the method's class to be initialized.
+ *
+ * Throws an exception on failure (the function itself returns nothing;
+ * any result is delivered through "pResult").
+ */
+void dvmResolveNativeMethod(const u4* args, JValue* pResult,
+    const Method* method, struct Thread* self);
+
+//#define GET_ARG_LONG(_args, _elem)          (*(s8*)(&(_args)[_elem]))
+#define GET_ARG_LONG(_args, _elem)          dvmGetArgLong(_args, _elem)
+
+/*
+ * Helpful function for accessing long integers in "u4* args".
+ *
+ * We can't just return *(s8*)(&args[elem]), because that breaks if our
+ * architecture requires 64-bit alignment of 64-bit values.
+ *
+ * Big/little endian shouldn't matter here -- ordering of words within a
+ * long seems consistent across our supported platforms.
+ */
+INLINE s8 dvmGetArgLong(const u4* args, int elem)
+{
+    /*
+     * A two-word union copy would work as well, but memcpy() collapses
+     * to simple register moves under gcc's optimizer and makes no
+     * alignment assumptions at all.
+     */
+    s8 val;
+    memcpy(&val, &args[elem], sizeof(val));
+    return val;
+}
+
+#endif /*_DALVIK_NATIVE*/
diff --git a/vm/PointerSet.c b/vm/PointerSet.c
new file mode 100644
index 0000000..e40387c
--- /dev/null
+++ b/vm/PointerSet.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Maintain an expanding set of unique pointer values.
+ */
+#include "Dalvik.h"
+
+/*
+ * Sorted, expanding list of pointers.
+ */
+struct PointerSet {
+    u2          alloc;      /* #of slots allocated in "list" */
+    u2          count;      /* #of slots currently in use */
+    const void** list;      /* sorted storage; NULL until first alloc */
+};
+
+/*
+ * Verify that the set is in non-decreasing pointer order.  Debug-only
+ * helper, invoked from assert().
+ */
+static bool verifySorted(PointerSet* pSet)
+{
+    int idx;
+
+    for (idx = 1; idx < pSet->count; idx++) {
+        if (pSet->list[idx] < pSet->list[idx - 1])
+            return false;
+    }
+
+    return true;
+}
+
+
+/*
+ * Allocate a new PointerSet.  "initialSize" slots are pre-allocated when
+ * it is positive; otherwise storage is deferred until the first insert.
+ *
+ * Returns NULL on failure.
+ */
+PointerSet* dvmPointerSetAlloc(int initialSize)
+{
+    PointerSet* pSet = calloc(1, sizeof(PointerSet));
+
+    if (pSet == NULL)
+        return NULL;
+
+    if (initialSize > 0) {
+        pSet->list = malloc(sizeof(const void*) * initialSize);
+        if (pSet->list == NULL) {
+            free(pSet);
+            return NULL;
+        }
+        pSet->alloc = initialSize;
+    }
+
+    return pSet;
+}
+
+/*
+ * Free up a PointerSet and its storage.
+ *
+ * Tolerates a NULL argument, matching free() semantics.
+ */
+void dvmPointerSetFree(PointerSet* pSet)
+{
+    if (pSet == NULL)
+        return;
+
+    free(pSet->list);       /* free(NULL) is a no-op; no guard needed */
+    pSet->list = NULL;
+    free(pSet);
+}
+
+/*
+ * Get the number of pointers currently stored in the list.
+ */
+int dvmPointerSetGetCount(const PointerSet* pSet)
+{
+    return pSet->count;
+}
+
+/*
+ * Get the Nth entry from the list.
+ *
+ * No bounds check; the caller must ensure 0 <= i < count.
+ */
+const void* dvmPointerSetGetEntry(const PointerSet* pSet, int i)
+{
+    return pSet->list[i];
+}
+
+/*
+ * Insert a new entry into the list.  If it already exists, this returns
+ * without doing anything.
+ *
+ * The list is kept sorted.  The insertion point is derived from the
+ * "nearby" index that the failed binary search in dvmPointerSetHas()
+ * left behind, which may be one slot off in either direction.
+ *
+ * Aborts the VM if the list needs to grow and realloc fails.
+ */
+void dvmPointerSetAddEntry(PointerSet* pSet, const void* ptr)
+{
+    int nearby;
+
+    if (dvmPointerSetHas(pSet, ptr, &nearby))
+        return;
+
+    /* ensure we have space to add one more */
+    if (pSet->count == pSet->alloc) {
+        /* time to expand */
+        const void** newList;
+
+        if (pSet->alloc == 0)
+            pSet->alloc = 4;
+        else
+            pSet->alloc *= 2;
+        LOGVV("expanding %p to %d\n", pSet, pSet->alloc);
+        newList = realloc(pSet->list, pSet->alloc * sizeof(const void*));
+        if (newList == NULL) {
+            LOGE("Failed expanding ptr set (alloc=%d)\n", pSet->alloc);
+            dvmAbort();
+        }
+        pSet->list = newList;
+    }
+
+    if (pSet->count == 0) {
+        /* empty list */
+        assert(nearby == 0);
+    } else {
+        /*
+         * Determine the insertion index.  The binary search might have
+         * terminated "above" or "below" the value.
+         */
+        if (nearby != 0 && ptr < pSet->list[nearby-1]) {
+            //LOGD("nearby-1=%d %p, inserting %p at -1\n",
+            //    nearby-1, pSet->list[nearby-1], ptr);
+            nearby--;
+        } else if (ptr < pSet->list[nearby]) {
+            //LOGD("nearby=%d %p, inserting %p at +0\n",
+            //    nearby, pSet->list[nearby], ptr);
+        } else {
+            //LOGD("nearby+1=%d %p, inserting %p at +1\n",
+            //    nearby+1, pSet->list[nearby+1], ptr);
+            nearby++;
+        }
+
+        /*
+         * Move existing values, if necessary.
+         */
+        if (nearby != pSet->count) {
+            /* shift up */
+            memmove(&pSet->list[nearby+1], &pSet->list[nearby],
+                (pSet->count - nearby) * sizeof(pSet->list[0]));
+        }
+    }
+
+    pSet->list[nearby] = ptr;
+    pSet->count++;
+
+    assert(verifySorted(pSet));
+}
+
+/*
+ * Remove an entry from the set, preserving sorted order.
+ *
+ * Returns "true" if the element was successfully removed, "false" if it
+ * wasn't present.
+ */
+bool dvmPointerSetRemoveEntry(PointerSet* pSet, const void* ptr)
+{
+    int where;
+
+    if (!dvmPointerSetHas(pSet, ptr, &where))
+        return false;
+
+    if (where != pSet->count-1) {
+        /* shift down */
+        memmove(&pSet->list[where], &pSet->list[where+1],
+            (pSet->count-1 - where) * sizeof(pSet->list[0]));
+    }
+
+    pSet->count--;
+    pSet->list[pSet->count] = (const void*) 0xdecadead;  /* poison stale slot */
+    return true;
+}
+
+/*
+ * Search the (sorted) set for "ptr" with a binary search.
+ *
+ * Returns "true" and stores the matching index into "*pIndex" (if
+ * non-NULL) when the value is present.  Returns "false" otherwise; in
+ * that case "*pIndex" receives the index of a nearby element -- within
+ * one slot of the correct insertion point (see dvmPointerSetAddEntry).
+ */
+bool dvmPointerSetHas(const PointerSet* pSet, const void* ptr, int* pIndex)
+{
+    int hi, lo, mid;
+
+    lo = mid = 0;
+    hi = pSet->count-1;
+
+    /* array is sorted, use a binary search */
+    while (lo <= hi) {
+        mid = (lo + hi) / 2;
+        const void* listVal = pSet->list[mid];
+
+        if (ptr > listVal) {
+            lo = mid + 1;
+        } else if (ptr < listVal) {
+            hi = mid - 1;
+        } else /* listVal == ptr */ {
+            if (pIndex != NULL)
+                *pIndex = mid;
+            return true;
+        }
+    }
+
+    if (pIndex != NULL)
+        *pIndex = mid;  /* last probe position, i.e. the "nearby" slot */
+    return false;
+}
+
+/*
+ * Print the list contents to stdout.  For debugging.
+ *
+ * Each entry is preceded by a space; no trailing newline is written.
+ */
+void dvmPointerSetDump(const PointerSet* pSet)
+{
+    int i;
+    for (i = 0; i < pSet->count; i++)
+        printf(" %p", pSet->list[i]);
+}
+
diff --git a/vm/PointerSet.h b/vm/PointerSet.h
new file mode 100644
index 0000000..acc56f9
--- /dev/null
+++ b/vm/PointerSet.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Maintain an expanding set of unique pointer values.  The set is
+ * kept in sorted order.
+ */
+#ifndef _DALVIK_POINTERSET
+#define _DALVIK_POINTERSET
+
+struct PointerSet;   /* private */
+typedef struct PointerSet PointerSet;
+
+/*
+ * Allocate a new PointerSet.
+ *
+ * Returns NULL on failure.
+ */
+PointerSet* dvmPointerSetAlloc(int initialSize);
+
+/*
+ * Free up a PointerSet.
+ */
+void dvmPointerSetFree(PointerSet* pSet);
+
+/*
+ * Get the number of pointers currently stored in the list.
+ */
+int dvmPointerSetGetCount(const PointerSet* pSet);
+
+/*
+ * Get the Nth entry from the list.
+ */
+const void* dvmPointerSetGetEntry(const PointerSet* pSet, int i);
+
+/*
+ * Insert a new entry into the list.  If it already exists, this returns
+ * without doing anything.
+ */
+void dvmPointerSetAddEntry(PointerSet* pSet, const void* ptr);
+
+/*
+ * Returns "true" if the element was successfully removed.
+ */
+bool dvmPointerSetRemoveEntry(PointerSet* pSet, const void* ptr);
+
+/*
+ * Returns "true" if the value appears, "false" otherwise.  If "pIndex" is
+ * non-NULL, it will receive the matching index or the index of a nearby
+ * element.
+ */
+bool dvmPointerSetHas(const PointerSet* pSet, const void* ptr, int* pIndex);
+
+/*
+ * Find an entry in the set.  Returns the index, or -1 if not found.
+ */
+INLINE int dvmPointerSetFind(const PointerSet* pSet, const void* ptr) {
+    /* dvmPointerSetHas() writes a "nearby" index on a miss; discard it */
+    int idx;
+    return dvmPointerSetHas(pSet, ptr, &idx) ? idx : -1;
+}
+
+/*
+ * Print the list contents to stdout.  For debugging.
+ */
+void dvmPointerSetDump(const PointerSet* pSet);
+
+#endif /*_DALVIK_POINTERSET*/
diff --git a/vm/Profile.c b/vm/Profile.c
new file mode 100644
index 0000000..832ac11
--- /dev/null
+++ b/vm/Profile.c
@@ -0,0 +1,754 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Android's method call profiling goodies.
+ */
+#include "Dalvik.h"
+
+#ifdef WITH_PROFILER        // -- include rest of file
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/time.h>
+#include <time.h>
+#include <sys/mman.h>
+#include <sched.h>
+#include <errno.h>
+
+#ifdef HAVE_ANDROID_OS
+# define UPDATE_MAGIC_PAGE      1
+# define MAGIC_PAGE_BASE_ADDR   0x08000000
+# ifndef PAGESIZE
+#  define PAGESIZE              4096
+# endif
+#endif
+
+/*
+ * File format:
+ *  header
+ *  record 0
+ *  record 1
+ *  ...
+ *
+ * Header format:
+ *  u4  magic ('SLOW')
+ *  u2  version
+ *  u2  offset to data
+ *  u8  start date/time in usec
+ *
+ * Record format:
+ *  u1  thread ID
+ *  u4  method ID | method action
+ *  u4  time delta since start, in usec
+ *
+ * 32 bits of microseconds is 70 minutes.
+ *
+ * All values are stored in little-endian order.
+ */
+#define TRACE_REC_SIZE      9
+#define TRACE_MAGIC         0x574f4c53
+#define TRACE_HEADER_LEN    32
+
+
+/*
+ * Get the wall-clock date/time, in usec.
+ */
+static inline u8 getTimeInUsec(void)   /* "(void)": "()" declares no prototype in C89/C99 */
+{
+    struct timeval tv;
+
+    gettimeofday(&tv, NULL);
+    return tv.tv_sec * 1000000LL + tv.tv_usec;
+}
+
+/*
+ * Get the current time, in microseconds.
+ *
+ * This can mean one of two things.  In "global clock" mode, we get the
+ * same time across all threads.  If we use CLOCK_THREAD_CPUTIME_ID, we
+ * get a per-thread CPU usage timer.  The latter is better, but a bit
+ * more complicated to implement.
+ */
+static inline u8 getClock(void)   /* "(void)": "()" declares no prototype in C89/C99 */
+{
+#if defined(HAVE_POSIX_CLOCKS)
+    struct timespec tm;
+
+    clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tm);
+    //assert(tm.tv_nsec >= 0 && tm.tv_nsec < 1*1000*1000*1000);
+    if (!(tm.tv_nsec >= 0 && tm.tv_nsec < 1*1000*1000*1000)) {
+        LOGE("bad nsec: %ld\n", tm.tv_nsec);
+        dvmAbort();
+    }
+
+    return tm.tv_sec * 1000000LL + tm.tv_nsec / 1000;
+#else
+    struct timeval tv;
+
+    gettimeofday(&tv, NULL);
+    return tv.tv_sec * 1000000LL + tv.tv_usec;
+#endif
+}
+
+/*
+ * Write little-endian data, one byte at a time (alignment-safe).
+ */
+static inline void storeShortLE(u1* buf, u2 val)
+{
+    buf[0] = (u1) val;
+    buf[1] = (u1) (val >> 8);
+}
+static inline void storeIntLE(u1* buf, u4 val)
+{
+    int i;
+    for (i = 0; i < 4; i++)
+        buf[i] = (u1) (val >> (i * 8));
+}
+static inline void storeLongLE(u1* buf, u8 val)
+{
+    int i;
+    for (i = 0; i < 8; i++)
+        buf[i] = (u1) (val >> (i * 8));
+}
+
+/*
+ * Boot-time init.
+ *
+ * Sets up the "dmtrace" method-trace state, the per-opcode instruction
+ * counters, and (under HAVE_ANDROID_OS) the emulator's magic trace page.
+ * Returns "false" if any piece fails.
+ */
+bool dvmProfilingStartup(void)
+{
+    /*
+     * Initialize "dmtrace" method profiling.
+     */
+    memset(&gDvm.methodTrace, 0, sizeof(gDvm.methodTrace));
+    dvmInitMutex(&gDvm.methodTrace.startStopLock);
+    pthread_cond_init(&gDvm.methodTrace.threadExitCond, NULL);
+
+    ClassObject* clazz =
+        dvmFindClassNoInit("Ldalvik/system/VMDebug;", NULL);
+    assert(clazz != NULL);
+    gDvm.methodTrace.gcMethod =
+        dvmFindDirectMethodByDescriptor(clazz, "startGC", "()V");
+    gDvm.methodTrace.classPrepMethod =
+        dvmFindDirectMethodByDescriptor(clazz, "startClassPrep", "()V");
+    if (gDvm.methodTrace.gcMethod == NULL ||
+        gDvm.methodTrace.classPrepMethod == NULL)
+    {
+        LOGE("Unable to find startGC or startClassPrep\n");
+        return false;
+    }
+
+    assert(!dvmCheckException(dvmThreadSelf()));
+
+    /*
+     * Allocate storage for instruction counters.
+     */
+    gDvm.executedInstrCounts = (int*) malloc(kNumDalvikInstructions * sizeof(int));
+    if (gDvm.executedInstrCounts == NULL)
+        return false;
+    memset(gDvm.executedInstrCounts, 0, kNumDalvikInstructions * sizeof(int));
+
+#ifdef UPDATE_MAGIC_PAGE
+    /*
+     * If we're running on the emulator, there's a magic page into which
+     * we can put interpreted method information.  This allows interpreted
+     * methods to show up in the emulator's code traces.
+     *
+     * We could key this off of the "ro.kernel.qemu" property, but there's
+     * no real harm in doing this on a real device.
+     */
+    gDvm.emulatorTracePage = mmap((void*) MAGIC_PAGE_BASE_ADDR,
+        PAGESIZE, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED|MAP_ANON, -1, 0);
+    if (gDvm.emulatorTracePage == MAP_FAILED) {
+        LOGE("Unable to mmap magic page (0x%08x)\n", MAGIC_PAGE_BASE_ADDR);
+        return false;
+    }
+    *(u4*) gDvm.emulatorTracePage = 0;  /* zero the first word */
+#else
+    assert(gDvm.emulatorTracePage == NULL);
+#endif
+
+    return true;
+}
+
+/*
+ * Free up profiling resources.
+ *
+ * Clears the pointers in gDvm after releasing them so a second call
+ * doesn't double-free or double-unmap.
+ */
+void dvmProfilingShutdown(void)
+{
+#ifdef UPDATE_MAGIC_PAGE
+    if (gDvm.emulatorTracePage != NULL) {
+        munmap(gDvm.emulatorTracePage, PAGESIZE);
+        gDvm.emulatorTracePage = NULL;
+    }
+#endif
+    free(gDvm.executedInstrCounts);
+    gDvm.executedInstrCounts = NULL;
+}
+
+/*
+ * Update the "active profilers" count.
+ *
+ * "count" should be +1 or -1.
+ *
+ * Lock-free: retries the compare-and-swap until no other thread races
+ * the update.  Aborts if the count would go negative.
+ */
+static void updateActiveProfilers(int count)
+{
+    int oldValue, newValue;
+
+    do {
+        oldValue = gDvm.activeProfilers;
+        newValue = oldValue + count;
+        if (newValue < 0) {
+            LOGE("Can't have %d active profilers\n", newValue);
+            dvmAbort();
+        }
+    } while (!ATOMIC_CMP_SWAP(&gDvm.activeProfilers, oldValue, newValue));
+
+    LOGD("+++ active profiler count now %d\n", newValue);  /* value from the winning iteration */
+}
+
+
+/*
+ * Reset the "cpuClockBase" field in all threads.
+ *
+ * Walks the thread list with the list lock held.  NOTE(review): clearing
+ * cpuClockBaseSet presumably makes each thread re-establish its base the
+ * next time it records a trace event -- confirm against the interpreter
+ * trace-record code.
+ */
+static void resetCpuClockBase(void)
+{
+    Thread* thread;
+
+    dvmLockThreadList(NULL);
+    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+        thread->cpuClockBaseSet = false;
+        thread->cpuClockBase = 0;
+    }
+    dvmUnlockThreadList();
+}
+
+/*
+ * Dump the thread list to the specified file.
+ *
+ * Writes one "<threadId>\t<name>" line per thread, holding the thread
+ * list lock for the duration.
+ */
+static void dumpThreadList(FILE* fp)
+{
+    Thread* thread;
+
+    dvmLockThreadList(NULL);
+    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+        char* name = dvmGetThreadName(thread);
+
+        fprintf(fp, "%d\t%s\n", thread->threadId, name);
+        free(name);  /* name was allocated for us */
+    }
+    dvmUnlockThreadList();
+}
+
+/*
+ * Emit one key-file line for each method in "methods" whose "inProfile"
+ * flag is set, clearing the flag as we go.  "pCache" is scratch space
+ * for descriptor formatting.
+ */
+static void dumpMarkedMethodsInList(FILE* fp, Method* methods, int count,
+    DexStringCache* pCache)
+{
+    Method* meth;
+    char* name;
+    int i;
+
+    for (i = 0; i < count; i++) {
+        meth = &methods[i];
+        if (!meth->inProfile)
+            continue;
+
+        name = dvmDescriptorToName(meth->clazz->descriptor);
+        fprintf(fp, "0x%08x\t%s\t%s\t%s\t%s\t%d\n", (int) meth,
+            name, meth->name,
+            dexProtoGetMethodDescriptor(&meth->prototype, pCache),
+            dvmGetMethodSourceFile(meth), dvmLineNumFromPC(meth, 0));
+        meth->inProfile = false;
+        free(name);
+    }
+}
+
+/*
+ * (This is a dvmHashForeach callback.)
+ *
+ * Write out and clear the "marked" methods of one loaded class, covering
+ * both the virtual and direct method lists.  Always returns 0 so the
+ * foreach keeps going.
+ */
+static int dumpMarkedMethods(void* vclazz, void* vfp)
+{
+    ClassObject* clazz = (ClassObject*) vclazz;
+    FILE* fp = (FILE*) vfp;
+    DexStringCache stringCache;
+
+    dexStringCacheInit(&stringCache);
+    dumpMarkedMethodsInList(fp, clazz->virtualMethods,
+        clazz->virtualMethodCount, &stringCache);
+    dumpMarkedMethodsInList(fp, clazz->directMethods,
+        clazz->directMethodCount, &stringCache);
+    dexStringCacheRelease(&stringCache);
+
+    return 0;
+}
+
+/*
+ * Dump the list of "marked" methods to the specified file.
+ *
+ * Iterates every loaded class with the class-table lock held;
+ * dumpMarkedMethods() clears the marks as it writes.
+ */
+static void dumpMethodList(FILE* fp)
+{
+    dvmHashTableLock(gDvm.loadedClasses);
+    dvmHashForeach(gDvm.loadedClasses, dumpMarkedMethods, (void*) fp);
+    dvmHashTableUnlock(gDvm.loadedClasses);
+}
+
+/*
+ * Start method tracing.  This opens the file and allocates the buffer.
+ * If any of these fail, we throw an exception and return.
+ *
+ * Method tracing is global to the VM.
+ */
+void dvmMethodTraceStart(const char* traceFileName, int bufferSize, int flags)
+{
+    MethodTraceState* state = &gDvm.methodTrace;
+
+    assert(bufferSize > 0);
+
+    if (state->traceEnabled != 0) {
+        LOGI("TRACE start requested, but already in progress; stopping\n");
+        dvmMethodTraceStop();
+    }
+    updateActiveProfilers(1);
+    LOGI("TRACE STARTED: '%s' %dKB\n",
+        traceFileName, bufferSize / 1024);
+    dvmLockMutex(&state->startStopLock);
+
+    /*
+     * Allocate storage and open files.
+     *
+     * We don't need to initialize the buffer, but doing so might remove
+     * some fault overhead if the pages aren't mapped until touched.
+     */
+    state->buf = (u1*) malloc(bufferSize);
+    if (state->buf == NULL) {
+        dvmThrowException("Ljava/lang/InternalError;", "buffer alloc failed");
+        goto fail;
+    }
+    state->traceFile = fopen(traceFileName, "w");
+    if (state->traceFile == NULL) {
+        LOGE("trace %s=%p errno=%d\n",
+            traceFileName, state->traceFile, errno);
+        dvmThrowException("Ljava/lang/InternalError;", "file open failed");
+        goto fail;
+    }
+    memset(state->buf, 0xee, bufferSize);  /* touch pages; fill with poison */
+
+    state->bufferSize = bufferSize;
+    state->overflow = false;
+
+    /*
+     * Enable alloc counts if we've been requested to do so.
+     */
+    state->flags = flags;
+    if ((flags & TRACE_ALLOC_COUNTS) != 0)
+        dvmStartAllocCounting();
+
+    /* reset our notion of the start time for all CPU threads */
+    resetCpuClockBase();
+
+    state->startWhen = getTimeInUsec();
+
+    /*
+     * Output the header.
+     */
+    memset(state->buf, 0, TRACE_HEADER_LEN);
+    storeIntLE(state->buf + 0, TRACE_MAGIC);
+    storeShortLE(state->buf + 4, TRACE_VERSION);
+    storeShortLE(state->buf + 6, TRACE_HEADER_LEN);
+    storeLongLE(state->buf + 8, state->startWhen);
+    state->curOffset = TRACE_HEADER_LEN;
+
+    MEM_BARRIER();
+
+    /*
+     * Set the "enabled" flag.  Once we do this, threads will wait to be
+     * signaled before exiting, so we have to make sure we wake them up.
+     */
+    state->traceEnabled = true;
+    dvmUnlockMutex(&state->startStopLock);
+    return;
+
+fail:
+    /*
+     * Undo the updateActiveProfilers(1) from above; without this a
+     * failed start leaves the active-profiler count permanently elevated.
+     */
+    updateActiveProfilers(-1);
+    if (state->traceFile != NULL) {
+        fclose(state->traceFile);
+        state->traceFile = NULL;
+    }
+    if (state->buf != NULL) {
+        free(state->buf);
+        state->buf = NULL;
+    }
+    dvmUnlockMutex(&state->startStopLock);
+}
+
+/*
+ * Run through the data buffer and pull out the methods that were visited.
+ * Set a mark so that we know which ones to output.
+ *
+ * Each record is TRACE_REC_SIZE bytes: a thread ID byte followed by a
+ * little-endian word holding the method ID plus the action bits (see the
+ * file-format comment above).  METHOD_ID() recovers the Method pointer
+ * from that word.
+ */
+static void markTouchedMethods(void)
+{
+    u1* ptr = gDvm.methodTrace.buf + TRACE_HEADER_LEN;
+    u1* end = gDvm.methodTrace.buf + gDvm.methodTrace.curOffset;
+    unsigned int methodVal;
+    Method* method;
+
+    while (ptr < end) {
+        methodVal = *(ptr+1) | (*(ptr+2) << 8) | (*(ptr+3) << 16)
+                    | (*(ptr+4) << 24);  /* LE word at record offset 1 */
+        method = (Method*) METHOD_ID(methodVal);
+
+        method->inProfile = true;
+        ptr += TRACE_REC_SIZE;
+    }
+}
+
+/*
+ * Compute the amount of overhead in a clock call, in nsec.
+ *
+ * This value is going to vary depending on what else is going on in the
+ * system.  When examined across several runs a pattern should emerge.
+ *
+ * We make roughly 4000 * 8 = 32000 calls, so dividing the elapsed usec
+ * by 32 yields usec-per-1000-calls, i.e. nsec per call.
+ */
+static u4 getClockOverhead(void)
+{
+    u8 calStart, calElapsed;
+    int i;
+
+    calStart = getClock();
+    for (i = 1000 * 4; i > 0; i--) {
+        getClock();
+        getClock();
+        getClock();
+        getClock();
+        getClock();
+        getClock();
+        getClock();
+        getClock();
+    }
+
+    calElapsed = getClock() - calStart;
+    return (int) (calElapsed / (8*4));  /* usec / 32 == nsec per call */
+}
+
+/*
+ * Stop method tracing.  We write the buffer to disk and generate a key
+ * file so we can interpret it.
+ *
+ * Safe to call redundantly; only the caller that observes traceEnabled
+ * set actually performs the shutdown and file write.
+ */
+void dvmMethodTraceStop(void)
+{
+    MethodTraceState* state = &gDvm.methodTrace;
+    u8 elapsed;
+
+    /*
+     * We need this to prevent somebody from starting a new trace while
+     * we're in the process of stopping the old.
+     */
+    dvmLockMutex(&state->startStopLock);
+
+    if (!state->traceEnabled) {
+        /* somebody already stopped it, or it was never started */
+        dvmUnlockMutex(&state->startStopLock);
+        return;
+    } else {
+        updateActiveProfilers(-1);
+    }
+
+    /* compute elapsed time */
+    elapsed = getTimeInUsec() - state->startWhen;
+
+    /*
+     * Globally disable it, and allow other threads to notice.  We want
+     * to stall here for at least as long as dvmMethodTraceAdd needs
+     * to finish.  There's no real risk though -- it will take a while to
+     * write the data to disk, and we don't clear the buffer pointer until
+     * after that completes.
+     */
+    state->traceEnabled = false;
+    MEM_BARRIER();
+    sched_yield();
+
+    if ((state->flags & TRACE_ALLOC_COUNTS) != 0)
+        dvmStopAllocCounting();
+
+    LOGI("TRACE STOPPED%s: writing %d records\n",
+        state->overflow ? " (NOTE: overflowed buffer)" : "",
+        (state->curOffset - TRACE_HEADER_LEN) / TRACE_REC_SIZE);
+
+    /*
+     * Do a quick calibration test to see how expensive our clock call is.
+     */
+    u4 clockNsec = getClockOverhead();
+
+    markTouchedMethods();
+
+    /*
+     * Emit the plain-text "key" portion: version info, options, the
+     * thread list, and the methods marked by markTouchedMethods().
+     * The binary record buffer is appended after the "*end" token.
+     */
+    fprintf(state->traceFile, "%cversion\n", TOKEN_CHAR);
+    fprintf(state->traceFile, "%d\n", TRACE_VERSION);
+    fprintf(state->traceFile, "data-file-overflow=%s\n",
+        state->overflow ? "true" : "false");
+#if defined(HAVE_POSIX_CLOCKS)
+    fprintf(state->traceFile, "clock=thread-cpu\n");
+#else
+    fprintf(state->traceFile, "clock=global\n");
+#endif
+    fprintf(state->traceFile, "elapsed-time-usec=%llu\n", elapsed);
+    fprintf(state->traceFile, "num-method-calls=%d\n",
+        (state->curOffset - TRACE_HEADER_LEN) / TRACE_REC_SIZE);
+    fprintf(state->traceFile, "clock-call-overhead-nsec=%d\n", clockNsec);
+    fprintf(state->traceFile, "vm=dalvik\n");
+    if ((state->flags & TRACE_ALLOC_COUNTS) != 0) {
+        fprintf(state->traceFile, "alloc-count=%d\n",
+            gDvm.allocProf.allocCount);
+        fprintf(state->traceFile, "alloc-size=%d\n",
+            gDvm.allocProf.allocSize);
+        fprintf(state->traceFile, "gc-count=%d\n",
+            gDvm.allocProf.gcCount);
+    }
+    fprintf(state->traceFile, "%cthreads\n", TOKEN_CHAR);
+    dumpThreadList(state->traceFile);
+    fprintf(state->traceFile, "%cmethods\n", TOKEN_CHAR);
+    dumpMethodList(state->traceFile);
+    fprintf(state->traceFile, "%cend\n", TOKEN_CHAR);
+
+    if (fwrite(state->buf, state->curOffset, 1, state->traceFile) != 1) {
+        dvmThrowException("Ljava/lang/InternalError;", "data write failed");
+        goto bail;
+    }
+
+bail:
+    /* release resources whether or not the write succeeded */
+    free(state->buf);
+    state->buf = NULL;
+    fclose(state->traceFile);
+    state->traceFile = NULL;
+
+    /* wake any threads waiting for the trace to stop before exiting */
+    int cc = pthread_cond_broadcast(&state->threadExitCond);
+    assert(cc == 0);
+    dvmUnlockMutex(&state->startStopLock);
+}
+
+
+/*
+ * We just did something with a method.  Emit a record.
+ *
+ * Multiple threads may be banging on this all at once.  We use atomic ops
+ * rather than mutexes for speed.
+ */
+void dvmMethodTraceAdd(Thread* self, const Method* method, int action)
+{
+    MethodTraceState* state = &gDvm.methodTrace;
+    u4 clockDiff, methodVal;
+    int oldOffset, newOffset;
+    u1* ptr;
+
+    /*
+     * We can only access the per-thread CPU clock from within the
+     * thread, so we have to initialize the base time on the first use.
+     * (Looks like pthread_getcpuclockid(thread, &id) will do what we
+     * want, but it doesn't appear to be defined on the device.)
+     */
+    if (!self->cpuClockBaseSet) {
+        self->cpuClockBase = getClock();
+        self->cpuClockBaseSet = true;
+        //LOGI("thread base id=%d 0x%llx\n",
+        //    self->threadId, self->cpuClockBase);
+    }
+
+    /*
+     * Advance "curOffset" atomically.  On CAS failure another thread
+     * claimed the slot first; re-read and retry.
+     */
+    do {
+        oldOffset = state->curOffset;
+        newOffset = oldOffset + TRACE_REC_SIZE;
+        if (newOffset > state->bufferSize) {
+            /* buffer is full; drop the record and flag it for the report */
+            state->overflow = true;
+            return;
+        }
+    } while (!ATOMIC_CMP_SWAP(&state->curOffset, oldOffset, newOffset));
+
+    //assert(METHOD_ACTION((u4) method) == 0);
+
+    u8 now = getClock();
+    clockDiff = (u4) (now - self->cpuClockBase);
+
+    methodVal = METHOD_COMBINE((u4) method, action);
+
+    /*
+     * Write data into "oldOffset".  Record layout (little-endian):
+     *   byte 0     thread id, truncated to 8 bits -- NOTE(review): ids
+     *              above 255 will collide; confirm the thread id range
+     *   bytes 1-4  Method* with the action in the low two bits
+     *   bytes 5-8  clock delta from this thread's base time
+     */
+    ptr = state->buf + oldOffset;
+    *ptr++ = self->threadId;
+    *ptr++ = (u1) methodVal;
+    *ptr++ = (u1) (methodVal >> 8);
+    *ptr++ = (u1) (methodVal >> 16);
+    *ptr++ = (u1) (methodVal >> 24);
+    *ptr++ = (u1) clockDiff;
+    *ptr++ = (u1) (clockDiff >> 8);
+    *ptr++ = (u1) (clockDiff >> 16);
+    *ptr++ = (u1) (clockDiff >> 24);
+}
+
+/*
+ * We just did something with a method.  Emit a record by setting a value
+ * in a magic memory location.
+ *
+ * No-op unless the build defines UPDATE_MAGIC_PAGE.
+ */
+void dvmEmitEmulatorTrace(const Method* method, int action)
+{
+#ifdef UPDATE_MAGIC_PAGE
+    /*
+     * We want to store the address of the Dalvik bytecodes.  Native and
+     * abstract methods don't have any, so we don't do this for those.
+     * (Abstract methods are never called, but in Dalvik they can be
+     * because we do a "late trap" to generate the abstract method
+     * exception.  However, we trap to a native method, so we don't need
+     * an explicit check for abstract here.)
+     */
+    if (dvmIsNativeMethod(method))
+        return;
+    assert(method->insns != NULL);
+
+    /* one u4 slot per action code, starting one word past the page base */
+    u4* pMagic = ((u4*) MAGIC_PAGE_BASE_ADDR) +1;
+
+    /*
+     * The dexlist output shows the &DexCode.insns offset value, which
+     * is offset from the start of the base DEX header. Method.insns
+     * is the absolute address, effectively offset from the start of
+     * the optimized DEX header. We either need to return the
+     * optimized DEX base file address offset by the right amount, or
+     * take the "real" address and subtract off the size of the
+     * optimized DEX header.
+     *
+     * Would be nice to factor this out at dexlist time, but we can't count
+     * on having access to the correct optimized DEX file.
+     */
+    u4 addr;
+#if 0
+    DexFile* pDexFile = method->clazz->pDvmDex->pDexFile;
+    addr = (u4)pDexFile->pOptHeader; /* file starts at "opt" header */
+    addr += dvmGetMethodCode(method)->insnsOff;
+#else
+    const DexOptHeader* pOptHdr = method->clazz->pDvmDex->pDexFile->pOptHeader;
+    addr = (u4) method->insns - pOptHdr->dexOffset;
+#endif
+    /* the slot index relies on the action values being 0, 1, 2 */
+    assert(METHOD_TRACE_ENTER == 0);
+    assert(METHOD_TRACE_EXIT == 1);
+    assert(METHOD_TRACE_UNROLL == 2);
+    *(pMagic+action) = addr;
+    LOGVV("Set %p = 0x%08x (%s.%s)\n",
+        pMagic+action, addr, method->clazz->descriptor, method->name);
+#endif
+}
+
+/*
+ * The GC calls this when it's about to start.  We add a marker to the
+ * trace output so the tool can exclude the GC cost from the results.
+ */
+void dvmMethodTraceGCBegin(void)
+{
+    TRACE_METHOD_ENTER(dvmThreadSelf(), gDvm.methodTrace.gcMethod);
+}
+/* Companion to dvmMethodTraceGCBegin: emits the matching "exit" record. */
+void dvmMethodTraceGCEnd(void)
+{
+    TRACE_METHOD_EXIT(dvmThreadSelf(), gDvm.methodTrace.gcMethod);
+}
+
+/*
+ * The class loader calls this when it's loading or initializing a class.
+ * Brackets the work with enter/exit trace records.
+ */
+void dvmMethodTraceClassPrepBegin(void)
+{
+    TRACE_METHOD_ENTER(dvmThreadSelf(), gDvm.methodTrace.classPrepMethod);
+}
+/* Companion to dvmMethodTraceClassPrepBegin: emits the "exit" record. */
+void dvmMethodTraceClassPrepEnd(void)
+{
+    TRACE_METHOD_EXIT(dvmThreadSelf(), gDvm.methodTrace.classPrepMethod);
+}
+
+
+/*
+ * Enable emulator trace info.  May be called multiple times (nested);
+ * only the first transition from zero logs a message.
+ */
+void dvmEmulatorTraceStart(void)
+{
+    updateActiveProfilers(1);
+
+    /* in theory we should make this an atomic inc; in practice not important */
+    gDvm.emulatorTraceEnableCount++;
+    if (gDvm.emulatorTraceEnableCount == 1)
+        LOGD("--- emulator method traces enabled\n");
+}
+
+/*
+ * Disable emulator trace info.  Aborts the VM if tracing was not enabled.
+ */
+void dvmEmulatorTraceStop(void)
+{
+    if (gDvm.emulatorTraceEnableCount == 0) {
+        LOGE("ERROR: emulator tracing not enabled\n");
+        dvmAbort();
+    }
+    updateActiveProfilers(-1);
+    /* in theory we should make this an atomic dec; in practice not important */
+    gDvm.emulatorTraceEnableCount--;
+    if (gDvm.emulatorTraceEnableCount == 0)
+        LOGD("--- emulator method traces disabled\n");
+}
+
+
+/*
+ * Start instruction counting.  May be called multiple times (nested).
+ */
+void dvmStartInstructionCounting(void)
+{
+    updateActiveProfilers(1);
+    /* in theory we should make this an atomic inc; in practice not important */
+    gDvm.instructionCountEnableCount++;
+}
+
+/*
+ * Stop instruction counting.  Aborts the VM if counting was not enabled.
+ */
+void dvmStopInstructionCounting(void)
+{
+    if (gDvm.instructionCountEnableCount == 0) {
+        LOGE("ERROR: instruction counting not enabled\n");
+        dvmAbort();
+    }
+    updateActiveProfilers(-1);
+    gDvm.instructionCountEnableCount--;
+}
+
+
+/*
+ * Start alloc counting.  Note this doesn't affect the "active profilers"
+ * count, since the interpreter loop is not involved.
+ *
+ * NOTE(review): the counters in gDvm.allocProf are not reset here, even
+ * though AllocProfState's comment asks for new fields to be zeroed in
+ * this function -- confirm whether counts should accumulate across
+ * start/stop cycles.
+ */
+void dvmStartAllocCounting(void)
+{
+    gDvm.allocProf.enabled = true;
+}
+
+/*
+ * Stop alloc counting.  Leaves the accumulated counters in place so they
+ * can still be read (e.g. by dvmMethodTraceStop).
+ */
+void dvmStopAllocCounting(void)
+{
+    gDvm.allocProf.enabled = false;
+}
+
+#endif /*WITH_PROFILER*/
diff --git a/vm/Profile.h b/vm/Profile.h
new file mode 100644
index 0000000..f762974
--- /dev/null
+++ b/vm/Profile.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Android's method call profiling goodies.
+ */
+#ifndef _DALVIK_PROFILE
+#define _DALVIK_PROFILE
+
+#ifndef NOT_VM      /* for utilities that sneakily include this file */
+
+#include <stdio.h>
+
+/* External allocations are hackish enough that it's worthwhile
+ * separating them for possible removal later.
+ */
+#define PROFILE_EXTERNAL_ALLOCATIONS 1
+
+struct Thread;      // extern
+
+
+/* boot init */
+bool dvmProfilingStartup(void);
+void dvmProfilingShutdown(void);
+
+/*
+ * Method trace state.  This is currently global.  In theory we could make
+ * most of this per-thread.
+ *
+ */
+typedef struct MethodTraceState {
+    /* these are set during VM init */
+    Method* gcMethod;               /* tags GC activity in the trace */
+    Method* classPrepMethod;        /* tags class load/init in the trace */
+
+    /* active state */
+    pthread_mutex_t startStopLock;  /* serializes trace start/stop */
+    pthread_cond_t  threadExitCond; /* broadcast when a trace stops */
+    FILE*   traceFile;              /* key text + record buffer written here */
+    int     bufferSize;             /* total size of "buf", in bytes */
+    int     flags;                  /* TRACE_* option bits */
+
+    bool    traceEnabled;           /* read without lock by TRACE_METHOD_* */
+    u1*     buf;                    /* header then fixed-size records */
+    volatile int curOffset;         /* next free byte; advanced via CAS */
+    u8      startWhen;              /* trace start time, in usec */
+    int     overflow;               /* nonzero if records were dropped */
+} MethodTraceState;
+
+/*
+ * Memory allocation profiler state.  This is used both globally and
+ * per-thread.
+ *
+ * allocCount/allocSize/gcCount are reported in the trace key file by
+ * dvmMethodTraceStop() when TRACE_ALLOC_COUNTS is set.
+ *
+ * If you add a field here, zero it out in dvmStartAllocCounting().
+ */
+typedef struct AllocProfState {
+    bool    enabled;            // is allocation tracking enabled?
+
+    int     allocCount;         // #of objects allocated
+    int     allocSize;          // cumulative size of objects
+
+    int     failedAllocCount;   // #of times an allocation failed
+    int     failedAllocSize;    // cumulative size of failed allocations
+
+    int     freeCount;          // #of objects freed
+    int     freeSize;           // cumulative size of freed objects
+
+    int     gcCount;            // #of times an allocation triggered a GC
+
+#if PROFILE_EXTERNAL_ALLOCATIONS
+    int     externalAllocCount; // #of calls to dvmTrackExternalAllocation()
+    int     externalAllocSize;  // #of bytes passed to ...ExternalAllocation()
+
+    int     failedExternalAllocCount; // #of times an allocation failed
+    int     failedExternalAllocSize;  // cumulative size of failed allocations
+
+    int     externalFreeCount;  // #of calls to dvmTrackExternalFree()
+    int     externalFreeSize;   // #of bytes passed to ...ExternalFree()
+#endif  // PROFILE_EXTERNAL_ALLOCATIONS
+} AllocProfState;
+
+
+/*
+ * Start/stop method tracing.
+ */
+void dvmMethodTraceStart(const char* traceFileName, int bufferSize, int flags);
+void dvmMethodTraceStop(void);
+
+/*
+ * Start/stop emulator tracing.
+ */
+void dvmEmulatorTraceStart(void);
+void dvmEmulatorTraceStop(void);
+
+/*
+ * Start/stop Dalvik instruction counting.
+ */
+void dvmStartInstructionCounting();
+void dvmStopInstructionCounting();
+
+/*
+ * Bit flags for dvmMethodTraceStart "flags" argument.  These must match
+ * the values in android.os.Debug.
+ */
+enum {
+    TRACE_ALLOC_COUNTS      = 0x01,     /* also record allocation stats */
+};
+
+/*
+ * Call these when a method enters or exits.
+ *
+ * Each macro expands to a single do..while(0) statement.  The definition
+ * deliberately has no trailing semicolon, so the invocation's own ';'
+ * completes the statement and "if (x) TRACE_METHOD_ENTER(...); else ..."
+ * parses correctly.
+ */
+#ifdef WITH_PROFILER
+# define TRACE_METHOD_ENTER(_self, _method)                                 \
+    do {                                                                    \
+        if (gDvm.activeProfilers != 0) {                                    \
+            if (gDvm.methodTrace.traceEnabled)                              \
+                dvmMethodTraceAdd(_self, _method, METHOD_TRACE_ENTER);      \
+            if (gDvm.emulatorTraceEnableCount != 0)                         \
+                dvmEmitEmulatorTrace(_method, METHOD_TRACE_ENTER);          \
+        }                                                                   \
+    } while(0)
+# define TRACE_METHOD_EXIT(_self, _method)                                  \
+    do {                                                                    \
+        if (gDvm.activeProfilers != 0) {                                    \
+            if (gDvm.methodTrace.traceEnabled)                              \
+                dvmMethodTraceAdd(_self, _method, METHOD_TRACE_EXIT);       \
+            if (gDvm.emulatorTraceEnableCount != 0)                         \
+                dvmEmitEmulatorTrace(_method, METHOD_TRACE_EXIT);           \
+        }                                                                   \
+    } while(0)
+# define TRACE_METHOD_UNROLL(_self, _method)                                \
+    do {                                                                    \
+        if (gDvm.activeProfilers != 0) {                                    \
+            if (gDvm.methodTrace.traceEnabled)                              \
+                dvmMethodTraceAdd(_self, _method, METHOD_TRACE_UNROLL);     \
+            if (gDvm.emulatorTraceEnableCount != 0)                         \
+                dvmEmitEmulatorTrace(_method, METHOD_TRACE_UNROLL);         \
+        }                                                                   \
+    } while(0)
+#else
+# define TRACE_METHOD_ENTER(_self, _method)     ((void) 0)
+# define TRACE_METHOD_EXIT(_self, _method)      ((void) 0)
+# define TRACE_METHOD_UNROLL(_self, _method)    ((void) 0)
+#endif
+
+void dvmMethodTraceAdd(struct Thread* self, const Method* method, int action);
+void dvmEmitEmulatorTrace(const Method* method, int action);
+
+void dvmMethodTraceGCBegin(void);
+void dvmMethodTraceGCEnd(void);
+void dvmMethodTraceClassPrepBegin(void);
+void dvmMethodTraceClassPrepEnd(void);
+
+/*
+ * Start/stop alloc counting.
+ */
+void dvmStartAllocCounting(void);
+void dvmStopAllocCounting(void);
+
+#endif
+
+
+/*
+ * Enumeration for the two "action" bits.
+ */
+enum {
+    METHOD_TRACE_ENTER = 0x00,      // method entry
+    METHOD_TRACE_EXIT = 0x01,       // method exit
+    METHOD_TRACE_UNROLL = 0x02,     // method exited by exception unrolling
+    // 0x03 currently unused
+};
+
+#define TOKEN_CHAR      '*'         /* marks section headers in the key file */
+#define TRACE_VERSION   1           /* key-file format version */
+
+/*
+ * Common definitions, shared with the dump tool.
+ *
+ * The action is packed into the low two bits of the Method pointer value;
+ * this presumably relies on Method structs being at least 4-byte aligned.
+ */
+#define METHOD_ACTION_MASK      0x03            /* two bits */
+#define METHOD_ID(_method)      ((_method) & (~METHOD_ACTION_MASK))
+#define METHOD_ACTION(_method)  (((unsigned int)(_method)) & METHOD_ACTION_MASK)
+#define METHOD_COMBINE(_method, _action)    ((_method) | (_action))
+
+#endif /*_DALVIK_PROFILE*/
diff --git a/vm/Properties.c b/vm/Properties.c
new file mode 100644
index 0000000..a9fe5e1
--- /dev/null
+++ b/vm/Properties.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Set up values for System.getProperties().
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+#include <sys/utsname.h>
+#include <limits.h>
+#include <unistd.h>
+
+/*
+ * Create some storage for properties read from the command line.
+ *
+ * Allocates room for up to "maxProps" entries; with maxProps == 0 the
+ * list is left unallocated.  Returns false if the allocation fails.
+ */
+bool dvmPropertiesStartup(int maxProps)
+{
+    gDvm.maxProps = maxProps;
+    if (maxProps > 0) {
+        gDvm.propList = (char**) malloc(maxProps * sizeof(char*));
+        if (gDvm.propList == NULL)
+            return false;
+    }
+    gDvm.numProps = 0;
+
+    return true;
+}
+
+/*
+ * Clean up.  Frees the stored property strings and the list itself, and
+ * resets the count so a stale index can't reach into freed memory if
+ * this is called more than once.
+ */
+void dvmPropertiesShutdown(void)
+{
+    int i;
+
+    for (i = 0; i < gDvm.numProps; i++)
+        free(gDvm.propList[i]);
+    free(gDvm.propList);
+    gDvm.propList = NULL;
+    gDvm.numProps = 0;
+}
+
+/*
+ * Add a property specified on the command line.  "argStr" has the form
+ * "name=value".  "name" must have nonzero length.
+ *
+ * Returns "true" if argStr appears valid and was stored; "false" on bad
+ * input, allocation failure, or a full property list.
+ */
+bool dvmAddCommandLineProperty(const char* argStr)
+{
+    char* mangle;
+    char* equals;
+
+    mangle = strdup(argStr);
+    if (mangle == NULL)
+        return false;
+    equals = strchr(mangle, '=');
+    if (equals == NULL || equals == mangle) {
+        free(mangle);
+        return false;
+    }
+    /* split "name=value" into "name\0value" in place */
+    *equals = '\0';
+
+    /*
+     * The assert is compiled out in release builds, so also check for
+     * real to avoid writing past the end of propList.
+     */
+    assert(gDvm.numProps < gDvm.maxProps);
+    if (gDvm.numProps >= gDvm.maxProps) {
+        free(mangle);
+        return false;
+    }
+    gDvm.propList[gDvm.numProps++] = mangle;
+
+    return true;
+}
+
+
+/*
+ * Find the "put" method for this class.
+ *
+ * Looks up setProperty(String,String) anywhere in the class hierarchy.
+ * Returns NULL and throws an exception if not found.
+ */
+static Method* getPut(ClassObject* clazz)
+{
+    Method* put;
+
+    put = dvmFindVirtualMethodHierByDescriptor(clazz, "setProperty",
+            "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/Object;");
+    if (put == NULL) {
+        dvmThrowException("Ljava/lang/RuntimeException;",
+            "could not find setProperty(String,String) in Properties");
+        /* fall through to return */
+    }
+    return put;
+}
+
+/*
+ * Set the value of the property.
+ *
+ * Creates String objects for "key" and "value" and invokes the class's
+ * setProperty() method ("put") on "propObj".  A NULL value is stored as
+ * the empty string.
+ */
+static void setProperty(Object* propObj, Method* put, const char* key,
+    const char* value)
+{
+    StringObject* keyStr;
+    StringObject* valueStr;
+
+    if (value == NULL) {
+        /* unclear what to do; probably want to create prop w/ empty string */
+        value = "";
+    }
+
+    keyStr = dvmCreateStringFromCstr(key, ALLOC_DEFAULT);
+    valueStr = dvmCreateStringFromCstr(value, ALLOC_DEFAULT);
+    if (keyStr == NULL || valueStr == NULL) {
+        LOGW("setProperty string creation failed\n");
+        goto bail;
+    }
+
+    JValue unused;
+    dvmCallMethod(dvmThreadSelf(), put, propObj, &unused, keyStr, valueStr);
+
+bail:
+    /* either pointer may be NULL here if creation failed */
+    dvmReleaseTrackedAlloc((Object*) keyStr, NULL);
+    dvmReleaseTrackedAlloc((Object*) valueStr, NULL);
+}
+
+/*
+ * Create the VM-default system properties.
+ *
+ * We can do them here, or do them in interpreted code with lots of native
+ * methods to get bits and pieces.  This is a bit smaller.
+ *
+ * Note: getenv() results may be NULL; setProperty() maps NULL to "".
+ */
+void dvmCreateDefaultProperties(Object* propObj)
+{
+    Method* put = getPut(propObj->clazz);
+
+    if (put == NULL)
+        return;
+
+    struct utsname info;
+    uname(&info);
+
+    /* constant strings that are used multiple times below */
+    const char *projectUrl = "http://www.android.com/";
+    const char *projectName = "The Android Project";
+
+    /*
+     * These are listed in the docs.
+     */
+
+    setProperty(propObj, put, "java.boot.class.path", gDvm.bootClassPathStr);
+    setProperty(propObj, put, "java.class.path", gDvm.classPathStr);
+    setProperty(propObj, put, "java.class.version", "46.0");
+    setProperty(propObj, put, "java.compiler", "");
+    setProperty(propObj, put, "java.ext.dirs", "");
+
+    if (getenv("JAVA_HOME") != NULL) {
+        setProperty(propObj, put, "java.home", getenv("JAVA_HOME"));
+    } else {
+        setProperty(propObj, put, "java.home", "/system");
+    }
+
+    setProperty(propObj, put, "java.io.tmpdir", "/tmp");
+    setProperty(propObj, put, "java.library.path", getenv("LD_LIBRARY_PATH"));
+
+    setProperty(propObj, put, "java.vendor", projectName);
+    setProperty(propObj, put, "java.vendor.url", projectUrl);
+    setProperty(propObj, put, "java.version", "0");
+    setProperty(propObj, put, "java.vm.name", "Dalvik");
+    setProperty(propObj, put, "java.vm.specification.name",
+            "Dalvik Virtual Machine Specification");
+    setProperty(propObj, put, "java.vm.specification.vendor", projectName);
+    setProperty(propObj, put, "java.vm.specification.version", "0.9");
+    setProperty(propObj, put, "java.vm.vendor", projectName);
+
+    /* bounded formatting; the version triplet easily fits in 64 bytes */
+    char tmpBuf[64];
+    snprintf(tmpBuf, sizeof(tmpBuf), "%d.%d.%d",
+        DALVIK_MAJOR_VERSION, DALVIK_MINOR_VERSION, DALVIK_BUG_VERSION);
+    setProperty(propObj, put, "java.vm.version", tmpBuf);
+
+    setProperty(propObj, put, "java.specification.name",
+            "Dalvik Core Library");
+    setProperty(propObj, put, "java.specification.vendor", projectName);
+    setProperty(propObj, put, "java.specification.version", "0.9");
+
+    #define OS_ARCH generic /* TODO: Use an "arch" header. */
+    #define OS_ARCH_QUOTE(x) #x
+    setProperty(propObj, put, "os.arch", OS_ARCH_QUOTE(OS_ARCH));
+    #undef OS_ARCH
+    #undef OS_ARCH_QUOTE
+
+    setProperty(propObj, put, "os.name", info.sysname);
+    setProperty(propObj, put, "os.version", info.release);
+    setProperty(propObj, put, "user.home", getenv("HOME"));
+    setProperty(propObj, put, "user.name", getenv("USER"));
+
+    char path[PATH_MAX];
+    setProperty(propObj, put, "user.dir", getcwd(path, sizeof(path)));
+
+    setProperty(propObj, put, "file.separator", "/");
+    setProperty(propObj, put, "line.separator", "\n");
+    setProperty(propObj, put, "path.separator", ":");
+
+    /*
+     * These show up elsewhere, so do them here too.
+     */
+    setProperty(propObj, put, "java.runtime.name", "Android Runtime");
+    setProperty(propObj, put, "java.runtime.version", "0.9");
+    setProperty(propObj, put, "java.vm.vendor.url", projectUrl);
+
+    setProperty(propObj, put, "file.encoding", "UTF-8");
+    setProperty(propObj, put, "user.language", "en");
+    setProperty(propObj, put, "user.region", "US");
+
+    /*
+     * These are unique to Android/Dalvik.
+     */
+    setProperty(propObj, put, "android.vm.dexfile", "true");
+}
+
+/*
+ * Add anything specified on the command line.
+ *
+ * dvmAddCommandLineProperty() replaced each entry's '=' with '\0', so a
+ * stored string holds "key\0value"; the value begins one byte past the
+ * key's terminator.
+ */
+void dvmSetCommandLineProperties(Object* propObj)
+{
+    Method* put = getPut(propObj->clazz);
+    int i;
+
+    if (put == NULL)
+        return;
+
+    for (i = 0; i < gDvm.numProps; i++) {
+        const char* value;
+
+        /* value starts after the end of the key string */
+        for (value = gDvm.propList[i]; *value != '\0'; value++)
+            ;
+        setProperty(propObj, put, gDvm.propList[i], value+1);
+    }
+}
+
+/*
+ * Get a property by calling System.getProperty(key).
+ *
+ * Returns a newly-allocated string, or NULL on failure or key not found.
+ * (Unexpected failures will also raise an exception.)  The caller owns
+ * the returned string and must free() it.
+ */
+char* dvmGetProperty(const char* key)
+{
+    ClassObject* system;
+    Method* getProp;
+    StringObject* keyObj = NULL;
+    StringObject* valueObj;
+    char* result = NULL;
+
+    assert(key != NULL);
+
+    system = dvmFindSystemClass("Ljava/lang/System;");
+    if (system == NULL)
+        goto bail;
+
+    getProp = dvmFindDirectMethodByDescriptor(system, "getProperty",
+        "(Ljava/lang/String;)Ljava/lang/String;");
+    if (getProp == NULL) {
+        LOGW("Could not find getProperty(String) in java.lang.System\n");
+        goto bail;
+    }
+
+    keyObj = dvmCreateStringFromCstr(key, ALLOC_DEFAULT);
+    if (keyObj == NULL)
+        goto bail;
+
+    /* static call, so "this" is NULL */
+    JValue val;
+    dvmCallMethod(dvmThreadSelf(), getProp, NULL, &val, keyObj);
+    valueObj = (StringObject*) val.l;
+    if (valueObj == NULL)
+        goto bail;
+
+    result = dvmCreateCstrFromString(valueObj);
+    /* fall through with result */
+
+bail:
+    /* keyObj may be NULL here; release is the only cleanup needed */
+    dvmReleaseTrackedAlloc((Object*)keyObj, NULL);
+    return result;
+}
+
diff --git a/vm/Properties.h b/vm/Properties.h
new file mode 100644
index 0000000..f7f2f03
--- /dev/null
+++ b/vm/Properties.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Support for System.getProperties().
+ */
+#ifndef _DALVIK_PROPERTIES
+#define _DALVIK_PROPERTIES
+
+/*
+ * Initialization.
+ */
+bool dvmPropertiesStartup(int maxProps);
+void dvmPropertiesShutdown(void);
+
+/* add "-D" option to list */
+bool dvmAddCommandLineProperty(const char* argStr);
+
+/* called during property initialization */
+void dvmCreateDefaultProperties(Object* propObj);
+void dvmSetCommandLineProperties(Object* propObj);
+
+char* dvmGetProperty(const char* key);
+
+#endif /*_DALVIK_PROPERTIES*/
diff --git a/vm/README.txt b/vm/README.txt
new file mode 100644
index 0000000..e00e240
--- /dev/null
+++ b/vm/README.txt
@@ -0,0 +1,20 @@
+Dalvik Virtual Machine
+
+
+Source code rules of the road:
+
+- All externally-visible function names must start with "dvm" to avoid
+namespace clashes.  Use static functions when possible.
+
+- Do not create static variables (globally or locally).  Do not create
+global variables.  Keep everything with non-local lifespan in "gDvm",
+defined in Globals.h, so that all global VM state is in one place.
+
+- Use "startup" and "shutdown" functions to clean up gDvm.  The VM must
+exit cleanly when run under Valgrind, with no leaks or invalid accesses.
+
+- The primary target is ARM Linux.  Others are secondary, but must still
+work correctly.
+
+- Use of gcc-specific and C99 constructs is allowed.
+
diff --git a/vm/RawDexFile.c b/vm/RawDexFile.c
new file mode 100644
index 0000000..57f5c1f
--- /dev/null
+++ b/vm/RawDexFile.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Open an unoptimized DEX file.
+ */
+#include "Dalvik.h"
+
+/*
+ * Open an unoptimized DEX file.  This finds the optimized version in the
+ * cache, constructing it if necessary.
+ *
+ * Currently unimplemented: always returns -1 (failure).
+ */
+int dvmRawDexFileOpen(const char* fileName, RawDexFile** ppRawDexFile,
+    bool isBootstrap)
+{
+    // TODO - should be very similar to what JarFile does
+    return -1;
+}
+
+/*
+ * Close a RawDexFile and free the struct.  NULL-tolerant; also releases
+ * the associated DvmDex and the cache file name string.
+ */
+void dvmRawDexFileFree(RawDexFile* pRawDexFile)
+{
+    if (pRawDexFile == NULL)
+        return;
+
+    dvmDexFileFree(pRawDexFile->pDvmDex);
+    free(pRawDexFile->cacheFileName);
+    free(pRawDexFile);
+}
+
diff --git a/vm/RawDexFile.h b/vm/RawDexFile.h
new file mode 100644
index 0000000..310c4ba
--- /dev/null
+++ b/vm/RawDexFile.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * This represents a "raw" unswapped, unoptimized DEX file.  We don't open
+ * them directly, except to create the optimized version that we tuck in
+ * the cache area.
+ */
+#ifndef _DALVIK_RAWDEXFILE
+#define _DALVIK_RAWDEXFILE
+
+/*
+ * Structure representing a "raw" DEX file, in its unswapped unoptimized
+ * state.  Both fields are owned by the struct and released by
+ * dvmRawDexFileFree().
+ */
+typedef struct RawDexFile {
+    char*       cacheFileName;      /* path of the optimized copy in the cache */
+    DvmDex*     pDvmDex;            /* the loaded DEX data */
+} RawDexFile;
+
+/*
+ * Open a raw ".dex" file, optimize it, and load it.
+ *
+ * On success, returns 0 and sets "*ppDexFile" to a newly-allocated DexFile.
+ * On failure, returns a meaningful error code [currently just -1].
+ */
+int dvmRawDexFileOpen(const char* fileName, RawDexFile** ppDexFile,
+    bool isBootstrap);
+
+/*
+ * Free a RawDexFile structure, along with any associated structures.
+ */
+void dvmRawDexFileFree(RawDexFile* pRawDexFile);
+
+/*
+ * Pry the DexFile out of a RawDexFile.  The returned pointer is still
+ * owned by the RawDexFile (no ownership transfer).
+ */
+INLINE DvmDex* dvmGetRawDexFileDex(RawDexFile* pRawDexFile) {
+    return pRawDexFile->pDvmDex;
+}
+
+/* get full path of optimized DEX file (still owned by the RawDexFile) */
+INLINE const char* dvmGetRawDexFileCacheFileName(RawDexFile* pRawDexFile) {
+    return pRawDexFile->cacheFileName;
+}
+
+#endif /*_DALVIK_RAWDEXFILE*/
diff --git a/vm/ReferenceTable.c b/vm/ReferenceTable.c
new file mode 100644
index 0000000..c748222
--- /dev/null
+++ b/vm/ReferenceTable.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Reference table management.
+ */
+#include "Dalvik.h"
+
+/*
+ * Initialize a ReferenceTable structure.
+ *
+ * Allocates room for "initialCount" entries; the table may later grow up
+ * to "maxCount".  Returns "false" if the initial allocation fails.
+ */
+bool dvmInitReferenceTable(ReferenceTable* pRef, int initialCount,
+    int maxCount)
+{
+    assert(initialCount > 0);
+    assert(initialCount <= maxCount);
+
+    Object** initialTable = (Object**) malloc(initialCount * sizeof(Object*));
+    if (initialTable == NULL)
+        return false;
+#ifndef NDEBUG
+    /* scribble on the storage so stale entries stand out in debug builds */
+    memset(initialTable, 0xdd, initialCount * sizeof(Object*));
+#endif
+    pRef->table = initialTable;
+    pRef->nextEntry = initialTable;
+    pRef->allocEntries = initialCount;
+    pRef->maxEntries = maxCount;
+
+    return true;
+}
+
+/*
+ * Clears out the contents of a ReferenceTable, freeing allocated storage.
+ * The structure itself is not freed; re-initialize with
+ * dvmInitReferenceTable() before reuse.
+ */
+void dvmClearReferenceTable(ReferenceTable* pRef)
+{
+    free(pRef->table);
+    pRef->nextEntry = NULL;
+    pRef->table = NULL;
+    pRef->maxEntries = -1;
+    pRef->allocEntries = -1;
+}
+
+/*
+ * Add "obj" to "pRef".
+ *
+ * If the allocated space is exhausted the table grows (doubling, capped
+ * at maxEntries), which may move the storage.  Returns "false" if the
+ * table is at its maximum size or cannot be expanded.
+ */
+bool dvmAddToReferenceTable(ReferenceTable* pRef, Object* obj)
+{
+    /* check for NULL before handing obj to the validity check */
+    assert(obj != NULL);
+    assert(dvmIsValidObject(obj));
+    assert(pRef->table != NULL);
+
+    if (pRef->nextEntry == pRef->table + pRef->maxEntries) {
+        LOGW("ReferenceTable overflow (max=%d)\n", pRef->maxEntries);
+        return false;
+    } else if (pRef->nextEntry == pRef->table + pRef->allocEntries) {
+        Object** newTable;
+        int newSize;
+
+        newSize = pRef->allocEntries * 2;
+        if (newSize > pRef->maxEntries)
+            newSize = pRef->maxEntries;
+        assert(newSize > pRef->allocEntries);
+
+        newTable = (Object**) realloc(pRef->table, newSize * sizeof(Object*));
+        if (newTable == NULL) {
+            /* cast sizeof result: size_t passed where %d expects int is UB */
+            LOGE("Unable to expand ref table (from %d to %d %d-byte entries)\n",
+                pRef->allocEntries, newSize, (int) sizeof(Object*));
+            return false;
+        }
+        LOGVV("Growing %p from %d to %d\n", pRef, pRef->allocEntries, newSize);
+
+        /* update entries; adjust "nextEntry" in case memory moved */
+        pRef->nextEntry = newTable + (pRef->nextEntry - pRef->table);
+        pRef->table = newTable;
+        pRef->allocEntries = newSize;
+    }
+
+    *pRef->nextEntry++ = obj;
+    return true;
+}
+
+/*
+ * Search for "obj", scanning backward from the most recent entry down to
+ * (and including) the slot at "top".
+ *
+ * Returns a pointer to the matching slot, or NULL if not found.
+ */
+Object** dvmFindInReferenceTable(const ReferenceTable* pRef, Object** top,
+    Object* obj)
+{
+    Object** slot = pRef->nextEntry;
+
+    while (slot > top) {
+        slot--;
+        if (*slot == obj)
+            return slot;
+    }
+    return NULL;
+}
+
+/*
+ * Remove "obj" from "pRef".  The search runs from the most-recently-added
+ * entry back to "top", so recently-added references are found quickly.
+ *
+ * If the match isn't the last entry, the tail of the table is compacted
+ * down over it.
+ *
+ * Returns "false" if "obj" wasn't found.
+ */
+bool dvmRemoveFromReferenceTable(ReferenceTable* pRef, Object** top,
+    Object* obj)
+{
+    Object** slot;
+    int tailCount;
+
+    assert(pRef->table != NULL);
+
+    /* scan from the most-recently-added entry up to "top" */
+    slot = dvmFindInReferenceTable(pRef, top, obj);
+    if (slot == NULL)
+        return false;
+
+    /* drop the entry; slide the tail down unless it was the last one */
+    pRef->nextEntry--;
+    tailCount = pRef->nextEntry - slot;
+    if (tailCount != 0) {
+        /* removed from the middle */
+        memmove(slot, slot+1, tailCount * sizeof(Object*));
+    } else {
+        /* removed the last entry; nothing to move */
+    }
+
+    return true;
+}
+
+/*
+ * qsort() callback: order Object* entries by class, then by allocation
+ * size, then by the pointer value itself.  NULL entries order by address,
+ * which places them first.
+ */
+static int compareObject(const void* vobj1, const void* vobj2)
+{
+    Object* obj1 = *((Object**) vobj1);
+    Object* obj2 = *((Object**) vobj2);
+
+    /* if either is NULL, fall back to a plain address comparison */
+    if (obj1 == NULL || obj2 == NULL)
+        return (u1*)obj1 - (u1*)obj2;
+
+    if (obj1->clazz != obj2->clazz)
+        return (u1*)obj1->clazz - (u1*)obj2->clazz;
+
+    int size1 = dvmObjectSizeInHeap(obj1);
+    int size2 = dvmObjectSizeInHeap(obj2);
+    if (size1 != size2)
+        return size1 - size2;
+
+    return (u1*)obj1 - (u1*)obj2;
+}
+
+/*
+ * Log one unique object from the sorted table.
+ *
+ * "identical" is the number of additional entries that were the same
+ * reference; "equiv" is the number with the same class and size.
+ */
+static void logObject(Object* obj, int size, int identical, int equiv)
+{
+    if (obj == NULL) {
+        LOGW("  NULL reference (count=%d)\n", equiv);
+        return;
+    }
+
+    int total = identical + equiv + 1;
+    if (identical + equiv != 0) {
+        LOGW("%5d of %s %dB (%d unique)\n", total,
+            obj->clazz->descriptor, size, equiv + 1);
+    } else {
+        LOGW("%5d of %s %dB\n", total,
+            obj->clazz->descriptor, size);
+    }
+}
+
+/*
+ * Dump the contents of a ReferenceTable to the log.
+ *
+ * First logs the most recent kLast entries in table order, then a
+ * summary of the entire table uniquified by class and size, along with
+ * a cumulative total of the memory held.
+ *
+ * The caller should lock any external sync before calling.
+ *
+ * (This was originally written to be tolerant of null entries in the table.
+ * I don't think that can happen anymore, but the NULL guards are kept.)
+ */
+void dvmDumpReferenceTable(const ReferenceTable* pRef, const char* descr)
+{
+    const int kLast = 10;
+    int count = dvmReferenceTableEntries(pRef);
+    Object** refs;
+    int i;
+
+    if (count == 0) {
+        LOGW("Reference table has no entries\n");
+        return;
+    }
+    assert(count > 0);
+
+    /*
+     * Dump the most recent N entries.
+     */
+    LOGW("Last %d entries in %s reference table:\n", kLast, descr);
+    refs = pRef->table;         // use unsorted list
+    int size;
+    int start = count - kLast;
+    if (start < 0)
+        start = 0;
+
+    for (i = start; i < count; i++) {
+        Object* ref = refs[i];
+        size = (ref == NULL) ? 0 : dvmObjectSizeInHeap(ref);
+        /* NULL check must come before the clazz dereference (the original
+         * read ref->clazz first, then tested for NULL in the ternary) */
+        if (ref != NULL && ref->clazz == gDvm.classJavaLangClass) {
+            ClassObject* clazz = (ClassObject*) ref;
+            LOGW("%5d: %p cls=%s '%s' (%d bytes)\n", i, ref,
+                ref->clazz->descriptor, clazz->descriptor, size);
+        } else {
+            LOGW("%5d: %p cls=%s (%d bytes)\n", i, ref,
+                (ref == NULL) ? "-" : ref->clazz->descriptor, size);
+        }
+    }
+
+    /*
+     * Make a copy of the table, and sort it.  If the allocation fails we
+     * skip the summary rather than crash.
+     */
+    Object** tableCopy = (Object**)malloc(sizeof(Object*) * count);
+    if (tableCopy == NULL) {
+        LOGE("Unable to copy table with %d entries\n", count);
+        return;
+    }
+    memcpy(tableCopy, pRef->table, sizeof(Object*) * count);
+    qsort(tableCopy, count, sizeof(Object*), compareObject);
+    refs = tableCopy;       // use sorted list
+
+    /*
+     * Dump uniquified table summary.  While we're at it, generate a
+     * cumulative total amount of pinned memory based on the unique entries.
+     */
+    LOGW("%s reference table summary (%d entries):\n", descr, count);
+    int equiv, identical, total;
+    total = equiv = identical = 0;
+    for (i = 1; i < count; i++) {
+        size = (refs[i-1] == NULL) ? 0 : dvmObjectSizeInHeap(refs[i-1]);
+
+        if (refs[i] == refs[i-1]) {
+            /* same reference, added more than once */
+            identical++;
+        } else if (refs[i] != NULL && refs[i-1] != NULL &&
+            refs[i]->clazz == refs[i-1]->clazz &&
+            (int) dvmObjectSizeInHeap(refs[i]) == size)
+        {
+            /* same class / size, different object */
+            total += size;
+            equiv++;
+        } else {
+            /* different class */
+            total += size;
+            logObject(refs[i-1], size, identical, equiv);
+            equiv = identical = 0;
+        }
+    }
+
+    /* handle the last entry (everything above outputs refs[i-1]) */
+    size = (refs[count-1] == NULL) ? 0 : dvmObjectSizeInHeap(refs[count-1]);
+    total += size;
+    logObject(refs[count-1], size, identical, equiv);
+
+    LOGW("Memory held directly by native code is %d bytes\n", total);
+    free(tableCopy);
+}
+
diff --git a/vm/ReferenceTable.h b/vm/ReferenceTable.h
new file mode 100644
index 0000000..f8f2461
--- /dev/null
+++ b/vm/ReferenceTable.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Maintain a table of references.  Used for internal local references,
+ * JNI locals, JNI globals, and GC heap references.
+ *
+ * None of the table functions are synchronized.
+ */
+#ifndef _DALVIK_REFERENCETABLE
+#define _DALVIK_REFERENCETABLE
+
+/*
+ * Table definition.
+ *
+ * The expected common operations are adding a new entry and removing a
+ * recently-added entry (usually the most-recently-added entry).
+ *
+ * If "allocEntries" is not equal to "maxEntries", the table may expand when
+ * entries are added, which means the memory may move.  If you want to keep
+ * pointers into "table" rather than offsets, use a fixed-size table.
+ *
+ * (This structure is still somewhat transparent; direct access to
+ * table/nextEntry is allowed.)
+ */
+typedef struct ReferenceTable {
+    Object**        table;              /* top of the list */
+    Object**        nextEntry;          /* bottom of the list; points one
+                                           past the most recent entry */
+
+    int             allocEntries;       /* #of entries we have space for */
+    int             maxEntries;         /* max #of entries allowed */
+} ReferenceTable;
+
+/*
+ * Initialize a ReferenceTable.
+ *
+ * If "initialCount" != "maxCount", the table will expand as required.
+ *
+ * Returns "false" if table allocation fails.
+ */
+bool dvmInitReferenceTable(ReferenceTable* pRef, int initialCount,
+    int maxCount);
+
+/*
+ * Clears out the contents of a ReferenceTable, freeing allocated storage.
+ * Does not free "pRef".
+ *
+ * You must call dvmInitReferenceTable() before you can re-use this table.
+ */
+void dvmClearReferenceTable(ReferenceTable* pRef);
+
+/*
+ * Return the #of entries currently stored in the ReferenceTable.
+ *
+ * "nextEntry" points one slot past the most recently added entry, so the
+ * count is the distance from the table base.
+ */
+INLINE size_t dvmReferenceTableEntries(const ReferenceTable* pRef)
+{
+    return pRef->nextEntry - pRef->table;
+}
+
+/*
+ * Returns "true" if the table is full.  The table is considered full if
+ * we would need to expand it to add another entry.
+ *
+ * NOTE(review): declared to return size_t but used as a boolean --
+ * consider changing the return type to bool.
+ */
+INLINE size_t dvmIsReferenceTableFull(const ReferenceTable* pRef)
+{
+    return dvmReferenceTableEntries(pRef) == (size_t)pRef->allocEntries;
+}
+
+/*
+ * Add a new entry.  "obj" must be a valid non-NULL object reference
+ * (though it's okay if it's not fully-formed, e.g. the result from
+ * dvmMalloc doesn't have obj->clazz set).
+ *
+ * Returns "false" if the table is full.
+ */
+bool dvmAddToReferenceTable(ReferenceTable* pRef, Object* obj);
+
+/*
+ * Determine if "obj" is present in "pRef".  Stops searching when we hit "top".
+ * To include the entire table, pass in "pRef->table" as the top.
+ *
+ * Returns NULL if "obj" was not found.
+ */
+Object** dvmFindInReferenceTable(const ReferenceTable* pRef, Object** top,
+    Object* obj);
+
+/*
+ * Remove an existing entry.
+ *
+ * We stop searching for a match after examining the element at "top".  This
+ * is useful when entries are associated with a stack frame.
+ *
+ * Returns "false" if the entry was not found.
+ */
+bool dvmRemoveFromReferenceTable(ReferenceTable* pRef, Object** top,
+    Object* obj);
+
+/*
+ * Dump the contents of a reference table to the log file.
+ */
+void dvmDumpReferenceTable(const ReferenceTable* pRef, const char* descr);
+
+#endif /*_DALVIK_REFERENCETABLE*/
diff --git a/vm/SignalCatcher.c b/vm/SignalCatcher.c
new file mode 100644
index 0000000..3efc191
--- /dev/null
+++ b/vm/SignalCatcher.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * This is a thread that catches signals and does something useful.  For
+ * example, when a SIGQUIT (Ctrl-\) arrives, suspend the VM and dump the
+ * status of all threads.
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/file.h>
+#include <sys/time.h>
+#include <fcntl.h>
+#include <errno.h>
+
+static void* signalCatcherThreadStart(void* arg);
+
+/*
+ * Crank up the signal catcher thread.
+ *
+ * Returns immediately; does not wait for the thread to begin running.
+ * Returns "false" if the thread could not be created.
+ */
+bool dvmSignalCatcherStartup(void)
+{
+    gDvm.haltSignalCatcher = false;
+
+    return dvmCreateInternalThread(&gDvm.signalCatcherHandle,
+                "Signal Catcher", signalCatcherThreadStart, NULL);
+}
+
+/*
+ * Shut down the signal catcher thread if it was started.
+ *
+ * The thread spends its life blocked in sigwait(), so we wake it by
+ * sending it one of the signals it's waiting for, then join it.
+ */
+void dvmSignalCatcherShutdown(void)
+{
+    gDvm.haltSignalCatcher = true;
+
+    /* nothing to join if the thread was never started */
+    if (gDvm.signalCatcherHandle == 0)
+        return;
+
+    pthread_kill(gDvm.signalCatcherHandle, SIGQUIT);
+    pthread_join(gDvm.signalCatcherHandle, NULL);
+    LOGV("signal catcher has shut down\n");
+}
+
+
+/*
+ * Print the name of the current process, if we can get it.
+ *
+ * Reads the command line from /proc/self/cmdline; silently does nothing
+ * if the file can't be opened or read.
+ */
+static void printProcessName(const DebugOutputTarget* target)
+{
+    char tmpBuf[256];
+    ssize_t actual;
+    int fd;
+
+    fd = open("/proc/self/cmdline", O_RDONLY, 0);
+    if (fd < 0)
+        return;
+
+    actual = read(fd, tmpBuf, sizeof(tmpBuf)-1);
+    if (actual > 0) {
+        tmpBuf[actual] = '\0';
+        dvmPrintDebugMessage(target, "Cmd line: %s\n", tmpBuf);
+    }
+
+    close(fd);
+}
+
+/*
+ * Dump the stack traces for all threads to the log or to a file.  If it's
+ * to a file we have a little setup to do.
+ */
+static void logThreadStacks(void)
+{
+    DebugOutputTarget target;
+
+    if (gDvm.stackTraceFile == NULL) {
+        /* just dump to log file */
+        dvmCreateLogOutputTarget(&target, ANDROID_LOG_INFO, LOG_TAG);
+        dvmDumpAllThreadsEx(&target, true);
+    } else {
+        FILE* fp = NULL;
+        int cc, fd;
+
+        /*
+         * Open the stack trace output file, creating it if necessary.  It
+         * needs to be world-writable so other processes can write to it.
+         */
+        fd = open(gDvm.stackTraceFile, O_WRONLY | O_APPEND | O_CREAT, 0666);
+        if (fd < 0) {
+            LOGE("Unable to open stack trace file '%s': %s\n",
+                gDvm.stackTraceFile, strerror(errno));
+            return;
+        }
+
+        /*
+         * Gain exclusive access to the file.  Try a non-blocking lock
+         * first so we can log that we're about to stall.  (The original
+         * passed LOCK_EX | LOCK_UN, an invalid flag combination that
+         * always fails; LOCK_EX | LOCK_NB is the non-blocking attempt.)
+         */
+        cc = flock(fd, LOCK_EX | LOCK_NB);
+        if (cc != 0) {
+            LOGV("Sleeping on flock(%s)\n", gDvm.stackTraceFile);
+            cc = flock(fd, LOCK_EX);
+        }
+        if (cc != 0) {
+            LOGE("Unable to lock stack trace file '%s': %s\n",
+                gDvm.stackTraceFile, strerror(errno));
+            close(fd);
+            return;
+        }
+
+        fp = fdopen(fd, "a");
+        if (fp == NULL) {
+            LOGE("Unable to fdopen '%s' (%d): %s\n",
+                gDvm.stackTraceFile, fd, strerror(errno));
+            flock(fd, LOCK_UN);
+            close(fd);
+            return;
+        }
+
+        dvmCreateFileOutputTarget(&target, fp);
+
+        /* stamp the dump with our pid and the wall-clock time */
+        pid_t pid = getpid();
+        time_t now = time(NULL);
+        struct tm* ptm;
+#ifdef HAVE_LOCALTIME_R
+        struct tm tmbuf;
+        ptm = localtime_r(&now, &tmbuf);
+#else
+        ptm = localtime(&now);
+#endif
+        dvmPrintDebugMessage(&target,
+            "\n\n----- pid %d at %04d-%02d-%02d %02d:%02d:%02d -----\n",
+            pid, ptm->tm_year + 1900, ptm->tm_mon+1, ptm->tm_mday,
+            ptm->tm_hour, ptm->tm_min, ptm->tm_sec);
+        printProcessName(&target);
+        dvmPrintDebugMessage(&target, "\n");
+        dvmDumpAllThreadsEx(&target, true);
+        fprintf(fp, "----- end %d -----\n", pid);
+
+        /*
+         * Unlock and close the file, flushing pending data before we unlock
+         * it.  The fclose() will close the underlying fd.
+         */
+        fflush(fp);
+        flock(fd, LOCK_UN);
+        fclose(fp);
+
+        LOGI("Wrote stack trace to '%s'\n", gDvm.stackTraceFile);
+    }
+}
+
+
+/*
+ * Sleep in sigwait() until a signal arrives.
+ *
+ * SIGQUIT suspends all threads and dumps their stacks; SIGUSR1 forces a
+ * GC (or an hprof heap dump when built WITH_HPROF).  Loops until
+ * gDvm.haltSignalCatcher is set by the shutdown path.
+ */
+static void* signalCatcherThreadStart(void* arg)
+{
+    Thread* self = dvmThreadSelf();
+    sigset_t mask;
+    int cc;
+
+    UNUSED_PARAMETER(arg);
+
+    LOGV("Signal catcher thread started (threadid=%d)\n", self->threadId);
+
+    /* set up mask with signals we want to handle */
+    sigemptyset(&mask);
+    sigaddset(&mask, SIGQUIT);
+    sigaddset(&mask, SIGUSR1);
+
+    while (true) {
+        int rcvd;
+
+        /* mark ourselves VMWAIT so suspend-all doesn't wait on us */
+        dvmChangeStatus(self, THREAD_VMWAIT);
+
+        /*
+         * Signals for sigwait() must be blocked but not ignored.  We
+         * block signals like SIGQUIT for all threads, so the condition
+         * is met.  When the signal hits, we wake up, without any signal
+         * handlers being invoked.
+         *
+         * We want to suspend all other threads, so that it's safe to
+         * traverse their stacks.
+         *
+         * When running under GDB we occasionally return with EINTR (e.g.
+         * when other threads exit).
+         */
+loop:
+        cc = sigwait(&mask, &rcvd);
+        if (cc != 0) {
+            if (cc == EINTR) {
+                //LOGV("sigwait: EINTR\n");
+                goto loop;
+            }
+            assert(!"bad result from sigwait");
+        }
+
+        if (!gDvm.haltSignalCatcher) {
+            LOGI("threadid=%d: reacting to signal %d\n",
+                dvmThreadSelf()->threadId, rcvd);
+        }
+
+        /* set our status to RUNNING, self-suspending if GC in progress */
+        dvmChangeStatus(self, THREAD_RUNNING);
+
+        /* the shutdown path sends SIGQUIT purely to wake us; bail out */
+        if (gDvm.haltSignalCatcher)
+            break;
+
+        if (rcvd == SIGQUIT) {
+            dvmSuspendAllThreads(SUSPEND_FOR_STACK_DUMP);
+            dvmDumpLoaderStats("sig");
+
+            logThreadStacks();
+
+            /* disabled: flip to "true" to also dump JNI global refs */
+            if (false) {
+                dvmLockMutex(&gDvm.jniGlobalRefLock);
+                dvmDumpReferenceTable(&gDvm.jniGlobalRefTable, "JNI global");
+                dvmUnlockMutex(&gDvm.jniGlobalRefLock);
+            }
+
+            //dvmDumpTrackedAllocations(true);
+            dvmResumeAllThreads(SUSPEND_FOR_STACK_DUMP);
+        } else if (rcvd == SIGUSR1) {
+#if WITH_HPROF
+            LOGI("SIGUSR1 forcing GC and HPROF dump\n");
+            hprofDumpHeap();
+#else
+            LOGI("SIGUSR1 forcing GC (no HPROF)\n");
+            dvmCollectGarbage(false);
+#endif
+        } else {
+            LOGE("unexpected signal %d\n", rcvd);
+        }
+    }
+
+    return NULL;
+}
+
diff --git a/vm/SignalCatcher.h b/vm/SignalCatcher.h
new file mode 100644
index 0000000..ece052c
--- /dev/null
+++ b/vm/SignalCatcher.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Signal catcher thread.
+ */
+#ifndef _DALVIK_SIGNALCATCHER
+#define _DALVIK_SIGNALCATCHER
+
+bool dvmSignalCatcherStartup(void);
+void dvmSignalCatcherShutdown(void);
+
+#endif /*_DALVIK_SIGNALCATCHER*/
diff --git a/vm/StdioConverter.c b/vm/StdioConverter.c
new file mode 100644
index 0000000..54ceb0b
--- /dev/null
+++ b/vm/StdioConverter.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Thread that reads from stdout/stderr and converts them to log messages.
+ * (Sort of a hack.)
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#define kFilenoStdout   1
+#define kFilenoStderr   2
+
+/*
+ * Hold our replacement stdout/stderr.
+ *
+ * Standard pipe() layout: [0] is the read end (drained by the converter
+ * thread), [1] is the write end (dup2'd over fds 1 and 2).
+ */
+typedef struct StdPipes {
+    int stdoutPipe[2];
+    int stderrPipe[2];
+} StdPipes;
+
+#define kMaxLine    512
+
+/*
+ * Hold some data.
+ *
+ * "buf" accumulates partial lines across reads; "count" is the number of
+ * valid bytes.  The extra slot leaves room for a terminating '\0'.
+ */
+typedef struct BufferedData {
+    char    buf[kMaxLine+1];
+    int     count;
+} BufferedData;
+
+// fwd
+static void* stdioConverterThreadStart(void* arg);
+static bool readAndLog(int fd, BufferedData* data, const char* tag);
+
+
+/*
+ * Crank up the stdout/stderr converter thread.
+ *
+ * Creates replacement pipes, redirects fds 1 and 2 into their write
+ * ends, and starts the reader thread, waiting until it signals that
+ * it's running.
+ *
+ * Returns "false" on failure, cleaning up after itself.
+ */
+bool dvmStdioConverterStartup(void)
+{
+    StdPipes* pipeStorage;
+
+    gDvm.haltStdioConverter = false;
+
+    dvmInitMutex(&gDvm.stdioConverterLock);
+    pthread_cond_init(&gDvm.stdioConverterCond, NULL);
+
+    pipeStorage = (StdPipes*) malloc(sizeof(StdPipes));
+    if (pipeStorage == NULL)
+        return false;
+
+    if (pipe(pipeStorage->stdoutPipe) != 0) {
+        LOGW("pipe failed: %s\n", strerror(errno));
+        free(pipeStorage);          /* was leaked on this path */
+        return false;
+    }
+    if (pipe(pipeStorage->stderrPipe) != 0) {
+        LOGW("pipe failed: %s\n", strerror(errno));
+        close(pipeStorage->stdoutPipe[0]);
+        close(pipeStorage->stdoutPipe[1]);
+        free(pipeStorage);          /* was leaked (with two fds) here */
+        return false;
+    }
+
+    if (dup2(pipeStorage->stdoutPipe[1], kFilenoStdout) != kFilenoStdout) {
+        LOGW("dup2(1) failed: %s\n", strerror(errno));
+        return false;
+    }
+    close(pipeStorage->stdoutPipe[1]);
+    pipeStorage->stdoutPipe[1] = -1;
+#ifdef HAVE_ANDROID_OS
+    /* don't redirect stderr on sim -- logs get written there! */
+    /* (don't need this on the sim anyway) */
+    if (dup2(pipeStorage->stderrPipe[1], kFilenoStderr) != kFilenoStderr) {
+        LOGW("dup2(2) failed: %d %s\n", errno, strerror(errno));
+        return false;
+    }
+    close(pipeStorage->stderrPipe[1]);
+    pipeStorage->stderrPipe[1] = -1;
+#endif
+
+    /*
+     * Create the thread.
+     */
+    dvmLockMutex(&gDvm.stdioConverterLock);
+
+    if (!dvmCreateInternalThread(&gDvm.stdioConverterHandle,
+                "Stdio Converter", stdioConverterThreadStart, pipeStorage))
+    {
+        dvmUnlockMutex(&gDvm.stdioConverterLock);   /* was left held */
+        free(pipeStorage);
+        return false;
+    }
+    /* new thread owns pipeStorage */
+
+    /* wait for the thread to tell us it's alive */
+    while (!gDvm.stdioConverterReady) {
+        int cc = pthread_cond_wait(&gDvm.stdioConverterCond,
+                    &gDvm.stdioConverterLock);
+        assert(cc == 0);
+    }
+    dvmUnlockMutex(&gDvm.stdioConverterLock);
+
+    return true;
+}
+
+/*
+ * Shut down the stdio converter thread if it was started.
+ *
+ * The thread sits in select() waiting for pipe activity, so writing a
+ * line to stdout is enough to wake it up after the halt flag is set.
+ */
+void dvmStdioConverterShutdown(void)
+{
+    gDvm.haltStdioConverter = true;
+
+    /* not started, or still starting */
+    if (gDvm.stdioConverterHandle == 0)
+        return;
+
+    /* print something to wake it up */
+    printf("Shutting down\n");
+    fflush(stdout);
+
+    LOGD("Joining stdio converter...\n");
+    pthread_join(gDvm.stdioConverterHandle, NULL);
+}
+
+/*
+ * Select on stdout/stderr pipes, waiting for activity.
+ *
+ * DO NOT use printf from here.
+ */
+static void* stdioConverterThreadStart(void* arg)
+{
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+    StdPipes* pipeStorage = (StdPipes*) arg;
+    BufferedData* stdoutData = NULL;
+    BufferedData* stderrData = NULL;
+    int cc;
+
+    /* tell the main thread that we're ready */
+    dvmLockMutex(&gDvm.stdioConverterLock);
+    gDvm.stdioConverterReady = true;
+    cc = pthread_cond_signal(&gDvm.stdioConverterCond);
+    assert(cc == 0);
+    dvmUnlockMutex(&gDvm.stdioConverterLock);
+
+    /* we never do anything that affects the rest of the VM */
+    dvmChangeStatus(NULL, THREAD_VMWAIT);
+
+    /*
+     * Allocate read buffers.  (The original code dereferenced these
+     * without checking for malloc failure.)
+     */
+    stdoutData = (BufferedData*) malloc(sizeof(*stdoutData));
+    stderrData = (BufferedData*) malloc(sizeof(*stderrData));
+    if (stdoutData == NULL || stderrData == NULL) {
+        LOGE("unable to allocate stdio converter buffers\n");
+        goto bail;
+    }
+    stdoutData->count = stderrData->count = 0;
+
+    /*
+     * Read until shutdown time.
+     */
+    while (!gDvm.haltStdioConverter) {
+        fd_set readfds;
+        int maxFd, fdCount;
+
+        FD_ZERO(&readfds);
+        FD_SET(pipeStorage->stdoutPipe[0], &readfds);
+        FD_SET(pipeStorage->stderrPipe[0], &readfds);
+        maxFd = MAX(pipeStorage->stdoutPipe[0], pipeStorage->stderrPipe[0]);
+
+        fdCount = select(maxFd+1, &readfds, NULL, NULL, NULL);
+
+        if (fdCount < 0) {
+            if (errno != EINTR) {
+                LOGE("select on stdout/stderr failed\n");
+                break;
+            }
+            LOGD("Got EINTR, ignoring\n");
+        } else if (fdCount == 0) {
+            LOGD("WEIRD: select returned zero\n");
+        } else {
+            bool err = false;
+            if (FD_ISSET(pipeStorage->stdoutPipe[0], &readfds)) {
+                err |= !readAndLog(pipeStorage->stdoutPipe[0], stdoutData,
+                    "stdout");
+            }
+            if (FD_ISSET(pipeStorage->stderrPipe[0], &readfds)) {
+                err |= !readAndLog(pipeStorage->stderrPipe[0], stderrData,
+                    "stderr");
+            }
+
+            /* probably EOF; give up */
+            if (err) {
+                LOGW("stdio converter got read error; shutting it down\n");
+                break;
+            }
+        }
+    }
+
+bail:
+    close(pipeStorage->stdoutPipe[0]);
+    close(pipeStorage->stderrPipe[0]);
+
+    free(pipeStorage);
+    free(stdoutData);
+    free(stderrData);
+
+    /* change back for shutdown sequence */
+    dvmChangeStatus(NULL, THREAD_RUNNING);
+    return NULL;
+#undef MAX
+}
+
+/*
+ * Data is pending on "fd".  Read as much as will fit in "data", then
+ * write out any full lines and compact "data".
+ *
+ * Returns "false" on a read error (probably EOF on the pipe).
+ */
+static bool readAndLog(int fd, BufferedData* data, const char* tag)
+{
+    ssize_t actual;
+    size_t want;
+
+    assert(data->count < kMaxLine);
+
+    want = kMaxLine - data->count;
+    actual = read(fd, data->buf + data->count, want);
+    if (actual <= 0) {
+        /* cast "want": size_t passed where %d expects int is UB */
+        LOGW("read %s: (%d,%d) failed (%d): %s\n",
+            tag, fd, (int) want, (int)actual, strerror(errno));
+        return false;
+    }
+    data->count += actual;
+
+    /*
+     * Got more data, look for an EOL.  We expect LF or CRLF, but will
+     * try to handle a standalone CR.  A CR in the final valid slot is
+     * left for the next read -- we can't tell yet whether an LF follows.
+     * (The original test was "i != 0", which is always true inside the
+     * loop and allowed *(cp+1) to read one byte past the valid data.)
+     */
+    char* cp = data->buf;
+    const char* start = data->buf;
+    int i;
+    for (i = data->count; i > 0; i--, cp++) {
+        if (*cp == '\n' || (*cp == '\r' && i > 1 && *(cp+1) != '\n')) {
+            *cp = '\0';
+            LOG(LOG_INFO, tag, "%s", start);
+            start = cp+1;
+        }
+    }
+
+    /*
+     * See if we overflowed.  If so, cut it off.
+     */
+    if (start == data->buf && data->count == kMaxLine) {
+        data->buf[kMaxLine] = '\0';
+        LOG(LOG_INFO, tag, "%s!", start);
+        start = data->buf + data->count;    /* everything consumed */
+    }
+
+    /*
+     * Update "data" if we consumed some output.  If there's anything left
+     * in the buffer, it's because we didn't see an EOL and need to keep
+     * reading until we see one.
+     */
+    if (start != data->buf) {
+        if (start >= data->buf + data->count) {
+            /* consumed all available */
+            data->count = 0;
+        } else {
+            /* some left over */
+            int remaining = data->count - (start - data->buf);
+            memmove(data->buf, start, remaining);
+            data->count = remaining;
+        }
+    }
+
+    return true;
+}
+
diff --git a/vm/StdioConverter.h b/vm/StdioConverter.h
new file mode 100644
index 0000000..eef4a72
--- /dev/null
+++ b/vm/StdioConverter.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Stdout/stderr conversion thread.
+ */
+#ifndef _DALVIK_STDOUTCONVERTER
+#define _DALVIK_STDOUTCONVERTER
+
+bool dvmStdioConverterStartup(void);
+void dvmStdioConverterShutdown(void);
+
+#endif /*_DALVIK_STDOUTCONVERTER*/
+
diff --git a/vm/Sync.c b/vm/Sync.c
new file mode 100644
index 0000000..bfb93fe
--- /dev/null
+++ b/vm/Sync.c
@@ -0,0 +1,1864 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Fundamental synchronization mechanisms.
+ *
+ * The top part of the file has operations on "monitor" structs; the
+ * next part has the native calls on objects.
+ *
+ * The current implementation uses "thin locking" to avoid allocating
+ * an Object's full Monitor struct until absolutely necessary (i.e.,
+ * during contention or a call to wait()).
+ *
+ * TODO: make improvements to thin locking
+ * We may be able to improve performance and reduce memory requirements by:
+ *  - reverting to a thin lock once the Monitor is no longer necessary
+ *  - using a pool of monitor objects, with some sort of recycling scheme
+ *
+ * TODO: recycle native-level monitors when objects are garbage collected.
+ *
+ * NOTE: if we broadcast a notify, and somebody sneaks in a Thread.interrupt
+ * before the notify finishes (i.e. before all threads sleeping on the
+ * condition variable have awoken), we could end up with a nonzero value for
+ * "notifying" after everybody is gone because one of the notified threads
+ * will actually exit via the "interrupted" path.  This can be detected as
+ * (notifying + interrupting > waiting), i.e. the number of threads that need
+ * to be woken is greater than the number waiting.  The fix is to test and
+ * adjust "notifying" at the start of the wait() call.
+ * -> This is probably not a problem if we notify less than the full set
+ * before the interrupt comes in.  If we have four waiters, two pending
+ * notifies, and an interrupt hits, we will interrupt one thread and notify
+ * two others.  Doesn't matter if the interrupted thread would have been
+ * one of the notified.  Count is only screwed up if we have two waiters,
+ * in which case it's safe to fix it at the start of the next wait().
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <time.h>
+#include <sys/time.h>
+#include <errno.h>
+
+#define LOG_THIN    LOGV
+
+#ifdef WITH_DEADLOCK_PREDICTION     /* fwd */
+static const char* kStartBanner =
+    "<-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#";
+static const char* kEndBanner =
+    "#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#->";
+
+/*
+ * Unsorted, expanding list of objects.
+ *
+ * This is very similar to PointerSet (which came into existence after this),
+ * but these are unsorted, uniqueness is not enforced by the "add" function,
+ * and the base object isn't allocated on the heap.
+ */
+typedef struct ExpandingObjectList {
+    u2          alloc;          /* #of slots allocated in "list" */
+    u2          count;          /* #of slots currently in use */
+    Object**    list;           /* flat, unsorted array of object refs */
+} ExpandingObjectList;
+
+/* fwd */
+static void updateDeadlockPrediction(Thread* self, Object* obj);
+static void removeCollectedObject(Object* obj);
+static void expandObjClear(ExpandingObjectList* pList);
+#endif
+
+/*
+ * Every Object has a monitor associated with it, but not every Object is
+ * actually locked.  Even the ones that are locked do not need a
+ * full-fledged monitor until a) there is actual contention or b) wait()
+ * is called on the Object.
+ *
+ * For Dalvik, we have implemented a scheme similar to the one described
+ * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
+ * (ACM 1998).  Things are even easier for us, though, because we have
+ * a full 32 bits to work with.
+ *
+ * The two states that an Object's lock may have are referred to as
+ * "thin" and "fat".  The lock may transition between the two states
+ * for various reasons.
+ *
+ * The lock value itself is stored in Object.lock, which is a union of
+ * the form:
+ *
+ *     typedef union Lock {
+ *         u4          thin;
+ *         Monitor*    mon;
+ *     } Lock;
+ *
+ * It is possible to tell the current state of the lock from the actual
+ * value, so we do not need to store any additional state.  When the
+ * lock is "thin", it has the form:
+ *
+ *     [31 ---- 16] [15 ---- 1] [0]
+ *      lock count   thread id   1
+ *
+ * When it is "fat", the field is simply a (Monitor *).  Since the pointer
+ * will always be 4-byte-aligned, bits 1 and 0 will always be zero when
+ * the field holds a pointer.  Hence, we can tell the current fat-vs-thin
+ * state by checking the least-significant bit.
+ *
+ * For an in-depth description of the mechanics of thin-vs-fat locking,
+ * read the paper referred to above.
+ *
+ * To reduce the amount of work when attempting a compare and exchange,
+ * Thread.threadId is guaranteed to have bit 0 set, and all new Objects
+ * have their lock fields initialized to the value 0x1, or
+ * DVM_LOCK_INITIAL_THIN_VALUE, via DVM_OBJECT_INIT().
+ */
+
+/*
+ * Monitors provide:
+ *  - mutually exclusive access to resources
+ *  - a way for multiple threads to wait for notification
+ *
+ * In effect, they fill the role of both mutexes and condition variables.
+ *
+ * Only one thread can own the monitor at any time.  There may be several
+ * threads waiting on it (the wait call unlocks it).  One or more waiting
+ * threads may be getting interrupted or notified at any given time.
+ */
+struct Monitor {
+    Thread*     owner;          /* which thread currently owns the lock? */
+    int         lockCount;      /* owner's recursive lock depth */
+    Object*     obj;            /* what object are we part of [debug only] */
+
+    int         waiting;        /* total #of threads waiting on this */
+    int         notifying;      /* #of threads being notified */
+    int         interrupting;   /* #of threads being interrupted */
+
+    pthread_mutex_t lock;       /* mutual exclusion for this monitor */
+    pthread_cond_t  cond;       /* wait/notify support */
+
+    Monitor*    next;           /* next entry in gDvm.monitorList */
+
+#ifdef WITH_DEADLOCK_PREDICTION
+    /*
+     * Objects that have been locked immediately after this one in the
+     * past.  We use an expanding flat array, allocated on first use, to
+     * minimize allocations.  Deletions from the list, expected to be
+     * infrequent, are crunched down.
+     */
+    ExpandingObjectList historyChildren;
+
+    /*
+     * We also track parents.  This isn't strictly necessary, but it makes
+     * the cleanup at GC time significantly faster.
+     */
+    ExpandingObjectList historyParents;
+
+    /* used during cycle detection */
+    bool        historyMark;
+
+    /* stack trace, established the first time we locked the object */
+    int         historyStackDepth;
+    int*        historyRawStackTrace;
+#endif
+};
+
+
+/*
+ * Create and initialize a monitor for "obj", and push it onto the global
+ * monitor list.  Aborts the VM if the allocation fails.
+ */
+Monitor* dvmCreateMonitor(Object* obj)
+{
+    Monitor* mon;
+
+    mon = (Monitor*) calloc(1, sizeof(Monitor));    /* zero-initialized */
+    if (mon == NULL) {
+        LOGE("Unable to allocate monitor\n");
+        dvmAbort();
+    }
+    mon->obj = obj;
+    dvmInitMutex(&mon->lock);
+    pthread_cond_init(&mon->cond, NULL);
+
+    /* replace the head of the list with the new monitor; lock-free push
+     * via compare-and-swap, retried if another thread races us
+     */
+    do {
+        mon->next = gDvm.monitorList;
+    } while (!ATOMIC_CMP_SWAP((int32_t*)(void*)&gDvm.monitorList,
+                              (int32_t)mon->next, (int32_t)mon));
+
+    return mon;
+}
+
+/*
+ * Release a Monitor.
+ *
+ * NOTE(review): unimplemented stub; monitors are currently reclaimed
+ * through dvmFreeObjectMonitor_internal() and dvmFreeMonitorList().
+ */
+static void releaseMonitor(Monitor* mon)
+{
+    // TODO
+}
+
+/*
+ * Release every monitor on the global list.  Only used when shutting
+ * the VM down.
+ */
+void dvmFreeMonitorList(void)
+{
+    Monitor* mon;
+    Monitor* pending;
+
+    for (mon = gDvm.monitorList; mon != NULL; mon = pending) {
+        /* grab the successor before we free the current node */
+        pending = mon->next;
+
+#ifdef WITH_DEADLOCK_PREDICTION
+        expandObjClear(&mon->historyChildren);
+        expandObjClear(&mon->historyParents);
+        free(mon->historyRawStackTrace);
+#endif
+        free(mon);
+    }
+}
+
+/*
+ * Log some info about our monitors.
+ */
+void dvmDumpMonitorInfo(const char* msg)
+{
+#if QUIET_ZYGOTE_MONITOR
+    if (gDvm.zygote) {
+        return;
+    }
+#endif
+
+    int totalCount = 0;
+    int liveCount = 0;
+    const Monitor* mon;
+
+    /* walk the global list, counting entries still attached to an object */
+    for (mon = gDvm.monitorList; mon != NULL; mon = mon->next) {
+        totalCount++;
+        if (mon->obj != NULL)
+            liveCount++;
+    }
+
+    LOGD("%s: monitor list has %d entries (%d live)\n",
+        msg, totalCount, liveCount);
+}
+
+/*
+ * Get the object that a monitor is part of, or NULL if there is no monitor.
+ */
+Object* dvmGetMonitorObject(Monitor* mon)
+{
+    return (mon != NULL) ? mon->obj : NULL;
+}
+
+/*
+ * Returns true if "thread" currently holds the lock on "obj".
+ */
+bool dvmHoldsLock(Thread* thread, Object* obj)
+{
+    Lock pinned;
+
+    if (thread == NULL || obj == NULL)
+        return false;
+
+    /* Latch the lock word in a local; we read it more than once and it
+     * could otherwise change out from under us if we get preempted.
+     */
+    pinned = obj->lock;
+
+    if (!IS_LOCK_FAT(&pinned))
+        return (pinned.thin & 0xffff) == thread->threadId;
+    return pinned.mon->owner == thread;
+}
+
+/*
+ * Free the monitor associated with an object and make the object's lock
+ * thin again.  This is called during garbage collection.
+ */
+void dvmFreeObjectMonitor_internal(Lock *lock)
+{
+    Monitor *mon;
+    int cc;
+
+    /* The macro that wraps this function checks IS_LOCK_FAT() first.
+     */
+    assert(IS_LOCK_FAT(lock));
+
+#ifdef WITH_DEADLOCK_PREDICTION
+    if (gDvm.deadlockPredictMode != kDPOff)
+        removeCollectedObject(lock->mon->obj);
+#endif
+
+    mon = lock->mon;
+    lock->thin = DVM_LOCK_INITIAL_THIN_VALUE;
+
+    /* This lock is associated with an object
+     * that's being swept.  The only possible way
+     * anyone could be holding this lock would be
+     * if some JNI code locked but didn't unlock
+     * the object, in which case we've got some bad
+     * native code somewhere.
+     *
+     * Do the trylock outside of assert() so it isn't compiled away under
+     * NDEBUG, and release the mutex before destroying it -- POSIX says
+     * destroying a locked mutex results in undefined behavior.
+     */
+    cc = pthread_mutex_trylock(&mon->lock);
+    assert(cc == 0);
+    cc = pthread_mutex_unlock(&mon->lock);
+    assert(cc == 0);
+    (void) cc;
+    pthread_mutex_destroy(&mon->lock);
+    pthread_cond_destroy(&mon->cond);
+#if 1
+//TODO: unlink from the monitor list (would require a lock)
+// (might not -- the GC suspension may be enough)
+    {
+        Monitor *next;
+        next = mon->next;
+#ifdef WITH_DEADLOCK_PREDICTION
+        expandObjClear(&mon->historyChildren);
+        expandObjClear(&mon->historyParents);
+        free(mon->historyRawStackTrace);
+#endif
+        memset(mon, 0, sizeof (*mon));
+        mon->next = next;
+    }
+//free(mon);
+#endif
+}
+
+
+/*
+ * Lock a monitor, blocking if necessary.  A recursive lock by the owner
+ * just bumps the count.
+ */
+static void lockMonitor(Thread* self, Monitor* mon)
+{
+    int cc;
+
+    if (mon->owner == self) {
+        /* recursive lock; no atomics needed, we already own it */
+        mon->lockCount++;
+    } else {
+        ThreadStatus oldStatus;
+
+        if (pthread_mutex_trylock(&mon->lock) != 0) {
+            /* mutex is locked, switch to wait status and sleep on it */
+            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
+            cc = pthread_mutex_lock(&mon->lock);
+            assert(cc == 0);
+            dvmChangeStatus(self, oldStatus);
+        }
+
+        /* we hold the pthread mutex now; claim ownership */
+        mon->owner = self;
+        assert(mon->lockCount == 0);
+
+        /*
+         * "waiting", "notifying", and "interrupting" could all be nonzero
+         * if we're locking an object on which other threads are waiting.
+         * Nothing worth assert()ing about here.
+         */
+    }
+}
+
+/*
+ * Try to lock a monitor without blocking.
+ *
+ * Returns "true" on success.
+ */
+static bool tryLockMonitor(Thread* self, Monitor* mon)
+{
+    if (mon->owner == self) {
+        /* recursive lock; just bump the count */
+        mon->lockCount++;
+        return true;
+    }
+
+    if (pthread_mutex_trylock(&mon->lock) != 0)
+        return false;
+
+    /* got the mutex; claim ownership */
+    mon->owner = self;
+    assert(mon->lockCount == 0);
+    return true;
+}
+
+
+/*
+ * Unlock a monitor.
+ *
+ * Returns true if the unlock succeeded.
+ * If the unlock failed, an exception will be pending.
+ */
+static bool unlockMonitor(Thread* self, Monitor* mon)
+{
+    assert(mon != NULL);        // can this happen?
+
+    if (mon->owner != self) {
+        /*
+         * We don't own this, so we're not allowed to unlock it.
+         * The JNI spec says that we should throw IllegalMonitorStateException
+         * in this case.
+         */
+        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+            "unlock of unowned monitor");
+        return false;
+    }
+
+    /*
+     * We own the monitor, so nobody else can be in here.
+     */
+    if (mon->lockCount != 0) {
+        /* recursive lock; just drop one level */
+        mon->lockCount--;
+    } else {
+        int cc;
+
+        mon->owner = NULL;
+        cc = pthread_mutex_unlock(&mon->lock);
+        assert(cc == 0);
+    }
+    return true;
+}
+
+/*
+ * Wait on a monitor until timeout, interrupt, or notification.  Used for
+ * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
+ *
+ * If another thread calls Thread.interrupt(), we throw InterruptedException
+ * and return immediately if one of the following are true:
+ *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
+ *  - blocked in join(), join(long), or join(long, int) methods of Thread
+ *  - blocked in sleep(long), or sleep(long, int) methods of Thread
+ * Otherwise, we set the "interrupted" flag.
+ *
+ * Checks to make sure that "nsec" is in the range 0-999999
+ * (i.e. fractions of a millisecond) and throws the appropriate
+ * exception if it isn't.
+ *
+ * The spec allows "spurious wakeups", and recommends that all code using
+ * Object.wait() do so in a loop.  This appears to derive from concerns
+ * about pthread_cond_wait() on multiprocessor systems.  Some commentary
+ * on the web casts doubt on whether these can/should occur.
+ *
+ * Since we're allowed to wake up "early", we clamp extremely long durations
+ * to return at the end of the 32-bit time epoch.
+ */
+static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
+    bool interruptShouldThrow)
+{
+    struct timespec ts;                 /* absolute wakeup time, if timed */
+    bool wasInterrupted = false;        /* woke via Thread.interrupt()? */
+    bool timed;                         /* true if a timeout was supplied */
+    int cc;
+
+    /* Make sure that the lock is fat and that we hold it. */
+    if (mon == NULL || ((u4)mon & 1) != 0 || mon->owner != self) {
+        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+            "object not locked by thread before wait()");
+        return;
+    }
+
+    /*
+     * Enforce the timeout range.
+     */
+    if (msec < 0 || nsec < 0 || nsec > 999999) {
+        dvmThrowException("Ljava/lang/IllegalArgumentException;",
+            "timeout arguments out of range");
+        return;
+    }
+
+    /*
+     * Compute absolute wakeup time, if necessary.
+     */
+    if (msec == 0 && nsec == 0) {
+        timed = false;
+    } else {
+        s8 endSec;
+
+#ifdef HAVE_TIMEDWAIT_MONOTONIC
+        struct timespec now;
+        clock_gettime(CLOCK_MONOTONIC, &now);
+        endSec = now.tv_sec + msec / 1000;
+        if (endSec >= 0x7fffffff) {
+            LOGV("NOTE: end time exceeds epoch\n");
+            endSec = 0x7ffffffe;
+        }
+        ts.tv_sec = endSec;
+        ts.tv_nsec = (now.tv_nsec + (msec % 1000) * 1000 * 1000) + nsec;
+#else
+        struct timeval now;
+        gettimeofday(&now, NULL);
+        endSec = now.tv_sec + msec / 1000;
+        if (endSec >= 0x7fffffff) {
+            LOGV("NOTE: end time exceeds epoch\n");
+            endSec = 0x7ffffffe;
+        }
+        ts.tv_sec = endSec;
+        ts.tv_nsec = (now.tv_usec + (msec % 1000) * 1000) * 1000 + nsec;
+#endif
+
+        /* catch rollover; the sum above is always under two seconds'
+         * worth of nanoseconds, so one adjustment is sufficient
+         */
+        if (ts.tv_nsec >= 1000000000L) {
+            ts.tv_sec++;
+            ts.tv_nsec -= 1000000000L;
+        }
+        timed = true;
+    }
+
+    /*
+     * Make sure "notifying" wasn't screwed up by earlier activity.  If this
+     * is wrong we could end up waking up too many people.  (This is a rare
+     * situation, but we need to handle it correctly.)
+     */
+    if (mon->notifying + mon->interrupting > mon->waiting) {
+        LOGI("threadid=%d: bogus mon %d+%d>%d; adjusting\n",
+            self->threadId, mon->notifying, mon->interrupting,
+            mon->waiting);
+
+        assert(mon->waiting >= mon->interrupting);
+        mon->notifying = mon->waiting - mon->interrupting;
+    }
+
+    /*
+     * Add ourselves to the set of threads waiting on this monitor, and
+     * release our hold.  We need to let it go even if we're a few levels
+     * deep in a recursive lock, and we need to restore that later.
+     *
+     * The order of operations here isn't significant, because we still
+     * hold the pthread mutex.
+     */
+    int prevLockCount;
+
+    prevLockCount = mon->lockCount;
+    mon->lockCount = 0;
+    mon->waiting++;
+    mon->owner = NULL;
+
+    /*
+     * Update thread status.  If the GC wakes up, it'll ignore us, knowing
+     * that we won't touch any references in this state, and we'll check
+     * our suspend mode before we transition out.
+     */
+    if (timed)
+        dvmChangeStatus(self, THREAD_TIMED_WAIT);
+    else
+        dvmChangeStatus(self, THREAD_WAIT);
+
+    /*
+     * Tell the thread which monitor we're waiting on.  This is necessary
+     * so that Thread.interrupt() can wake us up.  Thread.interrupt needs
+     * to gain ownership of the monitor mutex before it can signal us, so
+     * we're still not worried about race conditions.
+     */
+    self->waitMonitor = mon;
+
+    /*
+     * Handle the case where the thread was interrupted before we called
+     * wait().
+     */
+    if (self->interrupted) {
+        wasInterrupted = true;
+        goto done;      /* skip the wait loop; restore state and throw */
+    }
+
+    LOGVV("threadid=%d: waiting on %p\n", self->threadId, mon);
+
+    while (true) {
+        if (!timed) {
+            cc = pthread_cond_wait(&mon->cond, &mon->lock);
+            assert(cc == 0);
+        } else {
+#ifdef HAVE_TIMEDWAIT_MONOTONIC
+            cc = pthread_cond_timedwait_monotonic(&mon->cond, &mon->lock, &ts);
+#else
+            cc = pthread_cond_timedwait(&mon->cond, &mon->lock, &ts);
+#endif
+            if (cc == ETIMEDOUT) {
+                LOGVV("threadid=%d wakeup: timeout\n", self->threadId);
+                break;
+            }
+        }
+
+        /*
+         * We woke up because of an interrupt (which does a broadcast) or
+         * a notification (which might be a signal or a broadcast).  Figure
+         * out what we need to do.
+         */
+        if (self->interruptingWait) {
+            /*
+             * The other thread successfully gained the monitor lock, and
+             * has confirmed that we were waiting on it.  If this is an
+             * interruptible wait, we bail out immediately.  If not, we
+             * continue on.
+             */
+            self->interruptingWait = false;
+            mon->interrupting--;
+            assert(self->interrupted);
+            if (interruptShouldThrow) {
+                wasInterrupted = true;
+                LOGD("threadid=%d wakeup: interrupted\n", self->threadId);
+                break;
+            } else {
+                LOGD("threadid=%d wakeup: not interruptible\n", self->threadId);
+            }
+        }
+        if (mon->notifying) {
+            /*
+             * One or more threads are being notified.  Remove ourselves
+             * from the set.
+             */
+            mon->notifying--;
+            LOGVV("threadid=%d wakeup: notified\n", self->threadId);
+            break;
+        } else {
+            /*
+             * Looks like we were woken unnecessarily, probably as a
+             * result of another thread being interrupted.  Go back to
+             * sleep.
+             */
+            LOGVV("threadid=%d wakeup: going back to sleep\n", self->threadId);
+        }
+    }
+
+done:
+    //if (wasInterrupted) {
+    //    LOGW("threadid=%d: throwing InterruptedException:\n", self->threadId);
+    //    dvmDumpThread(self, false);
+    //}
+
+    /*
+     * Put everything back.  Again, we hold the pthread mutex, so the order
+     * here isn't significant.
+     */
+    self->waitMonitor = NULL;
+    mon->owner = self;
+    mon->waiting--;
+    mon->lockCount = prevLockCount;
+
+    /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
+    dvmChangeStatus(self, THREAD_RUNNING);
+
+    if (wasInterrupted) {
+        /*
+         * We were interrupted while waiting, or somebody interrupted an
+         * un-interruptable thread earlier and we're bailing out immediately.
+         *
+         * The doc sayeth: "The interrupted status of the current thread is
+         * cleared when this exception is thrown."
+         */
+        self->interrupted = false;
+        if (interruptShouldThrow)
+            dvmThrowException("Ljava/lang/InterruptedException;", NULL);
+    }
+}
+
+/*
+ * Notify one thread waiting on this monitor.
+ */
+static void notifyMonitor(Thread* self, Monitor* mon)
+{
+    int cc;
+
+    /* Make sure that the lock is fat and that we hold it. */
+    if (mon == NULL || ((u4)mon & 1) != 0 || mon->owner != self) {
+        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+            "object not locked by thread before notify()");
+        return;
+    }
+
+    /*
+     * Check to see if anybody is there to notify.  We subtract off
+     * threads that are being interrupted and anything that has
+     * potentially already been notified.
+     */
+    if (mon->notifying + mon->interrupting >= mon->waiting) {
+        LOGVV("threadid=%d: nobody to signal on %p\n", self->threadId, mon);
+        return;
+    }
+
+    /* wake up one thread */
+    LOGVV("threadid=%d: signaling on %p\n", self->threadId, mon);
+
+    mon->notifying++;
+    cc = pthread_cond_signal(&mon->cond);
+    assert(cc == 0);
+}
+
+/*
+ * Notify all threads waiting on this monitor.
+ *
+ * We keep a count of how many threads we notified, so that our various
+ * counts remain accurate.
+ */
+static void notifyAllMonitor(Thread* self, Monitor* mon)
+{
+    int cc;
+
+    /* Make sure that the lock is fat and that we hold it. */
+    if (mon == NULL || ((u4)mon & 1) != 0 || mon->owner != self) {
+        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+            "object not locked by thread before notifyAll()");
+        return;
+    }
+
+    /* everybody currently waiting, minus those already being interrupted */
+    mon->notifying = mon->waiting - mon->interrupting;
+    if (mon->notifying <= 0) {
+        LOGVV("threadid=%d: nobody to broadcast to on %p\n", self->threadId,mon);
+        return;
+    }
+
+    LOGVV("threadid=%d: broadcasting to %d threads on %p\n",
+        self->threadId, mon->notifying, mon);
+
+    cc = pthread_cond_broadcast(&mon->cond);
+    assert(cc == 0);
+}
+
+#if THIN_LOCKING
+/*
+ * Thin locking support
+ */
+
+/*
+ * Implements monitorenter for "synchronized" stuff.
+ *
+ * This does not fail or throw an exception (unless deadlock prediction
+ * is enabled and set to "err" mode).
+ */
+void dvmLockObject(Thread* self, Object *obj)
+{
+    volatile u4 *thinp = &obj->lock.thin;   /* lock word; bit 0 set => thin */
+    u4 threadId = self->threadId;
+
+    /* First, try to grab the lock as if it's thin;
+     * this is the common case and will usually succeed.
+     * (Success leaves the word holding our threadId with a zero
+     * recursion count; thread ids always have bit 0 set.)
+     */
+    if (!ATOMIC_CMP_SWAP((int32_t *)thinp,
+                         (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
+                         (int32_t)threadId)) {
+        /* The lock is either a thin lock held by someone (possibly 'self'),
+         * or a fat lock.
+         */
+        if ((*thinp & 0xffff) == threadId) {
+            /* 'self' is already holding the thin lock; we can just
+             * bump the count.  Atomic operations are not necessary
+             * because only the thread holding the lock is allowed
+             * to modify the Lock field.
+             */
+            *thinp += 1<<16;    /* recursion count lives in the top 16 bits */
+        } else {
+            /* If this is a thin lock we need to spin on it, if it's fat
+             * we need to acquire the monitor.
+             */
+            if ((*thinp & 1) != 0) {
+                ThreadStatus oldStatus;
+                static const unsigned long maxSleepDelay = 1 * 1024 * 1024;
+                unsigned long sleepDelay;
+
+                LOG_THIN("(%d) spin on lock 0x%08x: 0x%08x (0x%08x) 0x%08x\n",
+                         threadId, (uint)&obj->lock,
+                         DVM_LOCK_INITIAL_THIN_VALUE, *thinp, threadId);
+
+                /* The lock is still thin, but some other thread is
+                 * holding it.  Let the VM know that we're about
+                 * to wait on another thread.
+                 */
+                oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
+
+                /* Spin until the other thread lets go.
+                 */
+                sleepDelay = 0;
+                do {
+                    /* In addition to looking for an unlock,
+                     * we need to watch out for some other thread
+                     * fattening the lock behind our back.
+                     */
+                    while (*thinp != DVM_LOCK_INITIAL_THIN_VALUE) {
+                        if ((*thinp & 1) == 0) {
+                            /* The lock has been fattened already.
+                             */
+                            LOG_THIN("(%d) lock 0x%08x surprise-fattened\n",
+                                     threadId, (uint)&obj->lock);
+                            dvmChangeStatus(self, oldStatus);
+                            goto fat_lock;
+                        }
+
+                        /* yield first, then back off exponentially up to
+                         * maxSleepDelay/2 microseconds per iteration
+                         */
+                        if (sleepDelay == 0) {
+                            sched_yield();
+                            sleepDelay = 1 * 1000;
+                        } else {
+                            usleep(sleepDelay);
+                            if (sleepDelay < maxSleepDelay / 2) {
+                                sleepDelay *= 2;
+                            }
+                        }
+                    }
+                } while (!ATOMIC_CMP_SWAP((int32_t *)thinp,
+                                          (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
+                                          (int32_t)threadId));
+                LOG_THIN("(%d) spin on lock done 0x%08x: "
+                         "0x%08x (0x%08x) 0x%08x\n",
+                         threadId, (uint)&obj->lock,
+                         DVM_LOCK_INITIAL_THIN_VALUE, *thinp, threadId);
+
+                /* We've got the thin lock; let the VM know that we're
+                 * done waiting.
+                 */
+                dvmChangeStatus(self, oldStatus);
+
+                /* Fatten the lock.  Note this relinquishes ownership.
+                 * We could also create the monitor in an "owned" state
+                 * to avoid "re-locking" it in fat_lock.
+                 */
+                obj->lock.mon = dvmCreateMonitor(obj);
+                LOG_THIN("(%d) lock 0x%08x fattened\n",
+                         threadId, (uint)&obj->lock);
+
+                /* Fall through to acquire the newly fat lock.
+                 */
+            }
+
+            /* The lock is already fat, which means
+             * that obj->lock.mon is a regular (Monitor *).
+             */
+        fat_lock:
+            assert(obj->lock.mon != NULL);
+            lockMonitor(self, obj->lock.mon);
+        }
+    }
+    // else, the lock was acquired with the ATOMIC_CMP_SWAP().
+
+#ifdef WITH_DEADLOCK_PREDICTION
+    /*
+     * See if we were allowed to grab the lock at this time.  We do it
+     * *after* acquiring the lock, rather than before, so that we can
+     * freely update the Monitor struct.  This seems counter-intuitive,
+     * but our goal is deadlock *prediction* not deadlock *prevention*.
+     * (If we actually deadlock, the situation is easy to diagnose from
+     * a thread dump, so there's no point making a special effort to do
+     * the checks before the lock is held.)
+     *
+     * This needs to happen before we add the object to the thread's
+     * monitor list, so we can tell the difference between first-lock and
+     * re-lock.
+     *
+     * It's also important that we do this while in THREAD_RUNNING, so
+     * that we don't interfere with cleanup operations in the GC.
+     */
+    if (gDvm.deadlockPredictMode != kDPOff) {
+        if (self->status != THREAD_RUNNING) {
+            LOGE("Bad thread status (%d) in DP\n", self->status);
+            dvmDumpThread(self, false);
+            dvmAbort();
+        }
+        assert(!dvmCheckException(self));
+        updateDeadlockPrediction(self, obj);
+        if (dvmCheckException(self)) {
+            /*
+             * If we're throwing an exception here, we need to free the
+             * lock.  We add the object to the thread's monitor list so the
+             * "unlock" code can remove it.
+             */
+            dvmAddToMonitorList(self, obj, false);
+            dvmUnlockObject(self, obj);
+            LOGV("--- unlocked, pending is '%s'\n",
+                dvmGetException(self)->clazz->descriptor);
+        }
+    }
+
+    /*
+     * Add the locked object, and the current stack trace, to the list
+     * held by the Thread object.  If deadlock prediction isn't on,
+     * don't capture the stack trace.
+     */
+    dvmAddToMonitorList(self, obj, gDvm.deadlockPredictMode != kDPOff);
+#elif defined(WITH_MONITOR_TRACKING)
+    /*
+     * Add the locked object to the list held by the Thread object.
+     */
+    dvmAddToMonitorList(self, obj, false);
+#endif
+}
+
+/*
+ * Implements monitorexit for "synchronized" stuff.
+ *
+ * On failure, throws an exception and returns "false".
+ */
+bool dvmUnlockObject(Thread* self, Object *obj)
+{
+    volatile u4 *thinp = &obj->lock.thin;   /* lock word; bit 0 set => thin */
+    u4 threadId = self->threadId;
+
+    /* Check the common case, where 'self' has locked 'obj' once, first.
+     * (The lock word equals our threadId exactly when the recursion
+     * count is zero, since thread ids always have bit 0 set.)
+     */
+    if (*thinp == threadId) {
+        /* Unlock 'obj' by clearing our threadId from 'thin'.
+         * The lock protects the lock field itself, so it's
+         * safe to update non-atomically.
+         */
+        *thinp = DVM_LOCK_INITIAL_THIN_VALUE;
+    } else if ((*thinp & 1) != 0) {
+        /* If the object is locked, it had better be locked by us.
+         */
+        if ((*thinp & 0xffff) != threadId) {
+            /* The JNI spec says that we should throw an exception
+             * in this case.
+             */
+            //LOGW("Unlock thin %p: id %d vs %d\n",
+            //    obj, (*thinp & 0xfff), threadId);
+            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+                "unlock of unowned monitor");
+            return false;
+        }
+
+        /* It's a thin lock, but 'self' has locked 'obj'
+         * more than once.  Decrement the count.
+         */
+        *thinp -= 1<<16;
+    } else {
+        /* It's a fat lock.
+         */
+        assert(obj->lock.mon != NULL);
+        if (!unlockMonitor(self, obj->lock.mon)) {
+            /* exception has been raised */
+            return false;
+        }
+    }
+
+#ifdef WITH_MONITOR_TRACKING
+    /*
+     * Remove the object from the Thread's list.
+     */
+    dvmRemoveFromMonitorList(self, obj);
+#endif
+
+    return true;
+}
+
+/*
+ * Object.wait().  Also called for class init.
+ */
+void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
+    bool interruptShouldThrow)
+{
+    Monitor* mon = obj->lock.mon;
+    u4 thin = obj->lock.thin;
+
+    /* If the lock is still thin, we need to fatten it.
+     */
+    if ((thin & 1) != 0) {
+        /* Make sure that 'self' holds the lock.
+         */
+        if ((thin & 0xffff) != self->threadId) {
+            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+                "object not locked by thread before wait()");
+            return;
+        }
+
+        /* This thread holds the lock.  We need to fatten the lock
+         * so 'self' can block on it.  Don't update the object lock
+         * field yet, because 'self' needs to acquire the lock before
+         * any other thread gets a chance.
+         */
+        mon = dvmCreateMonitor(obj);
+
+        /* 'self' has actually locked the object one or more times;
+         * make sure that the monitor reflects this.
+         */
+        lockMonitor(self, mon);
+        mon->lockCount = thin >> 16;    /* carry over the recursion count */
+        LOG_THIN("(%d) lock 0x%08x fattened by wait() to count %d\n",
+                 self->threadId, (uint)&obj->lock, mon->lockCount);
+
+        /* Make the monitor public now that it's in the right state.
+         */
+        obj->lock.mon = mon;
+    }
+
+    waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
+}
+
+/*
+ * Object.notify().
+ *
+ * Throws IllegalMonitorStateException if 'self' does not hold the lock.
+ */
+void dvmObjectNotify(Thread* self, Object *obj)
+{
+    /* Both union views are read up front; 'mon' is only meaningful in
+     * the fat-lock branch below.
+     */
+    Monitor* mon = obj->lock.mon;
+    u4 thin = obj->lock.thin;
+
+    /* If the lock is still thin, there aren't any waiters;
+     * waiting on an object forces lock fattening.
+     */
+    if ((thin & 1) != 0) {
+        /* Make sure that 'self' holds the lock.
+         */
+        if ((thin & 0xffff) != self->threadId) {
+            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+                "object not locked by thread before notify()");
+            return;
+        }
+
+        /* no-op;  there are no waiters to notify.
+         */
+    } else {
+        /* It's a fat lock.
+         */
+        notifyMonitor(self, mon);
+    }
+}
+
+/*
+ * Object.notifyAll().
+ *
+ * Throws IllegalMonitorStateException if 'self' does not hold the lock.
+ */
+void dvmObjectNotifyAll(Thread* self, Object *obj)
+{
+    u4 thin = obj->lock.thin;
+
+    /* If the lock is still thin, there aren't any waiters;
+     * waiting on an object forces lock fattening.
+     */
+    if ((thin & 1) != 0) {
+        /* Make sure that 'self' holds the lock.
+         */
+        if ((thin & 0xffff) != self->threadId) {
+            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
+                "object not locked by thread before notifyAll()");
+            return;
+        }
+
+        /* no-op;  there are no waiters to notify.
+         */
+    } else {
+        Monitor* mon = obj->lock.mon;
+
+        /* It's a fat lock.
+         */
+        notifyAllMonitor(self, mon);
+    }
+}
+
+#else  // not THIN_LOCKING
+
+/*
+ * Implements monitorenter for "synchronized" stuff.
+ *
+ * This does not fail or throw an exception.
+ */
+void dvmLockObject(Thread* self, Object* obj)
+{
+    Monitor* mon = obj->lock.mon;
+
+    if (mon == NULL) {
+        /* No monitor yet; create one and try to install it.  The
+         * compare-and-swap ensures only one thread's monitor "wins";
+         * a loser releases its speculative monitor and adopts the
+         * winner's instead.
+         */
+        mon = dvmCreateMonitor(obj);
+        if (!ATOMIC_CMP_SWAP((int32_t *)&obj->lock.mon,
+                             (int32_t)NULL, (int32_t)mon)) {
+            /* somebody else beat us to it */
+            releaseMonitor(mon);
+            mon = obj->lock.mon;
+        }
+    }
+
+    lockMonitor(self, mon);
+}
+
+/*
+ * Implements monitorexit for "synchronized" stuff.
+ *
+ * Returns "false" if the unlock fails, in which case an exception will
+ * be pending (see unlockMonitor).
+ */
+bool dvmUnlockObject(Thread* self, Object* obj)
+{
+    Monitor* mon = obj->lock.mon;
+
+    return unlockMonitor(self, mon);
+}
+
+
+/*
+ * Object.wait().
+ *
+ * NOTE(review): this (non-THIN_LOCKING) variant does not check
+ * obj->lock.mon for NULL before use -- presumably the monitor is
+ * guaranteed to exist by the time a wait is possible; confirm.
+ */
+void dvmObjectWait(Thread* self, Object* obj, u8 msec, u4 nsec)
+{
+    Monitor* mon = obj->lock.mon;
+
+    waitMonitor(self, mon, msec, nsec);
+}
+
+/*
+ * Object.notify().
+ *
+ * NOTE(review): assumes notifyMonitor validates monitor ownership.
+ */
+void dvmObjectNotify(Thread* self, Object* obj)
+{
+    Monitor* mon = obj->lock.mon;
+
+    notifyMonitor(self, mon);
+}
+
+/*
+ * Object.notifyAll().
+ *
+ * NOTE(review): assumes notifyAllMonitor validates monitor ownership.
+ */
+void dvmObjectNotifyAll(Thread* self, Object* obj)
+{
+    Monitor* mon = obj->lock.mon;
+
+    notifyAllMonitor(self, mon);
+}
+
+#endif  // not THIN_LOCKING
+
+
+/*
+ * This implements java.lang.Thread.sleep(long msec, int nsec).
+ *
+ * The sleep is interruptible by other threads, which means we can't just
+ * plop into an OS sleep call.  (We probably could if we wanted to send
+ * signals around and rely on EINTR, but that's inefficient and relies
+ * on native code respecting our signal mask.)
+ *
+ * We have to do all of this stuff for Object.wait() as well, so it's
+ * easiest to just sleep on a private Monitor.
+ *
+ * It appears that we want sleep(0,0) to go through the motions of sleeping
+ * for a very short duration, rather than just returning.
+ */
+void dvmThreadSleep(u8 msec, u4 nsec)
+{
+    Thread* self = dvmThreadSelf();
+    /* VM-global private monitor, used only for sleeping */
+    Monitor* mon = gDvm.threadSleepMon;
+
+    /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */
+    if (msec == 0 && nsec == 0)
+        nsec++;
+
+    lockMonitor(self, mon);
+    waitMonitor(self, mon, msec, nsec, true);   /* interruptShouldThrow=true */
+    unlockMonitor(self, mon);
+}
+
+/*
+ * Implement java.lang.Thread.interrupt().
+ *
+ * We need to increment the monitor's "interrupting" count, and set the
+ * interrupted status for the thread in question.  Doing so requires
+ * gaining the monitor's lock, which may not happen in a timely fashion.
+ * We are left with a decision between failing to interrupt the thread
+ * and stalling the interrupting thread.
+ *
+ * We must take some care to ensure that we don't try to interrupt the same
+ * thread on the same mutex twice.  Doing so would leave us with an
+ * incorrect value for Monitor.interrupting.
+ */
+void dvmThreadInterrupt(Thread* thread)
+{
+    static const int kMaxRetries = 4;
+    Monitor* mon;
+    Thread* self;
+    int retry;
+
+    /*
+     * Raise the "interrupted" flag.  This will cause it to bail early out
+     * of the next wait() attempt, if it's not currently waiting on
+     * something.
+     */
+    thread->interrupted = true;
+    /* NOTE(review): barrier presumably publishes the flag before we
+     * inspect thread->waitMonitor below -- confirm intended ordering.
+     */
+    MEM_BARRIER();
+
+    /*
+     * Is the thread waiting?
+     *
+     * Note that fat vs. thin doesn't matter here;  waitMonitor
+     * is only set when a thread actually waits on a monitor,
+     * which implies that the monitor has already been fattened.
+     */
+    mon = thread->waitMonitor;
+    if (mon == NULL)
+        return;
+
+    /*
+     * Try to acquire the monitor, if we don't already own it.
+     * We need to hold the same mutex as the thread in order
+     * to signal the condition it's waiting on.
+     *
+     * TODO: we may be able to get rid of the explicit lock by coordinating
+     * this more closely with waitMonitor.
+     */
+    self = dvmThreadSelf();
+    retry = 0;
+    do {
+        if (tryLockMonitor(self, mon))
+            goto gotit;
+
+        /* wait a bit */
+        sched_yield();
+
+        /*
+         * If they've moved on to a different monitor, the "interrupted"
+         * flag we set earlier will have taken care of things, so we don't
+         * want to continue.
+         */
+        if (thread->waitMonitor != mon)
+            return;
+    } while (retry++ < kMaxRetries);
+
+    /* Couldn't grab the monitor; give up.  The "interrupted" flag set
+     * above remains raised, so the target still bails out of its next
+     * wait attempt.
+     */
+    LOGW("threadid=%d: unable to interrupt threadid=%d\n",
+        self->threadId, thread->threadId);
+    return;
+
+gotit:
+    /*
+     * We've got the monitor lock, which means nobody can be added or
+     * removed from the wait list.  This also means that the Thread's
+     * waitMonitor/interruptingWait fields can't be modified by anyone
+     * else.
+     *
+     * If things look good, raise flags and wake the threads sleeping
+     * on the monitor's condition variable.
+     */
+    if (thread->waitMonitor == mon &&       // still on same monitor?
+        thread->interrupted &&              // interrupt still pending?
+        !thread->interruptingWait)          // nobody else is interrupting too?
+    {
+        int cc;
+
+        LOGVV("threadid=%d: interrupting threadid=%d waiting on %p\n",
+            self->threadId, thread->threadId, mon);
+
+        thread->interruptingWait = true;    // prevent re-interrupt...
+        mon->interrupting++;                // ...so we only do this once
+        cc = pthread_cond_broadcast(&mon->cond);
+        assert(cc == 0);
+    }
+
+    unlockMonitor(self, mon);
+}
+
+
+#ifdef WITH_DEADLOCK_PREDICTION
+/*
+ * ===========================================================================
+ *      Deadlock prediction
+ * ===========================================================================
+ */
+/*
+The idea is to predict the possibility of deadlock by recording the order
+in which monitors are acquired.  If we see an attempt to acquire a lock
+out of order, we can identify the locks and offending code.
+
+To make this work, we need to keep track of the locks held by each thread,
+and create history trees for each lock.  When a thread tries to acquire
+a new lock, we walk through the "history children" of the lock, looking
+for a match with locks the thread already holds.  If we find a match,
+it means the thread has made a request that could result in a deadlock.
+
+To support recursive locks, we always allow re-locking a currently-held
+lock, and maintain a recursion depth count.
+
+An ASCII-art example, where letters represent Objects:
+
+        A
+       /|\
+      / | \
+     B  |  D
+      \ |
+       \|
+        C
+
+The above is the tree we'd have after handling Object synchronization
+sequences "ABC", "AC", "AD".  A has three children, {B, C, D}.  C is also
+a child of B.  (The lines represent pointers between parent and child.
+Every node can have multiple parents and multiple children.)
+
+If we hold AC, and want to lock B, we recursively search through B's
+children to see if A or C appears.  It does, so we reject the attempt.
+(A straightforward way to implement it: add a link from C to B, then
+determine whether the graph starting at B contains a cycle.)
+
+If we hold AC and want to lock D, we would succeed, creating a new link
+from C to D.
+
+The lock history and a stack trace is attached to the Object's Monitor
+struct, which means we need to fatten every Object we lock (thin locking
+is effectively disabled).  If we don't need the stack trace we can
+avoid fattening the leaf nodes, only fattening objects that need to hold
+history trees.
+
+Updates to Monitor structs are only allowed for the thread that holds
+the Monitor, so we actually do most of our deadlock prediction work after
+the lock has been acquired.
+
+When an object with a monitor is GCed, we need to remove it from the
+history trees.  There are two basic approaches:
+ (1) Go through the entire set of known monitors, search all child
+     lists for the object in question.  This is rather slow, resulting
+     in GC passes that take upwards of 10 seconds to complete.
+ (2) Maintain "parent" pointers in each node.  Remove the entries as
+     required.  This requires additional storage and maintenance for
+     every operation, but is significantly faster at GC time.
+For each GCed object, we merge all of the object's children into each of
+the object's parents.
+*/
+
+#if !defined(WITH_MONITOR_TRACKING)
+# error "WITH_DEADLOCK_PREDICTION requires WITH_MONITOR_TRACKING"
+#endif
+
+/*
+ * Reset an ExpandingObjectList to empty, releasing its backing array.
+ */
+static void expandObjClear(ExpandingObjectList* pList)
+{
+    /* free(NULL) is a harmless no-op, so no guard is required */
+    free(pList->list);
+    pList->list = NULL;
+    pList->count = 0;
+    pList->alloc = 0;
+}
+
+/*
+ * Get the number of objects currently stored in the list.
+ *
+ * (The "expandBuf" prefix notwithstanding, this operates on an
+ * ExpandingObjectList, like expandBufGetEntry below.)
+ */
+static inline int expandBufGetCount(const ExpandingObjectList* pList)
+{
+    return pList->count;
+}
+
+/*
+ * Get the Nth entry from the list.
+ *
+ * No bounds checking is performed; the caller must ensure
+ * 0 <= i < expandBufGetCount(pList).
+ */
+static inline Object* expandBufGetEntry(const ExpandingObjectList* pList,
+    int i)
+{
+    return pList->list[i];
+}
+
+/*
+ * Append "obj" to the end of the list.
+ *
+ * Uniqueness is neither checked nor enforced here; higher-level code
+ * is expected to take care of that.
+ */
+static void expandObjAddEntry(ExpandingObjectList* pList, Object* obj)
+{
+    if (pList->count == pList->alloc) {
+        /* full; grow the storage (doubling, starting at 4 slots) */
+        int newAlloc = (pList->alloc == 0) ? 4 : pList->alloc * 2;
+        Object** newList;
+
+        LOGVV("expanding %p to %d\n", pList, newAlloc);
+        newList = realloc(pList->list, newAlloc * sizeof(Object*));
+        if (newList == NULL) {
+            LOGE("Failed expanding DP object list (alloc=%d)\n", newAlloc);
+            dvmAbort();
+        }
+        pList->alloc = newAlloc;
+        pList->list = newList;
+    }
+
+    pList->list[pList->count++] = obj;
+}
+
+/*
+ * Remove "obj" from the list, if present.
+ *
+ * Returns "true" if the element was successfully removed.
+ */
+static bool expandObjRemoveEntry(ExpandingObjectList* pList, Object* obj)
+{
+    int i;
+
+    /* search from the end; any match will do */
+    for (i = pList->count-1; i >= 0; i--) {
+        if (pList->list[i] == obj)
+            break;
+    }
+    if (i < 0)
+        return false;
+
+    if (i != pList->count-1) {
+        /*
+         * The order of elements is not important, so we just copy the
+         * last entry into the new slot.
+         */
+        //memmove(&pList->list[i], &pList->list[i+1],
+        //    (pList->count-1 - i) * sizeof(pList->list[0]));
+        pList->list[i] = pList->list[pList->count-1];
+    }
+
+    pList->count--;
+    /* poison the vacated slot, presumably to make stale reads obvious */
+    pList->list[pList->count] = (Object*) 0xdecadead;
+    return true;
+}
+
+/*
+ * Report whether "obj" is currently present in the list.
+ */
+static bool expandObjHas(const ExpandingObjectList* pList, Object* obj)
+{
+    Object** cur = pList->list;
+    int remaining;
+
+    for (remaining = pList->count; remaining > 0; remaining--) {
+        if (*cur++ == obj)
+            return true;
+    }
+    return false;
+}
+
+/*
+ * Debug aid: write every entry in the list to stdout.
+ */
+static void expandObjDump(const ExpandingObjectList* pList)
+{
+    int idx = 0;
+    while (idx < pList->count) {
+        printf(" %p", pList->list[idx]);
+        idx++;
+    }
+}
+
+/*
+ * Scan the list for repeated entries.
+ *
+ * Returns the index of the first instance of a duplicated value, or -1
+ * if every entry is unique.
+ */
+static int expandObjCheckForDuplicates(const ExpandingObjectList* pList)
+{
+    int outer;
+
+    for (outer = 0; outer < pList->count-1; outer++) {
+        int inner;
+        for (inner = outer + 1; inner < pList->count; inner++) {
+            if (pList->list[outer] == pList->list[inner])
+                return outer;
+        }
+    }
+
+    return -1;
+}
+
+
+/*
+ * Determine whether "child" appears in the list of objects associated
+ * with the Monitor in "parent".  If "parent" is a thin lock, we return
+ * false immediately.
+ */
+static bool objectInChildList(const Object* parent, Object* child)
+{
+    /* Copy the lock word to a local; NOTE(review): presumably so the
+     * fat/thin test and the monitor dereference see one consistent
+     * snapshot if the lock is fattened concurrently -- confirm.
+     */
+    Lock lock = parent->lock;
+    if (!IS_LOCK_FAT(&lock)) {
+        //LOGI("on thin\n");
+        return false;
+    }
+
+    return expandObjHas(&lock.mon->historyChildren, child);
+}
+
+/*
+ * Print the child list.  Debug-only; assumes "parent" has a fat lock
+ * (lock.mon is dereferenced unconditionally).
+ */
+static void dumpKids(Object* parent)
+{
+    Monitor* mon = parent->lock.mon;
+
+    printf("Children of %p:", parent);
+    expandObjDump(&mon->historyChildren);
+    printf("\n");
+}
+
+/*
+ * Add "child" to the list of children in "parent", and add "parent" to
+ * the list of parents in "child", creating a two-way link.
+ *
+ * Both locks must already be fat, and the pair must not be linked yet
+ * (asserted below).
+ */
+static void linkParentToChild(Object* parent, Object* child)
+{
+    //assert(parent->lock.mon->owner == dvmThreadSelf());   // !owned for merge
+    assert(IS_LOCK_FAT(&parent->lock));
+    assert(IS_LOCK_FAT(&child->lock));
+    assert(parent != child);
+    Monitor* mon;
+
+    mon = parent->lock.mon;
+    assert(!expandObjHas(&mon->historyChildren, child));
+    expandObjAddEntry(&mon->historyChildren, child);
+
+    mon = child->lock.mon;
+    assert(!expandObjHas(&mon->historyParents, parent));
+    expandObjAddEntry(&mon->historyParents, parent);
+}
+
+
+/*
+ * Remove "child" from the list of children in "parent", and remove
+ * "parent" from the list of parents in "child".
+ *
+ * A missing entry produces a warning rather than an abort, so a
+ * slightly inconsistent tree doesn't take the VM down.
+ */
+static void unlinkParentFromChild(Object* parent, Object* child)
+{
+    //assert(parent->lock.mon->owner == dvmThreadSelf());   // !owned for GC
+    assert(IS_LOCK_FAT(&parent->lock));
+    assert(IS_LOCK_FAT(&child->lock));
+    assert(parent != child);
+    Monitor* mon;
+
+    mon = parent->lock.mon;
+    if (!expandObjRemoveEntry(&mon->historyChildren, child)) {
+        LOGW("WARNING: child %p not found in parent %p\n", child, parent);
+    }
+    assert(!expandObjHas(&mon->historyChildren, child));
+    assert(expandObjCheckForDuplicates(&mon->historyChildren) < 0);
+
+    mon = child->lock.mon;
+    if (!expandObjRemoveEntry(&mon->historyParents, parent)) {
+        LOGW("WARNING: parent %p not found in child %p\n", parent, child);
+    }
+    assert(!expandObjHas(&mon->historyParents, parent));
+    assert(expandObjCheckForDuplicates(&mon->historyParents) < 0);
+}
+
+
+/*
+ * Log the monitors held by the current thread.  This is done as part of
+ * flagging an error.
+ *
+ * Walks self->pLockedObjects (most-recently-acquired first, per the
+ * message below), dumping each entry's recorded stack trace.
+ */
+static void logHeldMonitors(Thread* self)
+{
+    char* name = NULL;
+
+    name = dvmGetThreadName(self);
+    LOGW("Monitors currently held by thread (threadid=%d '%s')\n",
+        self->threadId, name);
+    LOGW("(most-recently-acquired on top):\n");
+    free(name);     /* dvmGetThreadName returns an allocated string */
+
+    LockedObjectData* lod = self->pLockedObjects;
+    while (lod != NULL) {
+        LOGW("--- object %p[%d] (%s)\n",
+            lod->obj, lod->recursionCount, lod->obj->clazz->descriptor);
+        dvmLogRawStackTrace(lod->rawStackTrace, lod->stackDepth);
+
+        lod = lod->next;
+    }
+}
+
+/*
+ * Recursively traverse the object hierarchy starting at "obj".  We mark
+ * ourselves on entry and clear the mark on exit.  If we ever encounter
+ * a marked object, we have a cycle.
+ *
+ * Returns "true" if all is well, "false" if we found a cycle.
+ */
+static bool traverseTree(Thread* self, const Object* obj)
+{
+    assert(IS_LOCK_FAT(&obj->lock));
+    Monitor* mon = obj->lock.mon;
+
+    /*
+     * Have we been here before?
+     */
+    if (mon->historyMark) {
+        int* rawStackTrace;
+        int stackDepth;
+
+        /* Cycle detected: log the illegal attempt, the current stack,
+         * and the monitors we hold.  Callers log the lock-order history
+         * as the recursion unwinds (see the failure branch below).
+         */
+        LOGW("%s\n", kStartBanner);
+        LOGW("Illegal lock attempt:\n");
+        LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
+
+        rawStackTrace = dvmFillInStackTraceRaw(self, &stackDepth);
+        dvmLogRawStackTrace(rawStackTrace, stackDepth);
+        free(rawStackTrace);
+
+        LOGW(" ");
+        logHeldMonitors(self);
+
+        LOGW(" ");
+        LOGW("Earlier, the following lock order (from last to first) was\n");
+        LOGW("established -- stack trace is from first successful lock):\n");
+        return false;
+    }
+    mon->historyMark = true;
+
+    /*
+     * Examine the children.  We do NOT hold these locks, so they might
+     * very well transition from thin to fat or change ownership while
+     * we work.
+     *
+     * NOTE: we rely on the fact that they cannot revert from fat to thin
+     * while we work.  This is currently a safe assumption.
+     *
+     * We can safely ignore thin-locked children, because by definition
+     * they have no history and are leaf nodes.  In the current
+     * implementation we always fatten the locks to provide a place to
+     * hang the stack trace.
+     */
+    ExpandingObjectList* pList = &mon->historyChildren;
+    int i;
+    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
+        const Object* child = expandBufGetEntry(pList, i);
+        /* snapshot the lock word before testing it */
+        Lock lock = child->lock;
+        if (!IS_LOCK_FAT(&lock))
+            continue;
+        if (!traverseTree(self, child)) {
+            /* propagate failure, logging this link in the chain */
+            LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
+            dvmLogRawStackTrace(mon->historyRawStackTrace,
+                mon->historyStackDepth);
+            mon->historyMark = false;
+            return false;
+        }
+    }
+
+    /* unmark on the way out so later traversals start clean */
+    mon->historyMark = false;
+
+    return true;
+}
+
+/*
+ * Update the deadlock prediction tree, based on the current thread
+ * acquiring "acqObj".  This must be called before the object is added to
+ * the thread's list of held monitors.
+ *
+ * If the thread already holds the lock (recursion), or this is a known
+ * lock configuration, we return without doing anything.  Otherwise, we add
+ * a link from the most-recently-acquired lock in this thread to "acqObj"
+ * after ensuring that the parent lock is "fat".
+ *
+ * This MUST NOT be called while a GC is in progress in another thread,
+ * because we assume exclusive access to history trees in owned monitors.
+ */
+static void updateDeadlockPrediction(Thread* self, Object* acqObj)
+{
+    LockedObjectData* lod;
+    LockedObjectData* mrl;
+
+    /*
+     * Quick check for recursive access.
+     */
+    lod = dvmFindInMonitorList(self, acqObj);
+    if (lod != NULL) {
+        LOGV("+++ DP: recursive %p\n", acqObj);
+        return;
+    }
+
+    /*
+     * Make the newly-acquired object's monitor "fat".  In some ways this
+     * isn't strictly necessary, but we need the GC to tell us when
+     * "interesting" objects go away, and right now the only way to make
+     * an object look interesting is to give it a monitor.
+     *
+     * This also gives us a place to hang a stack trace.
+     *
+     * Our thread holds the lock, so we're allowed to rewrite the lock
+     * without worrying that something will change out from under us.
+     */
+    if (!IS_LOCK_FAT(&acqObj->lock)) {
+        LOGVV("fattening lockee %p (recur=%d)\n",
+            acqObj, acqObj->lock.thin >> 16);
+        Monitor* newMon = dvmCreateMonitor(acqObj);
+        lockMonitor(self, newMon);      // can't stall, don't need VMWAIT
+        /* carry the thin lock's recursion count (upper 16 bits) over */
+        newMon->lockCount += acqObj->lock.thin >> 16;
+        acqObj->lock.mon = newMon;
+    }
+
+    /* if we don't have a stack trace for this monitor, establish one */
+    if (acqObj->lock.mon->historyRawStackTrace == NULL) {
+        Monitor* mon = acqObj->lock.mon;
+        mon->historyRawStackTrace = dvmFillInStackTraceRaw(self,
+            &mon->historyStackDepth);
+    }
+
+    /*
+     * We need to examine and perhaps modify the most-recently-locked
+     * monitor.  We own that, so there's no risk of another thread
+     * stepping on us.
+     *
+     * Retrieve the most-recently-locked entry from our thread.
+     */
+    mrl = self->pLockedObjects;
+    if (mrl == NULL)
+        return;         /* no other locks held */
+
+    /*
+     * Do a quick check to see if "acqObj" is a direct descendant.  We can do
+     * this without holding the global lock because of our assertion that
+     * a GC is not running in parallel -- nobody except the GC can
+     * modify a history list in a Monitor they don't own, and we own "mrl".
+     * (There might be concurrent *reads*, but no concurrent *writes.)
+     *
+     * If we find it, this is a known good configuration, and we're done.
+     */
+    if (objectInChildList(mrl->obj, acqObj))
+        return;
+
+    /*
+     * "mrl" is going to need to have a history tree.  If it's currently
+     * a thin lock, we make it fat now.  The thin lock might have a
+     * nonzero recursive lock count, which we need to carry over.
+     *
+     * Our thread holds the lock, so we're allowed to rewrite the lock
+     * without worrying that something will change out from under us.
+     */
+    if (!IS_LOCK_FAT(&mrl->obj->lock)) {
+        LOGVV("fattening parent %p f/b/o child %p (recur=%d)\n",
+            mrl->obj, acqObj, mrl->obj->lock.thin >> 16);
+        Monitor* newMon = dvmCreateMonitor(mrl->obj);
+        lockMonitor(self, newMon);      // can't stall, don't need VMWAIT
+        newMon->lockCount += mrl->obj->lock.thin >> 16;
+        mrl->obj->lock.mon = newMon;
+    }
+
+    /*
+     * We haven't seen this configuration before.  We need to scan down
+     * acqObj's tree to see if any of the monitors in self->pLockedObjects
+     * appear.  We grab a global lock before traversing or updating the
+     * history list.
+     *
+     * If we find a match for any of our held locks, we know that the lock
+     * has previously been acquired *after* acqObj, and we throw an error.
+     *
+     * The easiest way to do this is to create a link from "mrl" to "acqObj"
+     * and do a recursive traversal, marking nodes as we cross them.  If
+     * we cross one a second time, we have a cycle and can throw an error.
+     * (We do the flag-clearing traversal before adding the new link, so
+     * that we're guaranteed to terminate.)
+     *
+     * If "acqObj" is a thin lock, it has no history, and we can create a
+     * link to it without additional checks.  [ We now guarantee that it's
+     * always fat. ]
+     */
+    bool failed = false;
+    dvmLockMutex(&gDvm.deadlockHistoryLock);
+    linkParentToChild(mrl->obj, acqObj);
+    if (!traverseTree(self, acqObj)) {
+        LOGW("%s\n", kEndBanner);
+        failed = true;
+
+        /* remove the entry so we're still okay when in "warning" mode */
+        unlinkParentFromChild(mrl->obj, acqObj);
+    }
+    dvmUnlockMutex(&gDvm.deadlockHistoryLock);
+
+    if (failed) {
+        /* react according to the configured policy */
+        switch (gDvm.deadlockPredictMode) {
+        case kDPErr:
+            dvmThrowException("Ldalvik/system/PotentialDeadlockError;", NULL);
+            break;
+        case kDPAbort:
+            LOGE("Aborting due to potential deadlock\n");
+            dvmAbort();
+            break;
+        default:
+            /* warn only */
+            break;
+        }
+    }
+}
+
+/*
+ * We're removing "child" from existence.  We want to pull all of
+ * child's children into "parent", filtering out duplicates.  This is
+ * called during the GC.
+ *
+ * This does not modify "child", which might have multiple parents.
+ */
+static void mergeChildren(Object* parent, const Object* child)
+{
+    Monitor* mon;
+    int i;
+
+    assert(IS_LOCK_FAT(&child->lock));
+    mon = child->lock.mon;
+    ExpandingObjectList* pList = &mon->historyChildren;
+
+    /* linkParentToChild modifies "parent" and the grandchild, never
+     * "child", so iterating child's list while linking is safe.
+     */
+    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
+        Object* grandChild = expandBufGetEntry(pList, i);
+
+        if (!objectInChildList(parent, grandChild)) {
+            LOGVV("+++  migrating %p link to %p\n", grandChild, parent);
+            linkParentToChild(parent, grandChild);
+        } else {
+            LOGVV("+++  parent %p already links to %p\n", parent, grandChild);
+        }
+    }
+}
+
+/*
+ * An object with a fat lock is being collected during a GC pass.  We
+ * want to remove it from any lock history trees that it is a part of.
+ *
+ * This may require updating the history trees in several monitors.  The
+ * monitor semantics guarantee that no other thread will be accessing
+ * the history trees at the same time.
+ */
+static void removeCollectedObject(Object* obj)
+{
+    Monitor* mon;
+
+    LOGVV("+++ collecting %p\n", obj);
+
+#if 0
+    /*
+     * We're currently running through the entire set of known monitors.
+     * This can be somewhat slow.  We may want to keep lists of parents
+     * in each child to speed up GC.
+     */
+    mon = gDvm.monitorList;
+    while (mon != NULL) {
+        Object* parent = mon->obj;
+        if (parent != NULL) {       /* value nulled for deleted entries */
+            if (objectInChildList(parent, obj)) {
+                LOGVV("removing child %p from parent %p\n", obj, parent);
+                unlinkParentFromChild(parent, obj);
+                mergeChildren(parent, obj);
+            }
+        }
+        mon = mon->next;
+    }
+#endif
+
+    /*
+     * For every parent of this object:
+     *  - merge all of our children into the parent's child list (creates
+     *    a two-way link between parent and child)
+     *  - remove ourselves from the parent's child list
+     */
+    ExpandingObjectList* pList;
+    int i;
+
+    assert(IS_LOCK_FAT(&obj->lock));
+    mon = obj->lock.mon;
+    pList = &mon->historyParents;
+    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
+        Object* parent = expandBufGetEntry(pList, i);
+        Monitor* parentMon = parent->lock.mon;
+
+        /* only the parent->child direction is removed here; our own
+         * parent list disappears along with this object
+         */
+        if (!expandObjRemoveEntry(&parentMon->historyChildren, obj)) {
+            LOGW("WARNING: child %p not found in parent %p\n", obj, parent);
+        }
+        assert(!expandObjHas(&parentMon->historyChildren, obj));
+
+        mergeChildren(parent, obj);
+    }
+
+    /*
+     * For every child of this object:
+     *  - remove ourselves from the child's parent list
+     */
+    pList = &mon->historyChildren;
+    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
+        Object* child = expandBufGetEntry(pList, i);
+        Monitor* childMon = child->lock.mon;
+
+        if (!expandObjRemoveEntry(&childMon->historyParents, obj)) {
+            LOGW("WARNING: parent %p not found in child %p\n", obj, child);
+        }
+        assert(!expandObjHas(&childMon->historyParents, obj));
+    }
+}
+
+#endif /*WITH_DEADLOCK_PREDICTION*/
+
diff --git a/vm/Sync.h b/vm/Sync.h
new file mode 100644
index 0000000..8f8867b
--- /dev/null
+++ b/vm/Sync.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Object synchronization functions.
+ */
+#ifndef _DALVIK_SYNC
+#define _DALVIK_SYNC
+
+struct Object;
+struct Monitor;
+struct Thread;
+typedef struct Monitor Monitor;
+
+#define QUIET_ZYGOTE_MONITOR 1
+
+/*
+ * Synchronization lock, included in every object.
+ *
+ * We want this to be a 32-bit "thin lock", holding the lock level and
+ * the owner's threadId, that inflates to a Monitor pointer when there
+ * is contention or somebody waits on it.
+ */
+typedef union Lock {
+    u4          thin;
+    Monitor*    mon;
+} Lock;
+
+/*
+ * Initialize a Lock to the proper starting value.
+ * This is necessary for thin locking.
+ */
+#define THIN_LOCKING 1
+#if THIN_LOCKING
+#define DVM_LOCK_INITIAL_THIN_VALUE (0x1)
+#else
+#define DVM_LOCK_INITIAL_THIN_VALUE (0)
+#endif
+#define DVM_LOCK_INIT(lock) \
+    do { (lock)->thin = DVM_LOCK_INITIAL_THIN_VALUE; } while (0)
+
+/*
+ * Returns true if the lock has been fattened.
+ *
+ * Thin locks keep bit 0 set (see DVM_LOCK_INITIAL_THIN_VALUE), so a
+ * value with bit 0 clear is a Monitor pointer; this relies on Monitor
+ * pointers always being even-aligned.
+ */
+#define IS_LOCK_FAT(lock)   (((lock)->thin & 1) == 0 && (lock)->mon != NULL)
+
+/*
+ * Acquire the object's monitor.
+ */
+void dvmLockObject(struct Thread* self, struct Object* obj);
+
+/* Returns true if the unlock succeeded.
+ * If the unlock failed, an exception will be pending.
+ */
+bool dvmUnlockObject(struct Thread* self, struct Object* obj);
+
+/*
+ * Implementations of some java/lang/Object calls.
+ */
+void dvmObjectWait(struct Thread* self, struct Object* obj,
+    s8 timeout, s4 nanos, bool interruptShouldThrow);
+void dvmObjectNotify(struct Thread* self, struct Object* obj);
+void dvmObjectNotifyAll(struct Thread* self, struct Object* obj);
+
+/*
+ * Implementation of Thread.sleep().
+ */
+void dvmThreadSleep(u8 msec, u4 nsec);
+
+/* create a new Monitor struct */
+Monitor* dvmCreateMonitor(struct Object* obj);
+
+/* free an object's monitor during GC (no-op for thin locks; evaluates
+ * "obj" exactly once) */
+void dvmFreeObjectMonitor_internal(Lock* lock);
+#define dvmFreeObjectMonitor(obj) \
+    do { \
+        Object *DFM_obj_ = (obj); \
+        if (IS_LOCK_FAT(&DFM_obj_->lock)) { \
+            dvmFreeObjectMonitor_internal(&DFM_obj_->lock); \
+        } \
+    } while (0)
+
+/* free monitor list */
+void dvmFreeMonitorList(void);
+
+/*
+ * Get the object a monitor is part of.
+ *
+ * Returns NULL if "mon" is NULL or the monitor is not part of an object
+ * (which should only happen for Thread.sleep() in the current implementation).
+ */
+struct Object* dvmGetMonitorObject(Monitor* mon);
+
+/*
+ * Checks whether the object is held by the specified thread.
+ */
+bool dvmHoldsLock(struct Thread* thread, struct Object* obj);
+
+/*
+ * Debug.
+ */
+void dvmDumpMonitorInfo(const char* msg);
+
+#endif /*_DALVIK_SYNC*/
diff --git a/vm/Thread.c b/vm/Thread.c
new file mode 100644
index 0000000..9b18c30
--- /dev/null
+++ b/vm/Thread.c
@@ -0,0 +1,3232 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Thread support.
+ */
+#include "Dalvik.h"
+
+#include "utils/threads.h"      // need Android thread priorities
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/mman.h>
+#include <errno.h>
+
+#if defined(HAVE_PRCTL)
+#include <sys/prctl.h>
+#endif
+
+/* desktop Linux needs a little help with gettid() */
+#if defined(HAVE_GETTID) && !defined(HAVE_ANDROID_OS)
+#define __KERNEL__
+# include <linux/unistd.h>
+#ifdef _syscall0
+_syscall0(pid_t,gettid)
+#else
+pid_t gettid() { return syscall(__NR_gettid);}
+#endif
+#undef __KERNEL__
+#endif
+
+// change this to LOGV/LOGD to debug thread activity
+#define LOG_THREAD  LOGVV
+
+/*
+Notes on Threading
+
+All threads are native pthreads.  All threads, except the JDWP debugger
+thread, are visible to code running in the VM and to the debugger.  (We
+don't want the debugger to try to manipulate the thread that listens for
+instructions from the debugger.)  Internal VM threads are in the "system"
+ThreadGroup, all others are in the "main" ThreadGroup, per convention.
+
+The GC only runs when all threads have been suspended.  Threads are
+expected to suspend themselves, using a "safe point" mechanism.  We check
+for a suspend request at certain points in the main interpreter loop,
+and on requests coming in from native code (e.g. all JNI functions).
+Certain debugger events may inspire threads to self-suspend.
+
+Native methods must use JNI calls to modify object references to avoid
+clashes with the GC.  JNI doesn't provide a way for native code to access
+arrays of objects as such -- code must always get/set individual entries --
+so it should be possible to fully control access through JNI.
+
+Internal native VM threads, such as the finalizer thread, must explicitly
+check for suspension periodically.  In most cases they will be sound
+asleep on a condition variable, and won't notice the suspension anyway.
+
+Threads may be suspended by the GC, debugger, or the SIGQUIT listener
+thread.  The debugger may suspend or resume individual threads, while the
+GC always suspends all threads.  Each thread has a "suspend count" that
+is incremented on suspend requests and decremented on resume requests.
+When the count is zero, the thread is runnable.  This allows us to fulfill
+a debugger requirement: if the debugger suspends a thread, the thread is
+not allowed to run again until the debugger resumes it (or disconnects,
+in which case we must resume all debugger-suspended threads).
+
+Paused threads sleep on a condition variable, and are awoken en masse.
+Certain "slow" VM operations, such as starting up a new thread, will be
+done in a separate "VMWAIT" state, so that the rest of the VM doesn't
+freeze up waiting for the operation to finish.  Threads must check for
+pending suspension when leaving VMWAIT.
+
+Because threads suspend themselves while interpreting code or when native
+code makes JNI calls, there is no risk of suspending while holding internal
+VM locks.  All threads can enter a suspended (or native-code-only) state.
+Also, we don't have to worry about object references existing solely
+in hardware registers.
+
+We do, however, have to worry about objects that were allocated internally
+and aren't yet visible to anything else in the VM.  If we allocate an
+object, and then go to sleep on a mutex after changing to a non-RUNNING
+state (e.g. while trying to allocate a second object), the first object
+could be garbage-collected out from under us while we sleep.  To manage
+this, we automatically add all allocated objects to an internal object
+tracking list, and only remove them when we know we won't be suspended
+before the object appears in the GC root set.
+
+The debugger may choose to suspend or resume a single thread, which can
+lead to application-level deadlocks; this is expected behavior.  The VM
+will only check for suspension of single threads when the debugger is
+active (the java.lang.Thread calls for this are deprecated and hence are
+not supported).  Resumption of a single thread is handled by decrementing
+the thread's suspend count and sending a broadcast signal to the condition
+variable.  (This will cause all threads to wake up and immediately go back
+to sleep, which isn't tremendously efficient, but neither is having the
+debugger attached.)
+
+The debugger is not allowed to resume threads suspended by the GC.  This
+is trivially enforced by ignoring debugger requests while the GC is running
+(the JDWP thread is suspended during GC).
+
+The VM maintains a Thread struct for every pthread known to the VM.  There
+is a java/lang/Thread object associated with every Thread.  At present,
+there is no safe way to go from a Thread object to a Thread struct except by
+locking and scanning the list; this is necessary because the lifetimes of
+the two are not closely coupled.  We may want to change this behavior,
+though at present the only performance impact is on the debugger (see
+threadObjToThread()).  See also notes about dvmDetachCurrentThread().
+*/
+/*
+Alternate implementation (signal-based):
+
+Threads run without safe points -- zero overhead.  The VM uses a signal
+(e.g. pthread_kill(SIGUSR1)) to notify threads of suspension or resumption.
+
+The trouble with using signals to suspend threads is that it means a thread
+can be in the middle of an operation when garbage collection starts.
+To prevent some sticky situations, we have to introduce critical sections
+to the VM code.
+
+Critical sections temporarily block suspension for a given thread.
+The thread must move to a non-blocked state (and self-suspend) after
+finishing its current task.  If the thread blocks on a resource held
+by a suspended thread, we're hosed.
+
+One approach is to require that no blocking operations, notably
+acquisition of mutexes, can be performed within a critical section.
+This is too limiting.  For example, if thread A gets suspended while
+holding the thread list lock, it will prevent the GC or debugger from
+being able to safely access the thread list.  We need to wrap the critical
+section around the entire operation (enter critical, get lock, do stuff,
+release lock, exit critical).
+
+A better approach is to declare that certain resources can only be held
+within critical sections.  A thread that enters a critical section and
+then gets blocked on the thread list lock knows that the thread it is
+waiting for is also in a critical section, and will release the lock
+before suspending itself.  Eventually all threads will complete their
+operations and self-suspend.  For this to work, the VM must:
+
+ (1) Determine the set of resources that may be accessed from the GC or
+     debugger threads.  The mutexes guarding those go into the "critical
+     resource set" (CRS).
+ (2) Ensure that no resource in the CRS can be acquired outside of a
+     critical section.  This can be verified with an assert().
+ (3) Ensure that only resources in the CRS can be held while in a critical
+     section.  This is harder to enforce.
+
+If any of these conditions are not met, deadlock can ensue when grabbing
+resources in the GC or debugger (#1) or waiting for threads to suspend
+(#2,#3).  (You won't actually deadlock in the GC, because if the semantics
+above are followed you don't need to lock anything in the GC.  The risk is
+rather that the GC will access data structures in an intermediate state.)
+
+This approach requires more care and awareness in the VM than
+safe-pointing.  Because the GC and debugger are fairly intrusive, there
+really aren't any internal VM resources that aren't shared.  Thus, the
+enter/exit critical calls can be added to internal mutex wrappers, which
+makes it easy to get #1 and #2 right.
+
+An ordering should be established for all locks to avoid deadlocks.
+
+Monitor locks, which are also implemented with pthread calls, should not
+cause any problems here.  Threads fighting over such locks will not be in
+critical sections and can be suspended freely.
+
+This can get tricky if we ever need exclusive access to VM and non-VM
+resources at the same time.  It's not clear if this is a real concern.
+
+There are (at least) two ways to handle the incoming signals:
+
+ (a) Always accept signals.  If we're in a critical section, the signal
+     handler just returns without doing anything (the "suspend level"
+     should have been incremented before the signal was sent).  Otherwise,
+     if the "suspend level" is nonzero, we go to sleep.
+ (b) Block signals in critical sections.  This ensures that we can't be
+     interrupted in a critical section, but requires pthread_sigmask()
+     calls on entry and exit.
+
+This is a choice between blocking the message and blocking the messenger.
+Because UNIX signals are unreliable (you can only know that you have been
+signaled, not whether you were signaled once or 10 times), the choice is
+not significant for correctness.  The choice depends on the efficiency
+of pthread_sigmask() and the desire to actually block signals.  Either way,
+it is best to ensure that there is only one indication of "blocked";
+having two (i.e. block signals and set a flag, then only send a signal
+if the flag isn't set) can lead to race conditions.
+
+The signal handler must take care to copy registers onto the stack (via
+setjmp), so that stack scans find all references.  Because we have to scan
+native stacks, "exact" GC is not possible with this approach.
+
+Some other concerns with flinging signals around:
+ - Odd interactions with some debuggers (e.g. gdb on the Mac)
+ - Restrictions on some standard library calls during GC (e.g. don't
+   use printf on stdout to print GC debug messages)
+*/
+
+#define kMaxThreadId        ((1<<15) - 1)
+#define kMainThreadId       ((1<<1) | 1)
+
+
+static Thread* allocThread(int interpStackSize);
+static bool prepareThread(Thread* thread);
+static void setThreadSelf(Thread* thread);
+static void unlinkThread(Thread* thread);
+static void freeThread(Thread* thread);
+static void assignThreadId(Thread* thread);
+static bool createFakeEntryFrame(Thread* thread);
+static bool createFakeRunFrame(Thread* thread);
+static void* interpThreadStart(void* arg);
+static void* internalThreadStart(void* arg);
+static void threadExitUncaughtException(Thread* thread, Object* group);
+static void threadExitCheck(void* arg);
+static void waitForThreadSuspend(Thread* self, Thread* thread);
+static int getThreadPriorityFromSystem(void);
+
+
+/*
+ * Initialize thread list and main thread's environment.  We need to set
+ * up some basic stuff so that dvmThreadSelf() will work when we start
+ * loading classes (e.g. to check for exceptions).
+ */
+bool dvmThreadStartup(void)
+{
+    Thread* thread;
+
+    /* allocate a TLS slot; threadExitCheck fires if a thread dies with
+     * a non-NULL "self" still installed */
+    if (pthread_key_create(&gDvm.pthreadKeySelf, threadExitCheck) != 0) {
+        LOGE("ERROR: pthread_key_create failed\n");
+        return false;
+    }
+
+    /* test our pthread lib */
+    if (pthread_getspecific(gDvm.pthreadKeySelf) != NULL)
+        LOGW("WARNING: newly-created pthread TLS slot is not NULL\n");
+
+    /* prep thread-related locks and conditions */
+    dvmInitMutex(&gDvm.threadListLock);
+    pthread_cond_init(&gDvm.threadStartCond, NULL);
+    //dvmInitMutex(&gDvm.vmExitLock);
+    pthread_cond_init(&gDvm.vmExitCond, NULL);
+    dvmInitMutex(&gDvm._threadSuspendLock);
+    dvmInitMutex(&gDvm.threadSuspendCountLock);
+    pthread_cond_init(&gDvm.threadSuspendCountCond, NULL);
+#ifdef WITH_DEADLOCK_PREDICTION
+    dvmInitMutex(&gDvm.deadlockHistoryLock);
+#endif
+
+    /*
+     * Dedicated monitor for Thread.sleep().
+     * TODO: change this to an Object* so we don't have to expose this
+     * call, and we interact better with JDWP monitor calls.  Requires
+     * deferring the object creation to much later (e.g. final "main"
+     * thread prep) or until first use.
+     */
+    /* NOTE(review): result not checked for NULL -- confirm dvmCreateMonitor
+     * cannot fail here, or add a check */
+    gDvm.threadSleepMon = dvmCreateMonitor(NULL);
+
+    /* NOTE(review): allocation result not checked for NULL */
+    gDvm.threadIdMap = dvmAllocBitVector(kMaxThreadId, true);
+
+    thread = allocThread(gDvm.stackSize);
+    if (thread == NULL)
+        return false;
+
+    /* switch mode for when we run initializers */
+    thread->status = THREAD_RUNNING;
+
+    /*
+     * We need to assign the threadId early so we can lock/notify
+     * object monitors.  We'll set the "threadObj" field later.
+     */
+    /* NOTE(review): prepareThread() can return false; result ignored here */
+    prepareThread(thread);
+    gDvm.threadList = thread;
+
+    return true;
+}
+
+/*
+ * We're a little farther up now, and can load some basic classes.
+ *
+ * We're far enough along that we can poke at java.lang.Thread and friends,
+ * but should not assume that static initializers have run (or cause them
+ * to do so).  That means no object allocations yet.
+ */
+bool dvmThreadObjStartup(void)
+{
+    /*
+     * Cache the locations of these classes.  It's likely that we're the
+     * first to reference them, so they're being loaded now.
+     */
+    gDvm.classJavaLangThread =
+        dvmFindSystemClassNoInit("Ljava/lang/Thread;");
+    gDvm.classJavaLangVMThread =
+        dvmFindSystemClassNoInit("Ljava/lang/VMThread;");
+    gDvm.classJavaLangThreadGroup =
+        dvmFindSystemClassNoInit("Ljava/lang/ThreadGroup;");
+    /* BUGFIX: the original test checked classJavaLangThreadGroup twice
+     * and never checked classJavaLangVMThread */
+    if (gDvm.classJavaLangThread == NULL ||
+        gDvm.classJavaLangVMThread == NULL ||
+        gDvm.classJavaLangThreadGroup == NULL)
+    {
+        LOGE("Could not find one or more essential thread classes\n");
+        return false;
+    }
+
+    /*
+     * Cache field offsets.  This makes things a little faster, at the
+     * expense of hard-coding non-public field names into the VM.
+     */
+    gDvm.offJavaLangThread_vmThread =
+        dvmFindFieldOffset(gDvm.classJavaLangThread,
+            "vmThread", "Ljava/lang/VMThread;");
+    gDvm.offJavaLangThread_group =
+        dvmFindFieldOffset(gDvm.classJavaLangThread,
+            "group", "Ljava/lang/ThreadGroup;");
+    gDvm.offJavaLangThread_daemon =
+        dvmFindFieldOffset(gDvm.classJavaLangThread, "daemon", "Z");
+    gDvm.offJavaLangThread_name =
+        dvmFindFieldOffset(gDvm.classJavaLangThread,
+            "name", "Ljava/lang/String;");
+    gDvm.offJavaLangThread_priority =
+        dvmFindFieldOffset(gDvm.classJavaLangThread, "priority", "I");
+
+    if (gDvm.offJavaLangThread_vmThread < 0 ||
+        gDvm.offJavaLangThread_group < 0 ||
+        gDvm.offJavaLangThread_daemon < 0 ||
+        gDvm.offJavaLangThread_name < 0 ||
+        gDvm.offJavaLangThread_priority < 0)
+    {
+        LOGE("Unable to find all fields in java.lang.Thread\n");
+        return false;
+    }
+
+    gDvm.offJavaLangVMThread_thread =
+        dvmFindFieldOffset(gDvm.classJavaLangVMThread,
+            "thread", "Ljava/lang/Thread;");
+    gDvm.offJavaLangVMThread_vmData =
+        dvmFindFieldOffset(gDvm.classJavaLangVMThread, "vmData", "I");
+    if (gDvm.offJavaLangVMThread_thread < 0 ||
+        gDvm.offJavaLangVMThread_vmData < 0)
+    {
+        LOGE("Unable to find all fields in java.lang.VMThread\n");
+        return false;
+    }
+
+    /*
+     * Cache the vtable offset for "run()".
+     *
+     * We don't want to keep the Method* because then we won't see
+     * methods defined in subclasses.
+     */
+    Method* meth;
+    meth = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangThread, "run", "()V");
+    if (meth == NULL) {
+        LOGE("Unable to find run() in java.lang.Thread\n");
+        return false;
+    }
+    gDvm.voffJavaLangThread_run = meth->methodIndex;
+
+    /*
+     * Cache vtable offsets for ThreadGroup methods.
+     */
+    meth = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangThreadGroup,
+        "removeThread", "(Ljava/lang/Thread;)V");
+    if (meth == NULL) {
+        LOGE("Unable to find removeThread(Thread) in java.lang.ThreadGroup\n");
+        return false;
+    }
+    gDvm.voffJavaLangThreadGroup_removeThread = meth->methodIndex;
+
+    return true;
+}
+
+/*
+ * All threads should be stopped by now.  Clean up some thread globals.
+ */
+void dvmThreadShutdown(void)
+{
+    Thread* mainThread = gDvm.threadList;
+
+    if (mainThread != NULL) {
+        /* by now only the main thread should remain on the list */
+        assert(mainThread->next == NULL);
+        assert(mainThread->prev == NULL);
+        freeThread(mainThread);
+        gDvm.threadList = NULL;
+    }
+
+    dvmFreeBitVector(gDvm.threadIdMap);
+
+    dvmFreeMonitorList();
+
+    pthread_key_delete(gDvm.pthreadKeySelf);
+}
+
+
+/*
+ * Grab the suspend count global lock.
+ */
+static inline void lockThreadSuspendCount(void)
+{
+    /*
+     * Deliberately NOT switching to VMWAIT around this lock: returning
+     * to RUNNING checks for a pending suspend, which would re-enter this
+     * lock recursively -- something "fast" pthread mutexes don't allow.
+     *
+     * Hold times are always very short, so respecting mutex ordering is
+     * enough to keep us from stalling here.
+     */
+    int result = pthread_mutex_lock(&gDvm.threadSuspendCountLock);
+    assert(result == 0);
+}
+
+/*
+ * Release the suspend count global lock.
+ */
+static inline void unlockThreadSuspendCount(void)
+{
+    /* counterpart to lockThreadSuspendCount() */
+    dvmUnlockMutex(&gDvm.threadSuspendCountLock);
+}
+
+/*
+ * Grab the thread list global lock.
+ *
+ * This is held while "suspend all" is trying to make everybody stop.  If
+ * the shutdown is in progress, and somebody tries to grab the lock, they'll
+ * have to wait for the GC to finish.  Therefore it's important that the
+ * thread not be in RUNNING mode.
+ *
+ * We don't have to check to see if we should be suspended once we have
+ * the lock.  Nobody can suspend all threads without holding the thread list
+ * lock while they do it, so by definition there isn't a GC in progress.
+ */
+void dvmLockThreadList(Thread* self)
+{
+    ThreadStatus oldStatus;
+
+    if (self == NULL)       /* try to get it from TLS */
+        self = dvmThreadSelf();
+
+    if (self != NULL) {
+        /*
+         * Drop out of RUNNING while we block: a suspend-all in progress
+         * holds this lock, and a RUNNING thread sleeping on it would
+         * stall the suspend.  The old status is restored below.
+         */
+        oldStatus = self->status;
+        self->status = THREAD_VMWAIT;
+    } else {
+        /* happens for JNI AttachCurrentThread [not anymore?] */
+        //LOGW("NULL self in dvmLockThreadList\n");
+        oldStatus = -1;         // shut up gcc
+    }
+
+    int cc = pthread_mutex_lock(&gDvm.threadListLock);
+    assert(cc == 0);
+
+    /* restore previous status now that we hold the lock */
+    if (self != NULL)
+        self->status = oldStatus;
+}
+
+/*
+ * Release the thread list global lock.
+ */
+void dvmUnlockThreadList(void)
+{
+    int result = pthread_mutex_unlock(&gDvm.threadListLock);
+    assert(result == 0);
+}
+
+
+/*
+ * Grab the "thread suspend" lock.  This is required to prevent the
+ * GC and the debugger from simultaneously suspending all threads.
+ *
+ * If we fail to get the lock, somebody else is trying to suspend all
+ * threads -- including us.  If we go to sleep on the lock we'll deadlock
+ * the VM.  Loop until we get it or somebody puts us to sleep.
+ */
+static void lockThreadSuspend(const char* who, SuspendCause why)
+{
+    const int kMaxRetries = 10;
+    const int kSpinSleepTime = 3*1000*1000;        /* 3s */
+    u8 startWhen = 0;       // init req'd to placate gcc
+    int sleepIter = 0;
+    int cc;
+    
+    /*
+     * Spin with trylock rather than blocking: if the holder is trying to
+     * suspend *us*, sleeping on the mutex would deadlock the VM.  Each
+     * failed attempt first honors any pending suspend request, then backs
+     * off; after kSpinSleepTime total we assume wedge and abort.
+     */
+    do {
+        cc = pthread_mutex_trylock(&gDvm._threadSuspendLock);
+        if (cc != 0) {
+            if (!dvmCheckSuspendPending(NULL)) {
+                /*
+                 * Could be unusual JNI-attach thing, could be we hit
+                 * the window as the suspend or resume was started.  Could
+                 * also be the debugger telling us to resume at roughly
+                 * the same time we're posting an event.
+                 */
+                LOGI("threadid=%d ODD: thread-suspend lock held (%s:%d)"
+                     " but suspend not pending\n",
+                    dvmThreadSelf()->threadId, who, why);
+            }
+
+            /* give the lock-holder a chance to do some work */
+            if (sleepIter == 0)
+                startWhen = dvmGetRelativeTimeUsec();
+            if (!dvmIterativeSleep(sleepIter++, kSpinSleepTime, startWhen)) {
+                LOGE("threadid=%d: couldn't get thread-suspend lock (%s:%d),"
+                     " bailing\n",
+                    dvmThreadSelf()->threadId, who, why);
+                dvmDumpAllThreads(false);
+                dvmAbort();
+            }
+        }
+    } while (cc != 0);
+    assert(cc == 0);
+}
+
+/*
+ * Release the "thread suspend" lock.
+ */
+static inline void unlockThreadSuspend(void)
+{
+    int result = pthread_mutex_unlock(&gDvm._threadSuspendLock);
+    assert(result == 0);
+}
+
+
+/*
+ * Kill any daemon threads that still exist.  All of ours should be
+ * stopped, so these should be Thread objects or JNI-attached threads
+ * started by the application.  Actively-running threads are likely
+ * to crash the process if they continue to execute while the VM
+ * shuts down, so we really need to kill or suspend them.  (If we want
+ * the VM to restart within this process, we need to kill them, but that
+ * leaves open the possibility of orphaned resources.)
+ *
+ * Waiting for the thread to suspend may be unwise at this point, but
+ * if one of these is wedged in a critical section then we probably
+ * would've locked up on the last GC attempt.
+ *
+ * It's possible for this function to get called after a failed
+ * initialization, so be careful with assumptions about the environment.
+ */
+void dvmSlayDaemons(void)
+{
+    Thread* self = dvmThreadSelf();
+    Thread* target;
+    Thread* nextTarget;
+
+    /* can happen after a failed VM init; nothing to do */
+    if (self == NULL)
+        return;
+
+    //dvmEnterCritical(self);
+    dvmLockThreadList(self);
+
+    /* walk the list, unlinking/freeing daemons; grab "next" before unlink */
+    target = gDvm.threadList;
+    while (target != NULL) {
+        if (target == self) {
+            target = target->next;
+            continue;
+        }
+
+        /* NOTE(review): assumes target->threadObj is non-NULL here --
+         * confirm for threads that died mid-attach */
+        if (!dvmGetFieldBoolean(target->threadObj,
+                gDvm.offJavaLangThread_daemon))
+        {
+            LOGW("threadid=%d: non-daemon id=%d still running at shutdown?!\n",
+                self->threadId, target->threadId);
+            target = target->next;
+            continue;
+        }
+
+        LOGI("threadid=%d: killing leftover daemon threadid=%d [TODO]\n",
+            self->threadId, target->threadId);
+        // TODO: suspend and/or kill the thread
+        // (at the very least, we can "rescind their JNI privileges")
+
+        /* remove from list */
+        nextTarget = target->next;
+        unlinkThread(target);
+
+        freeThread(target);
+        target = nextTarget;
+    }
+
+    dvmUnlockThreadList();
+    //dvmExitCritical(self);
+}
+
+
+/*
+ * Finish preparing the parts of the Thread struct required to support
+ * JNI registration.
+ */
+bool dvmPrepMainForJni(JNIEnv* pEnv)
+{
+    /* the main thread is the sole entry on the list at this stage */
+    Thread* mainThread = gDvm.threadList;
+    assert(mainThread->threadId == kMainThreadId);
+
+    /* push a "fake" JNI frame onto the main thread's interp stack */
+    if (!createFakeEntryFrame(mainThread))
+        return false;
+
+    /* these weren't available back when dvmCreateJNIEnv ran */
+    dvmSetJniEnvThreadId(pEnv, mainThread);
+    dvmSetThreadJNIEnv(mainThread, (JNIEnv*) pEnv);
+
+    return true;
+}
+
+
+/*
+ * Finish preparing the main thread, allocating some objects to represent
+ * it.  As part of doing so, we finish initializing Thread and ThreadGroup.
+ */
+bool dvmPrepMainThread(void)
+{
+    Thread* thread;
+    Object* groupObj;
+    Object* threadObj;
+    Object* vmThreadObj;
+    StringObject* threadNameStr;
+    Method* init;
+    JValue unused;
+
+    LOGV("+++ finishing prep on main VM thread\n");
+
+    /* main thread is always first in list at this point */
+    thread = gDvm.threadList;
+    assert(thread->threadId == kMainThreadId);
+
+    /*
+     * Make sure the classes are initialized.  We have to do this before
+     * we create an instance of them.
+     */
+    if (!dvmInitClass(gDvm.classJavaLangClass)) {
+        LOGE("'Class' class failed to initialize\n");
+        return false;
+    }
+    if (!dvmInitClass(gDvm.classJavaLangThreadGroup) ||
+        !dvmInitClass(gDvm.classJavaLangThread) ||
+        !dvmInitClass(gDvm.classJavaLangVMThread))
+    {
+        LOGE("thread classes failed to initialize\n");
+        return false;
+    }
+
+    groupObj = dvmGetMainThreadGroup();
+    if (groupObj == NULL)
+        return false;
+
+    /*
+     * Allocate and construct a Thread with the internal-creation
+     * constructor.
+     */
+    threadObj = dvmAllocObject(gDvm.classJavaLangThread, ALLOC_DEFAULT);
+    if (threadObj == NULL) {
+        LOGE("unable to allocate main thread object\n");
+        return false;
+    }
+    /* release the tracked-alloc reference; object is reachable below */
+    dvmReleaseTrackedAlloc(threadObj, NULL);
+
+    threadNameStr = dvmCreateStringFromCstr("main", ALLOC_DEFAULT);
+    if (threadNameStr == NULL)
+        return false;
+    dvmReleaseTrackedAlloc((Object*)threadNameStr, NULL);
+
+    init = dvmFindDirectMethodByDescriptor(gDvm.classJavaLangThread, "<init>",
+            "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
+    assert(init != NULL);
+    dvmCallMethod(thread, init, threadObj, &unused, groupObj, threadNameStr,
+        THREAD_NORM_PRIORITY, false);
+    if (dvmCheckException(thread)) {
+        LOGE("exception thrown while constructing main thread object\n");
+        return false;
+    }
+
+    /*
+     * Allocate and construct a VMThread.
+     */
+    vmThreadObj = dvmAllocObject(gDvm.classJavaLangVMThread, ALLOC_DEFAULT);
+    if (vmThreadObj == NULL) {
+        LOGE("unable to allocate main vmthread object\n");
+        return false;
+    }
+    dvmReleaseTrackedAlloc(vmThreadObj, NULL);
+
+    /* NOTE(review): "init" not asserted non-NULL here, unlike the
+     * Thread constructor lookup above -- confirm lookup cannot fail */
+    init = dvmFindDirectMethodByDescriptor(gDvm.classJavaLangVMThread, "<init>",
+            "(Ljava/lang/Thread;)V");
+    dvmCallMethod(thread, init, vmThreadObj, &unused, threadObj);
+    if (dvmCheckException(thread)) {
+        LOGE("exception thrown while constructing main vmthread object\n");
+        return false;
+    }
+
+    /* set the VMThread.vmData field to our Thread struct */
+    /* (stores the native pointer in an int field; assumes 32-bit pointers) */
+    assert(gDvm.offJavaLangVMThread_vmData != 0);
+    dvmSetFieldInt(vmThreadObj, gDvm.offJavaLangVMThread_vmData, (u4)thread);
+
+    /*
+     * Stuff the VMThread back into the Thread.  From this point on, other
+     * Threads will see that this Thread is running.
+     */
+    dvmSetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread,
+        vmThreadObj);
+
+    thread->threadObj = threadObj;
+
+    /*
+     * Finish our thread prep.
+     */
+
+    /* include self in non-daemon threads (mainly for AttachCurrentThread) */
+    gDvm.nonDaemonThreadCount++;
+
+    return true;
+}
+
+
+/*
+ * Alloc and initialize a Thread struct.
+ *
+ * "threadObj" is the java.lang.Thread object.  It will be NULL for the
+ * main VM thread, but non-NULL for everything else.
+ *
+ * Does not create any objects, just stuff on the system (malloc) heap.  (If
+ * this changes, we need to use ALLOC_NO_GC.  And also verify that we're
+ * ready to load classes at the time this is called.)
+ */
+static Thread* allocThread(int interpStackSize)
+{
+    Thread* thread;
+    u1* stackBottom;
+
+    /* zero-fill so all pointers/fields start NULL/0 */
+    thread = (Thread*) calloc(1, sizeof(Thread));
+    if (thread == NULL)
+        return NULL;
+
+    assert(interpStackSize >= kMinStackSize && interpStackSize <=kMaxStackSize);
+
+    thread->status = THREAD_INITIALIZING;
+    thread->suspendCount = 0;
+
+#ifdef WITH_ALLOC_LIMITS
+    thread->allocLimit = -1;
+#endif
+
+    /*
+     * Allocate and initialize the interpreted code stack.  We essentially
+     * "lose" the alloc pointer, which points at the bottom of the stack,
+     * but we can get it back later because we know how big the stack is.
+     *
+     * The stack must be aligned on a 4-byte boundary.
+     */
+#ifdef MALLOC_INTERP_STACK
+    stackBottom = (u1*) malloc(interpStackSize);
+    if (stackBottom == NULL) {
+        free(thread);
+        return NULL;
+    }
+    memset(stackBottom, 0xc5, interpStackSize);     // stop valgrind complaints
+#else
+    /* anonymous private mapping; freeThread() munmaps it */
+    stackBottom = mmap(NULL, interpStackSize, PROT_READ | PROT_WRITE,
+        MAP_PRIVATE | MAP_ANON, -1, 0);
+    if (stackBottom == MAP_FAILED) {
+        free(thread);
+        return NULL;
+    }
+#endif
+
+    assert(((u4)stackBottom & 0x03) == 0); // looks like our malloc ensures this
+    /* stack grows downward: "start" is the high end, "end" leaves a
+     * reserve for overflow handling */
+    thread->interpStackSize = interpStackSize;
+    thread->interpStackStart = stackBottom + interpStackSize;
+    thread->interpStackEnd = stackBottom + STACK_OVERFLOW_RESERVE;
+
+    /* give the thread code a chance to set things up */
+    dvmInitInterpStack(thread, interpStackSize);
+
+    return thread;
+}
+
+/*
+ * Get a meaningful thread ID.  At present this only has meaning under Linux,
+ * where getpid() and gettid() sometimes agree and sometimes don't depending
+ * on your thread model (try "export LD_ASSUME_KERNEL=2.4.19").
+ */
+pid_t dvmGetSysThreadId(void)
+{
+#ifdef HAVE_GETTID
+    /* Linux: kernel task ID, unique per thread */
+    return gettid();
+#else
+    /* no gettid() available; process ID is the best we can do */
+    return getpid();
+#endif
+}
+
+/*
+ * Finish initialization of a Thread struct.
+ *
+ * This must be called while executing in the new thread, but before the
+ * thread is added to the thread list.
+ *
+ * *** NOTE: The threadListLock must be held by the caller (needed for
+ * assignThreadId()).
+ */
+static bool prepareThread(Thread* thread)
+{
+    /* requires threadListLock (see function comment above) */
+    assignThreadId(thread);
+    thread->handle = pthread_self();
+    thread->systemTid = dvmGetSysThreadId();
+
+    //LOGI("SYSTEM TID IS %d (pid is %d)\n", (int) thread->systemTid,
+    //    (int) getpid());
+    /* stash "thread" in TLS so dvmThreadSelf() works from here on */
+    setThreadSelf(thread);
+
+    LOGV("threadid=%d: interp stack at %p\n",
+        thread->threadId, thread->interpStackStart - thread->interpStackSize);
+
+    /*
+     * Initialize invokeReq.
+     */
+    pthread_mutex_init(&thread->invokeReq.lock, NULL);
+    pthread_cond_init(&thread->invokeReq.cv, NULL);
+
+    /*
+     * Initialize our reference tracking tables.
+     *
+     * The JNI local ref table *must* be fixed-size because we keep pointers
+     * into the table in our stack frames.
+     *
+     * Most threads won't use jniMonitorRefTable, so we clear out the
+     * structure but don't call the init function (which allocs storage).
+     */
+    if (!dvmInitReferenceTable(&thread->jniLocalRefTable,
+            kJniLocalRefMax, kJniLocalRefMax))
+        return false;
+    if (!dvmInitReferenceTable(&thread->internalLocalRefTable,
+            kInternalRefDefault, kInternalRefMax))
+        return false;
+
+    memset(&thread->jniMonitorRefTable, 0, sizeof(thread->jniMonitorRefTable));
+
+    return true;
+}
+
+/*
+ * Remove a thread from the internal list.
+ * Clear out the links to make it obvious that the thread is
+ * no longer on the list.  Caller must hold gDvm.threadListLock.
+ */
+static void unlinkThread(Thread* thread)
+{
+    LOG_THREAD("threadid=%d: removing from list\n", thread->threadId);
+
+    Thread* prev = thread->prev;
+    Thread* next = thread->next;
+
+    if (prev == NULL) {
+        /* unlinking the list head */
+        assert(thread == gDvm.threadList);
+        gDvm.threadList = next;
+    } else {
+        assert(thread != gDvm.threadList);
+        prev->next = next;
+    }
+    if (next != NULL)
+        next->prev = prev;
+
+    /* poison the links so stale use is obvious */
+    thread->prev = thread->next = NULL;
+}
+
+/*
+ * Free a Thread struct, and all the stuff allocated within.
+ */
+static void freeThread(Thread* thread)
+{
+    if (thread == NULL)
+        return;
+
+    /* thread->threadId is zero at this point */
+    LOGVV("threadid=%d: freeing\n", thread->threadId);
+
+    if (thread->interpStackStart != NULL) {
+        u1* interpStackBottom;
+
+        interpStackBottom = thread->interpStackStart;
+        interpStackBottom -= thread->interpStackSize;
+#ifdef MALLOC_INTERP_STACK
+        free(interpStackBottom);
+#else
+        if (munmap(interpStackBottom, thread->interpStackSize) != 0)
+            LOGW("munmap(thread stack) failed\n");
+#endif
+    }
+
+    dvmClearReferenceTable(&thread->jniLocalRefTable);
+    dvmClearReferenceTable(&thread->internalLocalRefTable);
+    /*
+     * jniMonitorRefTable is lazily initialized; only clear it if its
+     * storage was actually allocated.  BUGFIX: the original tested
+     * "&thread->jniMonitorRefTable.table != NULL", i.e. the address of
+     * the field, which is never NULL.
+     */
+    if (thread->jniMonitorRefTable.table != NULL)
+        dvmClearReferenceTable(&thread->jniMonitorRefTable);
+
+    free(thread);
+}
+
+/*
+ * Like pthread_self(), but on a Thread*.
+ */
+Thread* dvmThreadSelf(void)
+{
+    return (Thread*) pthread_getspecific(gDvm.pthreadKeySelf);
+}
+
+/*
+ * Explore our sense of self.  Stuffs the thread pointer into TLS.
+ */
+static void setThreadSelf(Thread* thread)
+{
+    int cc;
+
+    cc = pthread_setspecific(gDvm.pthreadKeySelf, thread);
+    if (cc != 0) {
+        LOGE("pthread_setspecific failed, err=%d\n", cc);
+        //dvmAbort();     /* the world is fundamentally hosed */
+    }
+}
+
+/*
+ * This is associated with the pthreadKeySelf key.  It's called by the
+ * pthread library when a thread is exiting and the "self" pointer in TLS
+ * is non-NULL, meaning the VM hasn't had a chance to clean up.  In normal
+ * operation this should never be called.
+ *
+ * This is mainly of use to ensure that we don't leak resources if, for
+ * example, a thread attaches itself to us with AttachCurrentThread and
+ * then exits without notifying the VM.
+ */
+static void threadExitCheck(void* arg)
+{
+    Thread* thread = (Thread*) arg;
+
+    LOGI("In threadExitCheck %p\n", arg);
+    assert(thread != NULL);
+
+    if (thread->status != THREAD_ZOMBIE) {
+        /* TODO: instead of failing, we could call dvmDetachCurrentThread() */
+        LOGE("Native thread exited without telling us\n");
+        dvmAbort();
+    }
+}
+
+
+/*
+ * Assign the threadId.  This needs to be a small integer so that our
+ * "thin" locks fit in a small number of bits.
+ *
+ * We reserve zero for use as an invalid ID.
+ *
+ * This must be called with threadListLock held (unless we're still
+ * initializing the system).
+ */
+static void assignThreadId(Thread* thread)
+{
+    /* Find a small unique integer.  threadIdMap is a vector of
+     * kMaxThreadId bits;  dvmAllocBit() returns the index of a
+     * bit, meaning that it will always be < kMaxThreadId.
+     *
+     * The thin locking magic requires that the low bit is always
+     * set, so we do it once, here.
+     */
+    thread->threadId = ((dvmAllocBit(gDvm.threadIdMap) + 1) << 1) | 1;
+
+    assert(thread->threadId != 0);
+    assert(thread->threadId != DVM_LOCK_INITIAL_THIN_VALUE);
+}
+
+/*
+ * Give back the thread ID.
+ */
+static void releaseThreadId(Thread* thread)
+{
+    assert(thread->threadId > 0);
+    dvmFreeBit(gDvm.threadIdMap, (thread->threadId >> 1) - 1);
+    thread->threadId = 0;
+}
+
+
+/*
+ * Add a stack frame that makes it look like the native code in the main
+ * thread was originally invoked from interpreted code.  This gives us a
+ * place to hang JNI local references.  The VM spec says (v2 5.2) that the
+ * VM begins by executing "main" in a class, so in a way this brings us
+ * closer to the spec.
+ */
+static bool createFakeEntryFrame(Thread* thread)
+{
+    assert(thread->threadId == kMainThreadId);      // main thread only
+
+    /* find the method on first use */
+    if (gDvm.methFakeNativeEntry == NULL) {
+        ClassObject* nativeStart;
+        Method* mainMeth;
+
+        nativeStart = dvmFindSystemClassNoInit(
+                "Ldalvik/system/NativeStart;");
+        if (nativeStart == NULL) {
+            LOGE("Unable to find dalvik.system.NativeStart class\n");
+            return false;
+        }
+
+        /*
+         * Because we are creating a frame that represents application code, we
+         * want to stuff the application class loader into the method's class
+         * loader field, even though we're using the system class loader to
+         * load it.  This makes life easier over in JNI FindClass (though it
+         * could bite us in other ways).
+         *
+         * Unfortunately this is occurring too early in the initialization,
+         * of necessity coming before JNI is initialized, and we're not quite
+         * ready to set up the application class loader.
+         *
+         * So we save a pointer to the method in gDvm.methFakeNativeEntry
+         * and check it in FindClass.  The method is private so nobody else
+         * can call it.
+         */
+        //nativeStart->classLoader = dvmGetSystemClassLoader();
+
+        mainMeth = dvmFindDirectMethodByDescriptor(nativeStart,
+                    "main", "([Ljava/lang/String;)V");
+        if (mainMeth == NULL) {
+            LOGE("Unable to find 'main' in dalvik.system.NativeStart\n");
+            return false;
+        }
+
+        gDvm.methFakeNativeEntry = mainMeth;
+    }
+
+    return dvmPushJNIFrame(thread, gDvm.methFakeNativeEntry);
+}
+
+
+/*
+ * Add a stack frame that makes it look like the native thread has been
+ * executing interpreted code.  This gives us a place to hang JNI local
+ * references.
+ */
+static bool createFakeRunFrame(Thread* thread)
+{
+    ClassObject* nativeStart;
+    Method* runMeth;
+
+    assert(thread->threadId != 1);      // not for main thread
+
+    nativeStart =
+        dvmFindSystemClassNoInit("Ldalvik/system/NativeStart;");
+    if (nativeStart == NULL) {
+        LOGE("Unable to find dalvik.system.NativeStart class\n");
+        return false;
+    }
+
+    runMeth = dvmFindVirtualMethodByDescriptor(nativeStart, "run", "()V");
+    if (runMeth == NULL) {
+        LOGE("Unable to find 'run' in dalvik.system.NativeStart\n");
+        return false;
+    }
+
+    return dvmPushJNIFrame(thread, runMeth);
+}
+
+/*
+ * Helper function to set the name of the current thread
+ */
+static void setThreadName(const char *threadName)
+{
+#if defined(HAVE_PRCTL)
+    int hasAt = 0;
+    int hasDot = 0;
+    const char *s = threadName;
+    while (*s) {
+        if (*s == '.') hasDot = 1;
+        else if (*s == '@') hasAt = 1;
+        s++;
+    }
+    int len = s - threadName;
+    if (len < 15 || hasAt || !hasDot) {
+        s = threadName;
+    } else {
+        s = threadName + len - 15;
+    }
+    prctl(PR_SET_NAME, (unsigned long) s, 0, 0, 0);
+#endif
+}
+
+/*
+ * Create a thread as a result of java.lang.Thread.start().
+ *
+ * We do have to worry about some concurrency problems, e.g. programs
+ * that try to call Thread.start() on the same object from multiple threads.
+ * (This will fail for all but one, but we have to make sure that it succeeds
+ * for exactly one.)
+ *
+ * Some of the complexity here arises from our desire to mimic the
+ * Thread vs. VMThread class decomposition we inherited.  We've been given
+ * a Thread, and now we need to create a VMThread and then populate both
+ * objects.  We also need to create one of our internal Thread objects.
+ *
+ * Pass in a stack size of 0 to get the default.
+ */
+bool dvmCreateInterpThread(Object* threadObj, int reqStackSize)
+{
+    pthread_attr_t threadAttr;
+    pthread_t threadHandle;
+    Thread* self;
+    Thread* newThread = NULL;
+    Object* vmThreadObj = NULL;
+    int stackSize;
+
+    assert(threadObj != NULL);
+
+    if(gDvm.zygote) {
+        dvmThrowException("Ljava/lang/IllegalStateException;",
+            "No new threads in -Xzygote mode");
+
+        goto fail;
+    }
+
+    self = dvmThreadSelf();
+    if (reqStackSize == 0)
+        stackSize = gDvm.stackSize;
+    else if (reqStackSize < kMinStackSize)
+        stackSize = kMinStackSize;
+    else if (reqStackSize > kMaxStackSize)
+        stackSize = kMaxStackSize;
+    else
+        stackSize = reqStackSize;
+
+    pthread_attr_init(&threadAttr);
+    pthread_attr_setdetachstate(&threadAttr, PTHREAD_CREATE_DETACHED);
+
+    /*
+     * To minimize the time spent in the critical section, we allocate the
+     * vmThread object here.
+     */
+    vmThreadObj = dvmAllocObject(gDvm.classJavaLangVMThread, ALLOC_DEFAULT);
+    if (vmThreadObj == NULL)
+        goto fail;
+
+    newThread = allocThread(stackSize);
+    if (newThread == NULL)
+        goto fail;
+    newThread->threadObj = threadObj;
+
+    assert(newThread->status == THREAD_INITIALIZING);
+
+    /*
+     * We need to lock out other threads while we test and set the
+     * "vmThread" field in java.lang.Thread, because we use that to determine
+     * if this thread has been started before.  We use the thread list lock
+     * because it's handy and we're going to need to grab it again soon
+     * anyway.
+     */
+    dvmLockThreadList(self);
+
+    if (dvmGetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread) != NULL) {
+        dvmUnlockThreadList();
+        dvmThrowException("Ljava/lang/IllegalThreadStateException;",
+            "thread has already been started");
+        goto fail;
+    }
+
+    /*
+     * There are actually three data structures: Thread (object), VMThread
+     * (object), and Thread (C struct).  All of them point to at least one
+     * other.
+     *
+     * As soon as "VMThread.vmData" is assigned, other threads can start
+     * making calls into us (e.g. setPriority).
+     */
+    dvmSetFieldInt(vmThreadObj, gDvm.offJavaLangVMThread_vmData, (u4)newThread);
+    dvmSetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread, vmThreadObj);
+
+    /*
+     * Thread creation might take a while, so release the lock.
+     */
+    dvmUnlockThreadList();
+
+    if (pthread_create(&threadHandle, &threadAttr, interpThreadStart,
+            newThread) != 0)
+    {
+        /*
+         * Failure generally indicates that we have exceeded system
+         * resource limits.  VirtualMachineError is probably too severe,
+         * so use OutOfMemoryError.
+         */
+        LOGE("Thread creation failed (err=%s)\n", strerror(errno));
+
+        dvmSetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread, NULL);
+
+        dvmThrowException("Ljava/lang/OutOfMemoryError;",
+            "thread creation failed");
+        goto fail;
+    }
+
+    /*
+     * We need to wait for the thread to start.  Otherwise, depending on
+     * the whims of the OS scheduler, we could return and the code in our
+     * thread could try to do operations on the new thread before it had
+     * finished starting.
+     *
+     * The new thread will lock the thread list, change its state to
+     * THREAD_STARTING, broadcast to gDvm.threadStartCond, and then sleep
+     * on gDvm.threadStartCond (which uses the thread list lock).  This
+     * thread (the parent) will either see that the thread is already ready
+     * after we grab the thread list lock, or will be awakened from the
+     * condition variable on the broadcast.
+     *
+     * We don't want to stall the rest of the VM while the new thread
+     * starts, which can happen if the GC wakes up at the wrong moment.
+     * So, we change our own status to VMWAIT, and self-suspend if
+     * necessary after we finish adding the new thread.
+     *
+     *
+     * We have to deal with an odd race with the GC/debugger suspension
+     * mechanism when creating a new thread.  The information about whether
+     * or not a thread should be suspended is contained entirely within
+     * the Thread struct; this is usually cleaner to deal with than having
+     * one or more globally-visible suspension flags.  The trouble is that
+     * we could create the thread while the VM is trying to suspend all
+     * threads.  The suspend-count won't be nonzero for the new thread,
+     * so dvmChangeStatus(THREAD_RUNNING) won't cause a suspension.
+     *
+     * The easiest way to deal with this is to prevent the new thread from
+     * running until the parent says it's okay.  This results in the
+     * following sequence of events for a "badly timed" GC:
+     *
+     *  - call pthread_create()
+     *  - lock thread list
+     *  - put self into THREAD_VMWAIT so GC doesn't wait for us
+     *  - sleep on condition var (mutex = thread list lock) until child starts
+     *  + GC triggered by another thread
+     *  + thread list locked; suspend counts updated; thread list unlocked
+     *  + loop waiting for all runnable threads to suspend
+     *  + success, start GC
+     *  o child thread wakes, signals condition var to wake parent
+     *  o child waits for parent ack on condition variable
+     *  - we wake up, locking thread list
+     *  - add child to thread list
+     *  - unlock thread list
+     *  - change our state back to THREAD_RUNNING; GC causes us to suspend
+     *  + GC finishes; all threads in thread list are resumed
+     *  - lock thread list
+     *  - set child to THREAD_VMWAIT, and signal it to start
+     *  - unlock thread list
+     *  o child resumes
+     *  o child changes state to THREAD_RUNNING
+     *
+     * The above shows the GC starting up during thread creation, but if
+     * it starts anywhere after VMThread.create() is called it will
+     * produce the same series of events.
+     *
+     * Once the child is in the thread list, it will be suspended and
+     * resumed like any other thread.  In the above scenario the resume-all
+     * code will try to resume the new thread, which was never actually
+     * suspended, and try to decrement the child's thread suspend count to -1.
+     * We can catch this in the resume-all code.
+     *
+     * Bouncing back and forth between threads like this adds a small amount
+     * of scheduler overhead to thread startup.
+     *
+     * One alternative to having the child wait for the parent would be
+     * to have the child inherit the parents' suspension count.  This
+     * would work for a GC, since we can safely assume that the parent
+     * thread didn't cause it, but we must only do so if the parent suspension
+     * was caused by a suspend-all.  If the parent was being asked to
+     * suspend singly by the debugger, the child should not inherit the value.
+     *
+     * We could also have a global "new thread suspend count" that gets
+     * picked up by new threads before changing state to THREAD_RUNNING.
+     * This would be protected by the thread list lock and set by a
+     * suspend-all.
+     */
+    dvmLockThreadList(self);
+    assert(self->status == THREAD_RUNNING);
+    self->status = THREAD_VMWAIT;
+    while (newThread->status != THREAD_STARTING)
+        pthread_cond_wait(&gDvm.threadStartCond, &gDvm.threadListLock);
+
+    LOG_THREAD("threadid=%d: adding to list\n", newThread->threadId);
+    newThread->next = gDvm.threadList->next;
+    if (newThread->next != NULL)
+        newThread->next->prev = newThread;
+    newThread->prev = gDvm.threadList;
+    gDvm.threadList->next = newThread;
+
+    if (!dvmGetFieldBoolean(threadObj, gDvm.offJavaLangThread_daemon))
+        gDvm.nonDaemonThreadCount++;        // guarded by thread list lock
+
+    dvmUnlockThreadList();
+
+    /* change status back to RUNNING, self-suspending if necessary */
+    dvmChangeStatus(self, THREAD_RUNNING);
+
+    /*
+     * Tell the new thread to start.
+     *
+     * We must hold the thread list lock before messing with another thread.
+     * In the general case we would also need to verify that newThread was
+     * still in the thread list, but in our case the thread has not started
+     * executing user code and therefore has not had a chance to exit.
+     *
+     * We move it to VMWAIT, and it then shifts itself to RUNNING, which
+     * comes with a suspend-pending check.
+     */
+    dvmLockThreadList(self);
+
+    assert(newThread->status == THREAD_STARTING);
+    newThread->status = THREAD_VMWAIT;
+    pthread_cond_broadcast(&gDvm.threadStartCond);
+
+    dvmUnlockThreadList();
+
+    dvmReleaseTrackedAlloc(vmThreadObj, NULL);
+    return true;
+
+fail:
+    freeThread(newThread);
+    dvmReleaseTrackedAlloc(vmThreadObj, NULL);
+    return false;
+}
+
+/*
+ * pthread entry function for threads started from interpreted code.
+ */
+static void* interpThreadStart(void* arg)
+{
+    Thread* self = (Thread*) arg;
+
+    char *threadName = dvmGetThreadName(self);
+    setThreadName(threadName);
+    free(threadName);
+
+    /*
+     * Finish initializing the Thread struct.
+     */
+    prepareThread(self);
+
+    LOG_THREAD("threadid=%d: created from interp\n", self->threadId);
+
+    /*
+     * Change our status and wake our parent, who will add us to the
+     * thread list and advance our state to VMWAIT.
+     */
+    dvmLockThreadList(self);
+    self->status = THREAD_STARTING;
+    pthread_cond_broadcast(&gDvm.threadStartCond);
+
+    /*
+     * Wait until the parent says we can go.  Assuming there wasn't a
+     * suspend pending, this will happen immediately.  When it completes,
+     * we're full-fledged citizens of the VM.
+     *
+     * We have to use THREAD_VMWAIT here rather than THREAD_RUNNING
+     * because the pthread_cond_wait below needs to reacquire a lock that
+     * suspend-all is also interested in.  If we get unlucky, the parent could
+     * change us to THREAD_RUNNING, then a GC could start before we get
+     * signaled, and suspend-all will grab the thread list lock and then
+     * wait for us to suspend.  We'll be in the tail end of pthread_cond_wait
+     * trying to get the lock.
+     */
+    while (self->status != THREAD_VMWAIT)
+        pthread_cond_wait(&gDvm.threadStartCond, &gDvm.threadListLock);
+
+    dvmUnlockThreadList();
+
+    /*
+     * Add a JNI context.
+     */
+    self->jniEnv = dvmCreateJNIEnv(self);
+
+    /*
+     * Change our state so the GC will wait for us from now on.  If a GC is
+     * in progress this call will suspend us.
+     */
+    dvmChangeStatus(self, THREAD_RUNNING);
+
+    /*
+     * Notify the debugger & DDM.  The debugger notification may cause
+     * us to suspend ourselves (and others).
+     */
+    if (gDvm.debuggerConnected)
+        dvmDbgPostThreadStart(self);
+
+    /*
+     * Set the system thread priority according to the Thread object's
+     * priority level.  We don't usually need to do this, because both the
+     * Thread object and system thread priorities inherit from parents.  The
+     * tricky case is when somebody creates a Thread object, calls
+     * setPriority(), and then starts the thread.  We could manage this with
+     * a "needs priority update" flag to avoid the redundant call.
+     */
+    int priority = dvmGetFieldBoolean(self->threadObj,
+                        gDvm.offJavaLangThread_priority);
+    dvmChangeThreadPriority(self, priority);
+
+    /*
+     * Execute the "run" method.
+     *
+     * At this point our stack is empty, so somebody who comes looking for
+     * stack traces right now won't have much to look at.  This is normal.
+     */
+    Method* run = self->threadObj->clazz->vtable[gDvm.voffJavaLangThread_run];
+    JValue unused;
+
+    LOGV("threadid=%d: calling run()\n", self->threadId);
+    assert(strcmp(run->name, "run") == 0);
+    dvmCallMethod(self, run, self->threadObj, &unused);
+    LOGV("threadid=%d: exiting\n", self->threadId);
+
+    /*
+     * Remove the thread from various lists, report its death, and free
+     * its resources.
+     */
+    dvmDetachCurrentThread();
+
+    return NULL;
+}
+
+/*
+ * The current thread is exiting with an uncaught exception.  The
+ * Java programming language allows the application to provide a
+ * thread-exit-uncaught-exception handler for the VM, for a specific
+ * Thread, and for all threads in a ThreadGroup.
+ *
+ * Version 1.5 added the per-thread handler.  We need to call
+ * "uncaughtException" in the handler object, which is either the
+ * ThreadGroup object or the Thread-specific handler.
+ */
+static void threadExitUncaughtException(Thread* self, Object* group)
+{
+    Object* exception;
+    Object* handlerObj;
+    ClassObject* throwable;
+    Method* uncaughtHandler = NULL;
+    InstField* threadHandler;
+
+    LOGW("threadid=%d: thread exiting with uncaught exception (group=%p)\n",
+        self->threadId, group);
+    assert(group != NULL);
+
+    /*
+     * Get a pointer to the exception, then clear out the one in the
+     * thread.  We don't want to have it set when executing interpreted code.
+     */
+    exception = dvmGetException(self);
+    dvmAddTrackedAlloc(exception, self);
+    dvmClearException(self);
+
+    /*
+     * Get the Thread's "uncaughtHandler" object.  Use it if non-NULL;
+     * else use "group" (which is an instance of UncaughtExceptionHandler).
+     */
+    threadHandler = dvmFindInstanceField(gDvm.classJavaLangThread,
+            "uncaughtHandler", "Ljava/lang/Thread$UncaughtExceptionHandler;");
+    if (threadHandler == NULL) {
+        LOGW("WARNING: no 'uncaughtHandler' field in java/lang/Thread\n");
+        goto bail;
+    }
+    handlerObj = dvmGetFieldObject(self->threadObj, threadHandler->byteOffset);
+    if (handlerObj == NULL)
+        handlerObj = group;
+
+    /*
+     * Find the "uncaughtHandler" field in this object.
+     */
+    uncaughtHandler = dvmFindVirtualMethodHierByDescriptor(handlerObj->clazz,
+            "uncaughtException", "(Ljava/lang/Thread;Ljava/lang/Throwable;)V");
+
+    if (uncaughtHandler != NULL) {
+        //LOGI("+++ calling %s.uncaughtException\n",
+        //     handlerObj->clazz->descriptor);
+        JValue unused;
+        dvmCallMethod(self, uncaughtHandler, handlerObj, &unused,
+            self->threadObj, exception);
+    } else {
+        /* restore it and dump a stack trace */
+        LOGW("WARNING: no 'uncaughtException' method in class %s\n",
+            handlerObj->clazz->descriptor);
+        dvmSetException(self, exception);
+        dvmLogExceptionStackTrace();
+    }
+
+bail:
+    dvmReleaseTrackedAlloc(exception, self);
+}
+
+
+/*
+ * Create an internal VM thread, for things like JDWP and finalizers.
+ *
+ * The easiest way to do this is create a new thread and then use the
+ * JNI AttachCurrentThread implementation.
+ *
+ * This does not return until after the new thread has begun executing.
+ */
+bool dvmCreateInternalThread(pthread_t* pHandle, const char* name,
+    InternalThreadStart func, void* funcArg)
+{
+    InternalStartArgs* pArgs;
+    Object* systemGroup;
+    pthread_attr_t threadAttr;
+    volatile Thread* newThread = NULL;
+    volatile int createStatus = 0;
+
+    systemGroup = dvmGetSystemThreadGroup();
+    if (systemGroup == NULL)
+        return false;
+
+    pArgs = (InternalStartArgs*) malloc(sizeof(*pArgs));
+    pArgs->func = func;
+    pArgs->funcArg = funcArg;
+    pArgs->name = strdup(name);     // storage will be owned by new thread
+    pArgs->group = systemGroup;
+    pArgs->isDaemon = true;
+    pArgs->pThread = &newThread;
+    pArgs->pCreateStatus = &createStatus;
+
+    pthread_attr_init(&threadAttr);
+    //pthread_attr_setdetachstate(&threadAttr, PTHREAD_CREATE_DETACHED);
+
+    if (pthread_create(pHandle, &threadAttr, internalThreadStart,
+            pArgs) != 0)
+    {
+        LOGE("internal thread creation failed\n");
+        free(pArgs->name);
+        free(pArgs);
+        return false;
+    }
+
+    /*
+     * Wait for the child to start.  This gives us an opportunity to make
+     * sure that the thread started correctly, and allows our caller to
+     * assume that the thread has started running.
+     *
+     * Because we aren't holding a lock across the thread creation, it's
+     * possible that the child will already have completed its
+     * initialization.  Because the child only adjusts "createStatus" while
+     * holding the thread list lock, the initial condition on the "while"
+     * loop will correctly avoid the wait if this occurs.
+     *
+     * It's also possible that we'll have to wait for the thread to finish
+     * being created, and as part of allocating a Thread object it might
+     * need to initiate a GC.  We switch to VMWAIT while we pause.
+     */
+    Thread* self = dvmThreadSelf();
+    int oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
+    dvmLockThreadList(self);
+    while (createStatus == 0)
+        pthread_cond_wait(&gDvm.threadStartCond, &gDvm.threadListLock);
+
+    if (newThread == NULL) {
+        LOGW("internal thread create failed (createStatus=%d)\n", createStatus);
+        assert(createStatus < 0);
+        /* don't free pArgs -- if pthread_create succeeded, child owns it */
+        dvmUnlockThreadList();
+        dvmChangeStatus(self, oldStatus);
+        return false;
+    }
+
+    /* thread could be in any state now (except early init states) */
+    //assert(newThread->status == THREAD_RUNNING);
+
+    dvmUnlockThreadList();
+    dvmChangeStatus(self, oldStatus);
+
+    return true;
+}
+
+/*
+ * pthread entry function for internally-created threads.
+ *
+ * We are expected to free "arg" and its contents.  If we're a daemon
+ * thread, and we get cancelled abruptly when the VM shuts down, the
+ * storage won't be freed.  If this becomes a concern we can make a copy
+ * on the stack.
+ */
+static void* internalThreadStart(void* arg)
+{
+    InternalStartArgs* pArgs = (InternalStartArgs*) arg;
+    JavaVMAttachArgs jniArgs;
+
+    jniArgs.version = JNI_VERSION_1_2;
+    jniArgs.name = pArgs->name;
+    jniArgs.group = pArgs->group;
+
+    setThreadName(pArgs->name);
+
+    /* use local jniArgs as stack top */
+    if (dvmAttachCurrentThread(&jniArgs, pArgs->isDaemon)) {
+        /*
+         * Tell the parent of our success.
+         *
+         * threadListLock is the mutex for threadStartCond.
+         */
+        dvmLockThreadList(dvmThreadSelf());
+        *pArgs->pCreateStatus = 1;
+        *pArgs->pThread = dvmThreadSelf();
+        pthread_cond_broadcast(&gDvm.threadStartCond);
+        dvmUnlockThreadList();
+
+        LOG_THREAD("threadid=%d: internal '%s'\n",
+            dvmThreadSelf()->threadId, pArgs->name);
+
+        /* execute */
+        (*pArgs->func)(pArgs->funcArg);
+
+        /* detach ourselves */
+        dvmDetachCurrentThread();
+    } else {
+        /*
+         * Tell the parent of our failure.  We don't have a Thread struct,
+         * so we can't be suspended, so we don't need to enter a critical
+         * section.
+         */
+        dvmLockThreadList(dvmThreadSelf());
+        *pArgs->pCreateStatus = -1;
+        assert(*pArgs->pThread == NULL);
+        pthread_cond_broadcast(&gDvm.threadStartCond);
+        dvmUnlockThreadList();
+
+        assert(*pArgs->pThread == NULL);
+    }
+
+    free(pArgs->name);
+    free(pArgs);
+    return NULL;
+}
+
+/*
+ * Attach the current thread to the VM.
+ *
+ * Used for internally-created threads and JNI's AttachCurrentThread.
+ */
+bool dvmAttachCurrentThread(const JavaVMAttachArgs* pArgs, bool isDaemon)
+{
+    Thread* self = NULL;
+    Object* threadObj = NULL;
+    Object* vmThreadObj = NULL;
+    StringObject* threadNameStr = NULL;
+    Method* init;
+    bool ok, ret;
+
+    /* establish a basic sense of self */
+    self = allocThread(gDvm.stackSize);
+    if (self == NULL)
+        goto fail;
+    setThreadSelf(self);
+
+    /*
+     * Create Thread and VMThread objects.  We have to use ALLOC_NO_GC
+     * because this thread is not yet visible to the VM.  We could also
+     * just grab the GC lock earlier, but that leaves us executing
+     * interpreted code with the lock held, which is not prudent.
+     *
+     * The alloc calls will block if a GC is in progress, so we don't need
+     * to check for global suspension here.
+     *
+     * It's also possible for the allocation calls to *cause* a GC.
+     */
+    //BUG: deadlock if a GC happens here during HeapWorker creation
+    threadObj = dvmAllocObject(gDvm.classJavaLangThread, ALLOC_NO_GC);
+    if (threadObj == NULL)
+        goto fail;
+    vmThreadObj = dvmAllocObject(gDvm.classJavaLangVMThread, ALLOC_NO_GC);
+    if (vmThreadObj == NULL)
+        goto fail;
+
+    self->threadObj = threadObj;
+    dvmSetFieldInt(vmThreadObj, gDvm.offJavaLangVMThread_vmData, (u4)self);
+
+    /*
+     * Do some java.lang.Thread constructor prep before we lock stuff down.
+     */
+    if (pArgs->name != NULL) {
+        threadNameStr = dvmCreateStringFromCstr(pArgs->name, ALLOC_NO_GC);
+        if (threadNameStr == NULL) {
+            assert(dvmCheckException(dvmThreadSelf()));
+            goto fail;
+        }
+    }
+
+    init = dvmFindDirectMethodByDescriptor(gDvm.classJavaLangThread, "<init>",
+            "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
+    if (init == NULL) {
+        assert(dvmCheckException(dvmThreadSelf()));
+        goto fail;
+    }
+
+    /*
+     * Finish our thread prep.  We need to do this before invoking any
+     * interpreted code.  prepareThread() requires that we hold the thread
+     * list lock.
+     */
+    dvmLockThreadList(self);
+    ok = prepareThread(self);
+    dvmUnlockThreadList();
+    if (!ok)
+        goto fail;
+
+    self->jniEnv = dvmCreateJNIEnv(self);
+    if (self->jniEnv == NULL)
+        goto fail;
+
+    /*
+     * Create a "fake" JNI frame at the top of the main thread interp stack.
+     * It isn't really necessary for the internal threads, but it gives
+     * the debugger something to show.  It is essential for the JNI-attached
+     * threads.
+     */
+    if (!createFakeRunFrame(self))
+        goto fail;
+
+    /*
+     * The native side of the thread is ready;  add it to the list.
+     */
+    LOG_THREAD("threadid=%d: adding to list (attached)\n", self->threadId);
+
+    /* Start off in VMWAIT, because we may be about to block
+     * on the heap lock, and we don't want any suspensions
+     * to wait for us.
+     */
+    self->status = THREAD_VMWAIT;
+
+    /*
+     * Add ourselves to the thread list.  Once we finish here we are
+     * visible to the debugger and the GC.
+     */
+    dvmLockThreadList(self);
+
+    self->next = gDvm.threadList->next;
+    if (self->next != NULL)
+        self->next->prev = self;
+    self->prev = gDvm.threadList;
+    gDvm.threadList->next = self;
+    if (!isDaemon)
+        gDvm.nonDaemonThreadCount++;
+
+    dvmUnlockThreadList();
+
+    /*
+     * It's possible that a GC is currently running.  Our thread
+     * wasn't in the list when the GC started, so it's not properly
+     * suspended in that case.  Synchronize on the heap lock (held
+     * when a GC is happening) to guarantee that any GCs from here
+     * on will see this thread in the list.
+     */
+    dvmLockMutex(&gDvm.gcHeapLock);
+    dvmUnlockMutex(&gDvm.gcHeapLock);
+
+    /*
+     * Switch to the running state now that we're ready for
+     * suspensions.  This call may suspend.
+     */
+    dvmChangeStatus(self, THREAD_RUNNING);
+
+    /*
+     * Now we're ready to run some interpreted code.
+     *
+     * We need to construct the Thread object and set the VMThread field.
+     * Setting VMThread tells interpreted code that we're alive.
+     *
+     * Call the (group, name, priority, daemon) constructor on the Thread.
+     * This sets the thread's name and adds it to the specified group, and
+     * provides values for priority and daemon (which are normally inherited
+     * from the current thread).
+     */
+    JValue unused;
+    dvmCallMethod(self, init, threadObj, &unused, (Object*)pArgs->group,
+        threadNameStr, getThreadPriorityFromSystem(), isDaemon);
+    if (dvmCheckException(self)) {
+        LOGE("exception thrown while constructing attached thread object\n");
+        goto fail_unlink;
+    }
+    //if (isDaemon)
+    //    dvmSetFieldBoolean(threadObj, gDvm.offJavaLangThread_daemon, true);
+
+    /*
+     * Set the VMThread field, which tells interpreted code that we're alive.
+     *
+     * The risk of a thread start collision here is very low; somebody
+     * would have to be deliberately polling the ThreadGroup list and
+     * trying to start threads against anything it sees, which would
+     * generally cause problems for all thread creation.  However, for
+     * correctness we test "vmThread" before setting it.
+     */
+    if (dvmGetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread) != NULL) {
+        dvmThrowException("Ljava/lang/IllegalThreadStateException;",
+            "thread has already been started");
+        /* We don't want to free anything associated with the thread
+         * because someone is obviously interested in it.  Just let
+         * it go and hope it will clean itself up when its finished.
+         * This case should never happen anyway.
+         *
+         * Since we're letting it live, we need to finish setting it up.
+         * We just have to let the caller know that the intended operation
+         * has failed.
+         *
+         * [ This seems strange -- stepping on the vmThread object that's
+         * already present seems like a bad idea.  TODO: figure this out. ]
+         */
+        ret = false;
+    } else
+        ret = true;
+    dvmSetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread, vmThreadObj);
+
+    /* These are now reachable from the thread groups. */
+    dvmClearAllocFlags(threadObj, ALLOC_NO_GC);
+    dvmClearAllocFlags(vmThreadObj, ALLOC_NO_GC);
+
+    /*
+     * The thread is ready to go;  let the debugger see it.
+     */
+    self->threadObj = threadObj;
+
+    LOG_THREAD("threadid=%d: attached from native, name=%s\n",
+        self->threadId, pArgs->name);
+
+    /* tell the debugger & DDM */
+    if (gDvm.debuggerConnected)
+        dvmDbgPostThreadStart(self);
+
+    return ret;
+
+fail_unlink:
+    dvmLockThreadList(self);
+    unlinkThread(self);
+    if (!isDaemon)
+        gDvm.nonDaemonThreadCount--;
+    dvmUnlockThreadList();
+    /* fall through to "fail" */
+fail:
+    dvmClearAllocFlags(threadObj, ALLOC_NO_GC);
+    dvmClearAllocFlags(vmThreadObj, ALLOC_NO_GC);
+    if (self != NULL) {
+        if (self->jniEnv != NULL) {
+            dvmDestroyJNIEnv(self->jniEnv);
+            self->jniEnv = NULL;
+        }
+        freeThread(self);
+    }
+    setThreadSelf(NULL);
+    return false;
+}
+
/*
 * Detach the thread from the various data structures, notify other threads
 * that are waiting to "join" it, and free up all heap-allocated storage.
 *
 * Used for all threads.
 *
 * When we get here the interpreted stack should be empty.  The JNI 1.6 spec
 * requires us to enforce this for the DetachCurrentThread call, probably
 * because it also says that DetachCurrentThread causes all monitors
 * associated with the thread to be released.  (Because the stack is empty,
 * we only have to worry about explicit JNI calls to MonitorEnter.)
 *
 * THOUGHT:
 * We might want to avoid freeing our internal Thread structure until the
 * associated Thread/VMThread objects get GCed.  Our Thread is impossible to
 * get to once the thread shuts down, but there is a small possibility of
 * an operation starting in another thread before this thread halts, and
 * finishing much later (perhaps the thread got stalled by a weird OS bug).
 * We don't want something like Thread.isInterrupted() crawling through
 * freed storage.  Can do with a Thread finalizer, or by creating a
 * dedicated ThreadObject class for java/lang/Thread and moving all of our
 * state into that.
 */
void dvmDetachCurrentThread(void)
{
    Thread* self = dvmThreadSelf();
    Object* vmThread;
    Object* group;

    /*
     * Make sure we're not detaching a thread that's still running.  (This
     * could happen with an explicit JNI detach call.)
     *
     * A thread created by interpreted code will finish with a depth of
     * zero, while a JNI-attached thread will have the synthetic "stack
     * starter" native method at the top.
     */
    int curDepth = dvmComputeExactFrameDepth(self->curFrame);
    if (curDepth != 0) {
        bool topIsNative = false;

        if (curDepth == 1) {
            /* not expecting a lingering break frame; just look at curFrame */
            assert(!dvmIsBreakFrame(self->curFrame));
            StackSaveArea* ssa = SAVEAREA_FROM_FP(self->curFrame);
            if (dvmIsNativeMethod(ssa->method))
                topIsNative = true;
        }

        if (!topIsNative) {
            /* interpreted frames remain -- caller violated the contract */
            LOGE("ERROR: detaching thread with interp frames (count=%d)\n",
                curDepth);
            dvmDumpThread(self, false);
            dvmAbort();
        }
    }

    group = dvmGetFieldObject(self->threadObj, gDvm.offJavaLangThread_group);
    LOG_THREAD("threadid=%d: detach (group=%p)\n", self->threadId, group);

    /*
     * Release any held monitors.  Since there are no interpreted stack
     * frames, the only thing left are the monitors held by JNI MonitorEnter
     * calls.
     */
    dvmReleaseJniMonitors(self);

    /*
     * Do some thread-exit uncaught exception processing if necessary.
     */
    if (dvmCheckException(self))
        threadExitUncaughtException(self, group);

    /*
     * Remove the thread from the thread group.
     */
    if (group != NULL) {
        Method* removeThread =
            group->clazz->vtable[gDvm.voffJavaLangThreadGroup_removeThread];
        JValue unused;
        dvmCallMethod(self, removeThread, group, &unused, self->threadObj);
    }

    /*
     * Clear the vmThread reference in the Thread object.  Interpreted code
     * will now see that this Thread is not running.  As this may be the
     * only reference to the VMThread object that the VM knows about, we
     * have to create an internal reference to it first.
     */
    vmThread = dvmGetFieldObject(self->threadObj,
                    gDvm.offJavaLangThread_vmThread);
    /* keep vmThread alive while we drop the last heap reference to it */
    dvmAddTrackedAlloc(vmThread, self);
    dvmSetFieldObject(self->threadObj, gDvm.offJavaLangThread_vmThread, NULL);

    /* clear out our struct Thread pointer, since it's going away */
    dvmSetFieldObject(vmThread, gDvm.offJavaLangVMThread_vmData, NULL);

    /*
     * Tell the debugger & DDM.  This may cause the current thread or all
     * threads to suspend.
     *
     * The JDWP spec is somewhat vague about when this happens, other than
     * that it's issued by the dying thread, which may still appear in
     * an "all threads" listing.
     */
    if (gDvm.debuggerConnected)
        dvmDbgPostThreadDeath(self);

    /*
     * Thread.join() is implemented as an Object.wait() on the VMThread
     * object.  Signal anyone who is waiting.
     */
    dvmLockObject(self, vmThread);
    dvmObjectNotifyAll(self, vmThread);
    dvmUnlockObject(self, vmThread);

    dvmReleaseTrackedAlloc(vmThread, self);
    vmThread = NULL;

    /*
     * We're done manipulating objects, so it's okay if the GC runs in
     * parallel with us from here out.  It's important to do this if
     * profiling is enabled, since we can wait indefinitely.
     */
    self->status = THREAD_VMWAIT;

#ifdef WITH_PROFILER
    /*
     * If we're doing method trace profiling, we don't want threads to exit,
     * because if they do we'll end up reusing thread IDs.  This complicates
     * analysis and makes it impossible to have reasonable output in the
     * "threads" section of the "key" file.
     *
     * We need to do this after Thread.join() completes, or other threads
     * could get wedged.  Since self->threadObj is still valid, the Thread
     * object will not get GCed even though we're no longer in the ThreadGroup
     * list (which is important since the profiling thread needs to get
     * the thread's name).
     */
    MethodTraceState* traceState = &gDvm.methodTrace;

    dvmLockMutex(&traceState->startStopLock);
    if (traceState->traceEnabled) {
        LOGI("threadid=%d: waiting for method trace to finish\n",
            self->threadId);
        /* cond wait loop: re-check traceEnabled after every wakeup */
        while (traceState->traceEnabled) {
            int cc;
            cc = pthread_cond_wait(&traceState->threadExitCond,
                    &traceState->startStopLock);
            assert(cc == 0);
        }
    }
    dvmUnlockMutex(&traceState->startStopLock);
#endif

    dvmLockThreadList(self);

    /*
     * Lose the JNI context.
     */
    dvmDestroyJNIEnv(self->jniEnv);
    self->jniEnv = NULL;

    /* non-RUNNING states are treated as "already stopped" by suspend-all */
    self->status = THREAD_ZOMBIE;

    /*
     * Remove ourselves from the internal thread list.
     */
    unlinkThread(self);

    /*
     * If we're the last one standing, signal anybody waiting in
     * DestroyJavaVM that it's okay to exit.
     */
    if (!dvmGetFieldBoolean(self->threadObj, gDvm.offJavaLangThread_daemon)) {
        gDvm.nonDaemonThreadCount--;        // guarded by thread list lock

        if (gDvm.nonDaemonThreadCount == 0) {
            int cc;

            LOGV("threadid=%d: last non-daemon thread\n", self->threadId);
            //dvmDumpAllThreads(false);
            // cond var guarded by threadListLock, which we already hold
            cc = pthread_cond_signal(&gDvm.vmExitCond);
            assert(cc == 0);
        }
    }

    LOGV("threadid=%d: bye!\n", self->threadId);
    /* recycle the thread ID while we still hold the thread list lock */
    releaseThreadId(self);
    dvmUnlockThreadList();

    setThreadSelf(NULL);
    freeThread(self);
}
+
+
+/*
+ * Suspend a single thread.  Do not use to suspend yourself.
+ *
+ * This is used primarily for debugger/DDMS activity.  Does not return
+ * until the thread has suspended or is in a "safe" state (e.g. executing
+ * native code outside the VM).
+ *
+ * The thread list lock should be held before calling here -- it's not
+ * entirely safe to hang on to a Thread* from another thread otherwise.
+ * (We'd need to grab it here anyway to avoid clashing with a suspend-all.)
+ */
+void dvmSuspendThread(Thread* thread)
+{
+    assert(thread != NULL);
+    assert(thread != dvmThreadSelf());
+    //assert(thread->handle != dvmJdwpGetDebugThread(gDvm.jdwpState));
+
+    lockThreadSuspendCount();
+    thread->suspendCount++;
+    thread->dbgSuspendCount++;
+
+    LOG_THREAD("threadid=%d: suspend++, now=%d\n",
+        thread->threadId, thread->suspendCount);
+    unlockThreadSuspendCount();
+
+    waitForThreadSuspend(dvmThreadSelf(), thread);
+}
+
+/*
+ * Reduce the suspend count of a thread.  If it hits zero, tell it to
+ * resume.
+ *
+ * Used primarily for debugger/DDMS activity.  The thread in question
+ * might have been suspended singly or as part of a suspend-all operation.
+ *
+ * The thread list lock should be held before calling here -- it's not
+ * entirely safe to hang on to a Thread* from another thread otherwise.
+ * (We'd need to grab it here anyway to avoid clashing with a suspend-all.)
+ */
+void dvmResumeThread(Thread* thread)
+{
+    assert(thread != NULL);
+    assert(thread != dvmThreadSelf());
+    //assert(thread->handle != dvmJdwpGetDebugThread(gDvm.jdwpState));
+
+    lockThreadSuspendCount();
+    if (thread->suspendCount > 0) {
+        thread->suspendCount--;
+        thread->dbgSuspendCount--;
+    } else {
+        LOG_THREAD("threadid=%d:  suspendCount already zero\n",
+            thread->threadId);
+    }
+
+    LOG_THREAD("threadid=%d: suspend--, now=%d\n",
+        thread->threadId, thread->suspendCount);
+
+    if (thread->suspendCount == 0) {
+        int cc = pthread_cond_broadcast(&gDvm.threadSuspendCountCond);
+        assert(cc == 0);
+    }
+
+    unlockThreadSuspendCount();
+}
+
/*
 * Suspend yourself, as a result of debugger activity.
 */
void dvmSuspendSelf(bool jdwpActivity)
{
    Thread* self = dvmThreadSelf();

    /* debugger thread may not suspend itself due to debugger activity! */
    assert(gDvm.jdwpState != NULL);
    if (self->handle == dvmJdwpGetDebugThread(gDvm.jdwpState)) {
        assert(false);
        return;
    }

    /*
     * Collisions with other suspends aren't really interesting.  We want
     * to ensure that we're the only one fiddling with the suspend count
     * though.
     */
    lockThreadSuspendCount();
    self->suspendCount++;
    self->dbgSuspendCount++;

    /*
     * Suspend ourselves.
     */
    assert(self->suspendCount > 0);
    self->isSuspended = true;
    LOG_THREAD("threadid=%d: self-suspending (dbg)\n", self->threadId);

    /*
     * Tell JDWP that we've completed suspension.  The JDWP thread can't
     * tell us to resume before we're fully asleep because we hold the
     * suspend count lock.
     *
     * If we got here via waitForDebugger(), don't do this part.
     */
    if (jdwpActivity) {
        //LOGI("threadid=%d: clearing wait-for-event (my handle=%08x)\n",
        //    self->threadId, (int) self->handle);
        dvmJdwpClearWaitForEventThread(gDvm.jdwpState);
    }

    /*
     * Sleep until our suspend count drops to zero.  pthread_cond_wait
     * atomically releases threadSuspendCountLock while we're blocked, so
     * the resuming thread can get in to decrement the count.  Re-check
     * the count after every wakeup (spurious wakeups are possible).
     */
    while (self->suspendCount != 0) {
        int cc;
        cc = pthread_cond_wait(&gDvm.threadSuspendCountCond,
                &gDvm.threadSuspendCountLock);
        assert(cc == 0);
        if (self->suspendCount != 0) {
            /* woke up, but somebody still wants us asleep */
            LOGD("threadid=%d: still suspended after undo (s=%d d=%d)\n",
                self->threadId, self->suspendCount, self->dbgSuspendCount);
        }
    }
    assert(self->suspendCount == 0 && self->dbgSuspendCount == 0);
    self->isSuspended = false;
    LOG_THREAD("threadid=%d: self-reviving (dbg), status=%d\n",
        self->threadId, self->status);

    unlockThreadSuspendCount();
}
+
+
#ifdef HAVE_GLIBC
# define NUM_FRAMES  20
# include <execinfo.h>
/*
 * glibc-only stack dump function.  Requires link with "--export-dynamic".
 *
 * TODO: move this into libs/cutils and make it work for all platforms.
 */
static void printBackTrace(void)
{
    void* array[NUM_FRAMES];
    size_t size;
    char** strings;
    size_t i;

    size = backtrace(array, NUM_FRAMES);
    strings = backtrace_symbols(array, size);
    if (strings == NULL) {
        /* backtrace_symbols allocates with malloc; it can fail */
        LOGW("backtrace_symbols failed\n");
        return;
    }

    LOGW("Obtained %zu stack frames.\n", size);

    for (i = 0; i < size; i++)
        LOGW("%s\n", strings[i]);

    /* one free() releases the whole symbol array */
    free(strings);
}
#else
/* no-op stand-in for platforms without glibc's backtrace support */
static void printBackTrace(void) {}
#endif
+
+/*
+ * Dump the state of the current thread and that of another thread that
+ * we think is wedged.
+ */
+static void dumpWedgedThread(Thread* thread)
+{
+    char exePath[1024];
+
+    /*
+     * The "executablepath" function in libutils is host-side only.
+     */
+    strcpy(exePath, "-");
+#ifdef HAVE_GLIBC
+    {
+        char proc[100];
+        sprintf(proc, "/proc/%d/exe", getpid());
+        int len;
+        
+        len = readlink(proc, exePath, sizeof(exePath)-1);
+        exePath[len] = '\0';
+    }
+#endif
+
+    LOGW("dumping state: process %s %d\n", exePath, getpid());
+    dvmDumpThread(dvmThreadSelf(), false);
+    printBackTrace();
+
+    // dumping a running thread is risky, but could be useful
+    dvmDumpThread(thread, true);
+
+
+    // stop now and get a core dump
+    //abort();
+}
+
+
/*
 * Wait for another thread to see the pending suspension and stop running.
 * It can either suspend itself or go into a non-running state such as
 * VMWAIT or NATIVE in which it cannot interact with the GC.
 *
 * If we're running at a higher priority, sched_yield() may not do anything,
 * so we need to sleep for "long enough" to guarantee that the other
 * thread has a chance to finish what it's doing.  Sleeping for too short
 * a period (e.g. less than the resolution of the sleep clock) might cause
 * the scheduler to return immediately, so we want to start with a
 * "reasonable" value and expand.
 *
 * This does not return until the other thread has stopped running.
 * Eventually we time out and the VM aborts.
 *
 * TODO: track basic stats about time required to suspend VM.
 */
static void waitForThreadSuspend(Thread* self, Thread* thread)
{
    const int kMaxRetries = 10;
    const int kSpinSleepTime = 750*1000;        /* 0.75s */

    int sleepIter = 0;
    int retryCount = 0;
    u8 startWhen = 0;       // init req'd to placate gcc

    /* target counts as stopped once it leaves RUNNING or flags isSuspended */
    while (thread->status == THREAD_RUNNING && !thread->isSuspended) {
        if (sleepIter == 0)         // get current time on first iteration
            startWhen = dvmGetRelativeTimeUsec();

        /* false return means we've exhausted this spin window */
        if (!dvmIterativeSleep(sleepIter++, kSpinSleepTime, startWhen)) {
            LOGW("threadid=%d (h=%d): spin on suspend threadid=%d (handle=%d)\n",
                self->threadId, (int)self->handle,
                thread->threadId, (int)thread->handle);
            dumpWedgedThread(thread);

            // keep going; could be slow due to valgrind
            sleepIter = 0;

            /* after kMaxRetries full windows, assume deadlock and abort */
            if (retryCount++ == kMaxRetries) {
                LOGE("threadid=%d: stuck on threadid=%d, giving up\n",
                    self->threadId, thread->threadId);
                dvmDumpAllThreads(false);
                dvmAbort();
            }
        }
    }
}
+
/*
 * Suspend all threads except the current one.  This is used by the GC,
 * the debugger, and by any thread that hits a "suspend all threads"
 * debugger event (e.g. breakpoint or exception).
 *
 * If thread N hits a "suspend all threads" breakpoint, we don't want it
 * to suspend the JDWP thread.  For the GC, we do, because the debugger can
 * create objects and even execute arbitrary code.  The "why" argument
 * allows the caller to say why the suspension is taking place.
 *
 * This can be called when a global suspend has already happened, due to
 * various debugger gymnastics, so keeping an "everybody is suspended" flag
 * doesn't work.
 *
 * DO NOT grab any locks before calling here.  We grab & release the thread
 * lock and suspend lock here (and we're not using recursive threads), and
 * we might have to self-suspend if somebody else beats us here.
 *
 * The current thread may not be attached to the VM.  This can happen if
 * we happen to GC as the result of an allocation of a Thread object.
 */
void dvmSuspendAllThreads(SuspendCause why)
{
    Thread* self = dvmThreadSelf();
    Thread* thread;

    assert(why != 0);

    /*
     * Start by grabbing the thread suspend lock.  If we can't get it, most
     * likely somebody else is in the process of performing a suspend or
     * resume, so lockThreadSuspend() will cause us to self-suspend.
     *
     * We keep the lock until all other threads are suspended.
     */
    lockThreadSuspend("susp-all", why);

    LOG_THREAD("threadid=%d: SuspendAll starting\n", self->threadId);

    /*
     * This is possible if the current thread was in VMWAIT mode when a
     * suspend-all happened, and then decided to do its own suspend-all.
     * This can happen when a couple of threads have simultaneous events
     * of interest to the debugger.
     */
    //assert(self->suspendCount == 0);

    /*
     * Increment everybody's suspend count (except our own).
     *
     * Phase 1 of 2: bump all the counts while holding the suspend count
     * lock, so the increments appear atomic to the target threads.
     */
    dvmLockThreadList(self);

    lockThreadSuspendCount();
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        if (thread == self)
            continue;

        /* debugger events don't suspend JDWP thread */
        if ((why == SUSPEND_FOR_DEBUG || why == SUSPEND_FOR_DEBUG_EVENT) &&
            thread->handle == dvmJdwpGetDebugThread(gDvm.jdwpState))
            continue;

        thread->suspendCount++;
        if (why == SUSPEND_FOR_DEBUG || why == SUSPEND_FOR_DEBUG_EVENT)
            thread->dbgSuspendCount++;
    }
    unlockThreadSuspendCount();

    /*
     * Wait for everybody in THREAD_RUNNING state to stop.  Other states
     * indicate the code is either running natively or sleeping quietly.
     * Any attempt to transition back to THREAD_RUNNING will cause a check
     * for suspension, so it should be impossible for anything to execute
     * interpreted code or modify objects (assuming native code plays nicely).
     *
     * It's also okay if the thread transitions to a non-RUNNING state.
     *
     * Note we released the threadSuspendCountLock before getting here,
     * so if another thread is fiddling with its suspend count (perhaps
     * self-suspending for the debugger) it won't block while we're waiting
     * in here.
     *
     * Phase 2 of 2: wait for each target to notice the pending suspend.
     */
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        if (thread == self)
            continue;

        /* debugger events don't suspend JDWP thread */
        if ((why == SUSPEND_FOR_DEBUG || why == SUSPEND_FOR_DEBUG_EVENT) &&
            thread->handle == dvmJdwpGetDebugThread(gDvm.jdwpState))
            continue;

        /* wait for the other thread to see the pending suspend */
        waitForThreadSuspend(self, thread);

        LOG_THREAD("threadid=%d:   threadid=%d status=%d c=%d dc=%d isSusp=%d\n", 
            self->threadId,
            thread->threadId, thread->status, thread->suspendCount,
            thread->dbgSuspendCount, thread->isSuspended);
    }

    dvmUnlockThreadList();
    unlockThreadSuspend();

    LOG_THREAD("threadid=%d: SuspendAll complete\n", self->threadId);
}
+
/*
 * Resume all threads that are currently suspended.
 *
 * The "why" must match with the previous suspend.
 */
void dvmResumeAllThreads(SuspendCause why)
{
    Thread* self = dvmThreadSelf();
    Thread* thread;
    int cc;

    lockThreadSuspend("res-all", why);  /* one suspend/resume at a time */
    LOG_THREAD("threadid=%d: ResumeAll starting\n", self->threadId);

    /*
     * Decrement the suspend counts for all threads.  No need for atomic
     * writes, since nobody should be moving until we decrement the count.
     * We do need to hold the thread list because of JNI attaches.
     */
    dvmLockThreadList(self);
    lockThreadSuspendCount();
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        if (thread == self)
            continue;

        /* debugger events don't suspend JDWP thread */
        if ((why == SUSPEND_FOR_DEBUG || why == SUSPEND_FOR_DEBUG_EVENT) &&
            thread->handle == dvmJdwpGetDebugThread(gDvm.jdwpState))
            continue;

        if (thread->suspendCount > 0) {
            thread->suspendCount--;
            if (why == SUSPEND_FOR_DEBUG || why == SUSPEND_FOR_DEBUG_EVENT)
                thread->dbgSuspendCount--;
        } else {
            /* already at zero; mismatched suspend/resume, just log it */
            LOG_THREAD("threadid=%d:  suspendCount already zero\n",
                thread->threadId);
        }
    }
    unlockThreadSuspendCount();
    dvmUnlockThreadList();

    /*
     * Broadcast a notification to all suspended threads, some or all of
     * which may choose to wake up.  No need to wait for them.
     */
    lockThreadSuspendCount();
    cc = pthread_cond_broadcast(&gDvm.threadSuspendCountCond);
    assert(cc == 0);
    unlockThreadSuspendCount();

    unlockThreadSuspend();

    LOG_THREAD("threadid=%d: ResumeAll complete\n", self->threadId);
}
+
/*
 * Undo any debugger suspensions.  This is called when the debugger
 * disconnects.
 */
void dvmUndoDebuggerSuspensions(void)
{
    Thread* self = dvmThreadSelf();
    Thread* thread;
    int cc;

    lockThreadSuspend("undo", SUSPEND_FOR_DEBUG);
    LOG_THREAD("threadid=%d: UndoDebuggerSusp starting\n", self->threadId);

    /*
     * Decrement the suspend counts for all threads.  No need for atomic
     * writes, since nobody should be moving until we decrement the count.
     * We do need to hold the thread list because of JNI attaches.
     */
    dvmLockThreadList(self);
    lockThreadSuspendCount();
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        if (thread == self)
            continue;

        /* debugger events don't suspend JDWP thread */
        if (thread->handle == dvmJdwpGetDebugThread(gDvm.jdwpState)) {
            assert(thread->dbgSuspendCount == 0);
            continue;
        }

        /* cancel exactly the debugger's share of the suspend count */
        assert(thread->suspendCount >= thread->dbgSuspendCount);
        thread->suspendCount -= thread->dbgSuspendCount;
        thread->dbgSuspendCount = 0;
    }
    unlockThreadSuspendCount();
    dvmUnlockThreadList();

    /*
     * Broadcast a notification to all suspended threads, some or all of
     * which may choose to wake up.  No need to wait for them.
     */
    lockThreadSuspendCount();
    cc = pthread_cond_broadcast(&gDvm.threadSuspendCountCond);
    assert(cc == 0);
    unlockThreadSuspendCount();

    unlockThreadSuspend();

    LOG_THREAD("threadid=%d: UndoDebuggerSusp complete\n", self->threadId);
}
+
+/*
+ * Determine if a thread is suspended.
+ *
+ * As with all operations on foreign threads, the caller should hold
+ * the thread list lock before calling.
+ */
+bool dvmIsSuspended(Thread* thread)
+{
+    /*
+     * The thread could be:
+     *  (1) Running happily.  status is RUNNING, isSuspended is false,
+     *      suspendCount is zero.  Return "false".
+     *  (2) Pending suspend.  status is RUNNING, isSuspended is false,
+     *      suspendCount is nonzero.  Return "false".
+     *  (3) Suspended.  suspendCount is nonzero, and either (status is
+     *      RUNNING and isSuspended is true) OR (status is !RUNNING).
+     *      Return "true".
+     *  (4) Waking up.  suspendCount is zero, status is RUNNING and
+     *      isSuspended is true.  Return "false" (since it could change
+     *      out from under us, unless we hold suspendCountLock).
+     */
+
+    return (thread->suspendCount != 0 &&
+            ((thread->status == THREAD_RUNNING && thread->isSuspended) ||
+             (thread->status != THREAD_RUNNING)));
+}
+
+/*
+ * Wait until another thread self-suspends.  This is specifically for
+ * synchronization between the JDWP thread and a thread that has decided
+ * to suspend itself after sending an event to the debugger.
+ *
+ * Threads that encounter "suspend all" events work as well -- the thread
+ * in question suspends everybody else and then itself.
+ *
+ * We can't hold a thread lock here or in the caller, because we could
+ * get here just before the to-be-waited-for-thread issues a "suspend all".
+ * There's an opportunity for badness if the thread we're waiting for exits
+ * and gets cleaned up, but since the thread in question is processing a
+ * debugger event, that's not really a possibility.  (To avoid deadlock,
+ * it's important that we not be in THREAD_RUNNING while we wait.)
+ */
+void dvmWaitForSuspend(Thread* thread)
+{
+    Thread* self = dvmThreadSelf();
+
+    LOG_THREAD("threadid=%d: waiting for threadid=%d to sleep\n",
+        self->threadId, thread->threadId);
+
+    assert(thread->handle != dvmJdwpGetDebugThread(gDvm.jdwpState));
+    assert(thread != self);
+    assert(self->status != THREAD_RUNNING);
+
+    waitForThreadSuspend(self, thread);
+
+    LOG_THREAD("threadid=%d: threadid=%d is now asleep\n",
+        self->threadId, thread->threadId);
+}
+
/*
 * Check to see if we need to suspend ourselves.  If so, go to sleep on
 * a condition variable.
 *
 * Takes "self" as an argument as an optimization.  Pass in NULL to have
 * it do the lookup.
 *
 * Returns "true" if we suspended ourselves.
 */
bool dvmCheckSuspendPending(Thread* self)
{
    bool didSuspend;

    if (self == NULL)
        self = dvmThreadSelf();

    /* fast path: if count is zero, bail immediately */
    /* (unsynchronized read; a racing increment is caught on the next check) */
    if (self->suspendCount == 0)
        return false;

    lockThreadSuspendCount();

    assert(self->suspendCount >= 0);

    /* count may have dropped to zero between the fast path and the lock */
    didSuspend = (self->suspendCount != 0);
    self->isSuspended = true;
    LOG_THREAD("threadid=%d: self-suspending\n", self->threadId);
    /* cond wait releases the suspend count lock while we're blocked;
     * re-check the count after every wakeup (spurious wakeups possible) */
    while (self->suspendCount != 0) {
        int cc;
        cc = pthread_cond_wait(&gDvm.threadSuspendCountCond,
                &gDvm.threadSuspendCountLock);
        assert(cc == 0);
    }
    assert(self->suspendCount == 0 && self->dbgSuspendCount == 0);
    self->isSuspended = false;
    LOG_THREAD("threadid=%d: self-reviving, status=%d\n",
        self->threadId, self->status);

    unlockThreadSuspendCount();

    return didSuspend;
}
+
/*
 * Update our status.
 *
 * The "self" argument, which may be NULL, is accepted as an optimization.
 *
 * Returns the old status.
 */
ThreadStatus dvmChangeStatus(Thread* self, ThreadStatus newStatus)
{
    ThreadStatus oldStatus;

    if (self == NULL)
        self = dvmThreadSelf();

    LOGVV("threadid=%d: (status %d -> %d)\n",
        self->threadId, self->status, newStatus);

    oldStatus = self->status;

    if (newStatus == THREAD_RUNNING) {
        /*
         * Change our status to THREAD_RUNNING.  The transition requires
         * that we check for pending suspension, because the VM considers
         * us to be "asleep" in all other states.
         *
         * We need to do the "suspend pending" check FIRST, because it grabs
         * a lock that could be held by something that wants us to suspend.
         * If we're in RUNNING it will wait for us, and we'll be waiting
         * for the lock it holds.
         */
        assert(self->status != THREAD_RUNNING);

        /* may block here until a pending suspend-all is resumed */
        dvmCheckSuspendPending(self);
        self->status = THREAD_RUNNING;
    } else {
        /*
         * Change from one state to another, neither of which is
         * THREAD_RUNNING.  This is most common during system or thread
         * initialization.
         */
        self->status = newStatus;
    }

    return oldStatus;
}
+
+/*
+ * Get a statically defined thread group from a field in the ThreadGroup
+ * Class object.  Expected arguments are "mMain" and "mSystem".
+ */
+static Object* getStaticThreadGroup(const char* fieldName)
+{
+    StaticField* groupField;
+    Object* groupObj;
+
+    groupField = dvmFindStaticField(gDvm.classJavaLangThreadGroup,
+        fieldName, "Ljava/lang/ThreadGroup;");
+    if (groupField == NULL) {
+        LOGE("java.lang.ThreadGroup does not have an '%s' field\n", fieldName);
+        dvmThrowException("Ljava/lang/IncompatibleClassChangeError;", NULL);
+        return NULL;
+    }
+    groupObj = dvmGetStaticFieldObject(groupField);
+    if (groupObj == NULL) {
+        LOGE("java.lang.ThreadGroup.%s not initialized\n", fieldName);
+        dvmThrowException("Ljava/lang/InternalError;", NULL);
+        return NULL;
+    }
+
+    return groupObj;
+}
/* Return the "system" ThreadGroup (ThreadGroup.mSystem); NULL on failure. */
Object* dvmGetSystemThreadGroup(void)
{
    return getStaticThreadGroup("mSystem");
}
/* Return the "main" ThreadGroup (ThreadGroup.mMain); NULL on failure. */
Object* dvmGetMainThreadGroup(void)
{
    return getStaticThreadGroup("mMain");
}
+
/*
 * Given a VMThread object, return the associated Thread*.
 *
 * NOTE: if the thread detaches, the struct Thread will disappear, and
 * we will be touching invalid data.  For safety, lock the thread list
 * before calling this.
 */
Thread* dvmGetThreadFromThreadObject(Object* vmThreadObj)
{
    int vmData;

    /* vmData holds the Thread* stuffed into an int field; see
     * dvmDetachCurrentThread(), which clears it via dvmSetFieldObject.
     * NOTE(review): storing a pointer in an int truncates on LP64 --
     * fine for 32-bit targets, revisit for 64-bit. */
    vmData = dvmGetFieldInt(vmThreadObj, gDvm.offJavaLangVMThread_vmData);
    return (Thread*) vmData;
}
+
+
/*
 * Conversion map for "nice" values.
 *
 * We use Android thread priority constants to be consistent with the rest
 * of the system.  In some cases adjacent entries may overlap.
 *
 * Indexed by (java priority - 1); java priorities run 1..10.
 */
static const int kNiceValues[10] = {
    ANDROID_PRIORITY_LOWEST,                /* 1 (MIN_PRIORITY) */
    ANDROID_PRIORITY_BACKGROUND + 6,
    ANDROID_PRIORITY_BACKGROUND + 3,
    ANDROID_PRIORITY_BACKGROUND,
    ANDROID_PRIORITY_NORMAL,                /* 5 (NORM_PRIORITY) */
    ANDROID_PRIORITY_NORMAL - 2,
    ANDROID_PRIORITY_NORMAL - 4,
    ANDROID_PRIORITY_URGENT_DISPLAY + 3,
    ANDROID_PRIORITY_URGENT_DISPLAY + 2,
    ANDROID_PRIORITY_URGENT_DISPLAY         /* 10 (MAX_PRIORITY) */
};
+
+/*
+ * Change the priority of a system thread to match that of the Thread object.
+ *
+ * We map a priority value from 1-10 to Linux "nice" values, where lower
+ * numbers indicate higher priority.
+ */
+void dvmChangeThreadPriority(Thread* thread, int newPriority)
+{
+    pid_t pid = thread->systemTid;
+    int newNice;
+
+    /*
+     * Clamp out-of-range values to the default priority.  Use the named
+     * Thread.h constants rather than bare 1/10/5, matching
+     * getThreadPriorityFromSystem().
+     */
+    if (newPriority < THREAD_MIN_PRIORITY || newPriority > THREAD_MAX_PRIORITY)
+    {
+        LOGW("bad priority %d\n", newPriority);
+        newPriority = THREAD_NORM_PRIORITY;
+    }
+    /* map the 1-10 Java priority onto an Android "nice" value */
+    newNice = kNiceValues[newPriority-1];
+
+    if (setpriority(PRIO_PROCESS, pid, newNice) != 0) {
+        char* str = dvmGetThreadName(thread);
+        LOGI("setPriority(%d) '%s' to prio=%d(n=%d) failed: %s\n",
+            pid, str, newPriority, newNice, strerror(errno));
+        free(str);
+    } else {
+        LOGV("setPriority(%d) to prio=%d(n=%d)\n",
+            pid, newPriority, newNice);
+    }
+}
+
+/*
+ * Get the thread priority for the current thread by querying the system.
+ * This is useful when attaching a thread through JNI.
+ *
+ * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
+ */
+static int getThreadPriorityFromSystem(void)
+{
+    int i, sysprio, jprio;
+
+    /* getpriority() can legitimately return -1, so clear errno first to
+     * distinguish a real -1 nice value from an error */
+    errno = 0;
+    sysprio = getpriority(PRIO_PROCESS, 0);
+    if (sysprio == -1 && errno != 0) {
+        LOGW("getpriority() failed: %s\n", strerror(errno));
+        return THREAD_NORM_PRIORITY;
+    }
+
+    /* walk kNiceValues (ordered from lowest to highest priority, i.e.
+     * largest nice value first) until we find the bucket we fall into */
+    jprio = THREAD_MIN_PRIORITY;
+    for (i = 0; i < NELEM(kNiceValues); i++) {
+        if (sysprio >= kNiceValues[i])
+            break;
+        jprio++;
+    }
+    if (jprio > THREAD_MAX_PRIORITY)
+        jprio = THREAD_MAX_PRIORITY;
+
+    return jprio;
+}
+
+
+/*
+ * Return true if the thread is on gDvm.threadList.
+ * Caller should not hold gDvm.threadListLock.
+ */
+bool dvmIsOnThreadList(const Thread* thread)
+{
+    bool onList;
+
+    /*
+     * Grab the list lock so the prev/next links can't change underneath
+     * us.  The list head has NULL prev/next when it's the only entry,
+     * so it needs a separate check.
+     */
+    dvmLockThreadList(NULL);
+    if (thread == gDvm.threadList)
+        onList = true;
+    else
+        onList = (thread->prev != NULL || thread->next != NULL);
+    dvmUnlockThreadList();
+
+    return onList;
+}
+
+/*
+ * Dump a thread to the log file -- just calls dvmDumpThreadEx() with an
+ * output target.
+ */
+void dvmDumpThread(Thread* thread, bool isRunning)
+{
+    DebugOutputTarget target;
+
+    /* route the dump to the Android log at INFO level */
+    dvmCreateLogOutputTarget(&target, ANDROID_LOG_INFO, LOG_TAG);
+    dvmDumpThreadEx(&target, thread, isRunning);
+}
+
+/*
+ * Print information about the specified thread.
+ *
+ * Works best when the thread in question is "self" or has been suspended.
+ * When dumping a separate thread that's still running, set "isRunning" to
+ * use a more cautious thread dump function.
+ */
+void dvmDumpThreadEx(const DebugOutputTarget* target, Thread* thread,
+    bool isRunning)
+{
+    /* tied to ThreadStatus enum */
+    static const char* kStatusNames[] = {
+        "ZOMBIE", "RUNNABLE", "TIMED_WAIT", "MONITOR", "WAIT",
+        "INITIALIZING", "STARTING", "NATIVE", "VMWAIT"
+    };
+    Object* threadObj;
+    Object* groupObj;
+    StringObject* nameStr;
+    char* threadName = NULL;
+    char* groupName = NULL;
+    bool isDaemon;
+    int priority;               // java.lang.Thread priority
+    int policy;                 // pthread policy
+    struct sched_param sp;      // pthread scheduling parameters
+
+    threadObj = thread->threadObj;
+    if (threadObj == NULL) {
+        LOGW("Can't dump thread %d: threadObj not set\n", thread->threadId);
+        return;
+    }
+    nameStr = (StringObject*) dvmGetFieldObject(threadObj,
+                gDvm.offJavaLangThread_name);
+    threadName = dvmCreateCstrFromString(nameStr);
+
+    priority = dvmGetFieldInt(threadObj, gDvm.offJavaLangThread_priority);
+    isDaemon = dvmGetFieldBoolean(threadObj, gDvm.offJavaLangThread_daemon);
+
+    /*
+     * Query the scheduler state of the thread being dumped, not the
+     * calling thread.  (Using pthread_self() here would report the
+     * dumper's policy/priority for every thread in an all-threads dump.)
+     */
+    if (pthread_getschedparam(thread->handle, &policy, &sp) != 0) {
+        LOGW("Warning: pthread_getschedparam failed\n");
+        policy = -1;
+        sp.sched_priority = -1;
+    }
+
+    /* a null value for group is not expected, but deal with it anyway */
+    groupObj = (Object*) dvmGetFieldObject(threadObj,
+                gDvm.offJavaLangThread_group);
+    if (groupObj != NULL) {
+        /* ThreadGroup.name offset isn't cached in gDvm; look it up here */
+        int offset = dvmFindFieldOffset(gDvm.classJavaLangThreadGroup,
+            "name", "Ljava/lang/String;");
+        if (offset < 0) {
+            LOGW("Unable to find 'name' field in ThreadGroup\n");
+        } else {
+            nameStr = (StringObject*) dvmGetFieldObject(groupObj, offset);
+            groupName = dvmCreateCstrFromString(nameStr);
+        }
+    }
+    if (groupName == NULL)
+        groupName = strdup("(BOGUS GROUP)");
+
+    assert(thread->status < NELEM(kStatusNames));
+    dvmPrintDebugMessage(target,
+        "\"%s\"%s prio=%d tid=%d %s\n",
+        threadName, isDaemon ? " daemon" : "",
+        priority, thread->threadId, kStatusNames[thread->status]);
+    dvmPrintDebugMessage(target,
+        "  | group=\"%s\" sCount=%d dsCount=%d s=%d obj=%p\n",
+        groupName, thread->suspendCount, thread->dbgSuspendCount,
+        thread->isSuspended, thread->threadObj);
+    dvmPrintDebugMessage(target,
+        "  | sysTid=%d nice=%d sched=%d/%d handle=%d\n",
+        thread->systemTid, getpriority(PRIO_PROCESS, thread->systemTid),
+        policy, sp.sched_priority, (int)thread->handle);
+
+#ifdef WITH_MONITOR_TRACKING
+    /* only safe to walk the locked-object list when the thread is stopped */
+    if (!isRunning) {
+        LockedObjectData* lod = thread->pLockedObjects;
+        if (lod != NULL)
+            dvmPrintDebugMessage(target, "  | monitors held:\n");
+        else
+            dvmPrintDebugMessage(target, "  | monitors held: <none>\n");
+        while (lod != NULL) {
+            dvmPrintDebugMessage(target, "  >  %p[%d] (%s)\n",
+                lod->obj, lod->recursionCount, lod->obj->clazz->descriptor);
+            lod = lod->next;
+        }
+    }
+#endif
+
+    /* use the more cautious stack walker for threads still executing */
+    if (isRunning)
+        dvmDumpRunningThreadStack(target, thread);
+    else
+        dvmDumpThreadStack(target, thread);
+
+    free(threadName);
+    free(groupName);
+
+}
+
+/*
+ * Get the name of a thread.
+ *
+ * For correctness, the caller should hold the thread list lock to ensure
+ * that the thread doesn't go away mid-call.
+ *
+ * Returns a newly-allocated string, or NULL if the Thread doesn't have a name.
+ */
+char* dvmGetThreadName(Thread* thread)
+{
+    StringObject* nameObj;
+
+    if (thread->threadObj == NULL) {
+        /* NOTE(review): presumably possible while the thread is still
+         * being created/attached -- confirm */
+        LOGW("threadObj is NULL, name not available\n");
+        return strdup("-unknown-");
+    }
+
+    /* caller owns the returned string and must free() it */
+    nameObj = (StringObject*)
+        dvmGetFieldObject(thread->threadObj, gDvm.offJavaLangThread_name);
+    return dvmCreateCstrFromString(nameObj);
+}
+
+/*
+ * Dump all threads to the log file -- just calls dvmDumpAllThreadsEx() with
+ * an output target.
+ */
+void dvmDumpAllThreads(bool grabLock)
+{
+    DebugOutputTarget target;
+
+    /* route the dump to the Android log at INFO level */
+    dvmCreateLogOutputTarget(&target, ANDROID_LOG_INFO, LOG_TAG);
+    dvmDumpAllThreadsEx(&target, grabLock);
+}
+
+/*
+ * Print information about all known threads.  Assumes they have been
+ * suspended (or are in a non-interpreting state, e.g. WAIT or NATIVE).
+ *
+ * If "grabLock" is true, we grab the thread lock list.  This is important
+ * to do unless the caller already holds the lock.
+ */
+void dvmDumpAllThreadsEx(const DebugOutputTarget* target, bool grabLock)
+{
+    Thread* thread;
+
+    dvmPrintDebugMessage(target, "DALVIK THREADS:\n");
+
+    if (grabLock)
+        dvmLockThreadList(dvmThreadSelf());
+
+    /* walk the global list, dumping each thread with the cautious
+     * ("not running") variant */
+    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+        dvmDumpThreadEx(target, thread, false);
+
+        /* sanity-check the doubly-linked list as we go */
+        assert(thread->next == NULL || thread->next->prev == thread);
+    }
+
+    if (grabLock)
+        dvmUnlockThreadList();
+}
+
+#ifdef WITH_MONITOR_TRACKING
+/*
+ * Count up the #of locked objects in the current thread.
+ */
+static int getThreadObjectCount(const Thread* self)
+{
+    const LockedObjectData* lod;
+    int numLocked = 0;
+
+    /* tally the entries on the thread's locked-object list */
+    for (lod = self->pLockedObjects; lod != NULL; lod = lod->next)
+        numLocked++;
+
+    return numLocked;
+}
+
+/*
+ * Add the object to the thread's locked object list if it doesn't already
+ * exist.  The most recently added object is the most likely to be released
+ * next, so we insert at the head of the list.
+ *
+ * If it already exists, we increase the recursive lock count.
+ *
+ * The object's lock may be thin or fat.
+ */
+void dvmAddToMonitorList(Thread* self, Object* obj, bool withTrace)
+{
+    LockedObjectData* newLod;
+    LockedObjectData* lod;
+    int* trace;
+    int depth;
+
+    /* if the object is already on the list, just bump the recursion count */
+    lod = self->pLockedObjects;
+    while (lod != NULL) {
+        if (lod->obj == obj) {
+            lod->recursionCount++;
+            LOGV("+++ +recursive lock %p -> %d\n", obj, lod->recursionCount);
+            return;
+        }
+        lod = lod->next;
+    }
+
+    newLod = (LockedObjectData*) calloc(1, sizeof(LockedObjectData));
+    if (newLod == NULL) {
+        /* %zu matches size_t; "%d" was a format-specifier mismatch */
+        LOGE("malloc failed on %zu bytes\n", sizeof(LockedObjectData));
+        return;
+    }
+    newLod->obj = obj;
+    newLod->recursionCount = 0;     /* zero-based; calloc already cleared it */
+
+    if (withTrace) {
+        trace = dvmFillInStackTraceRaw(self, &depth);
+        newLod->rawStackTrace = trace;
+        newLod->stackDepth = depth;
+    }
+
+    /* insert at head; the most recent lock is the likeliest released next */
+    newLod->next = self->pLockedObjects;
+    self->pLockedObjects = newLod;
+
+    LOGV("+++ threadid=%d: added %p, now %d\n",
+        self->threadId, newLod, getThreadObjectCount(self));
+}
+
+/*
+ * Remove the object from the thread's locked object list.  If the entry
+ * has a nonzero recursion count, we just decrement the count instead.
+ */
+void dvmRemoveFromMonitorList(Thread* self, Object* obj)
+{
+    LockedObjectData* lod;
+    LockedObjectData* prevLod;
+
+    /* find the entry, remembering its predecessor for unlinking */
+    lod = self->pLockedObjects;
+    prevLod = NULL;
+    while (lod != NULL) {
+        if (lod->obj == obj) {
+            if (lod->recursionCount > 0) {
+                /* recursive lock; drop the count but keep the entry */
+                lod->recursionCount--;
+                LOGV("+++ -recursive lock %p -> %d\n",
+                    obj, lod->recursionCount);
+                return;
+            } else {
+                break;
+            }
+        }
+        prevLod = lod;
+        lod = lod->next;
+    }
+
+    if (lod == NULL) {
+        LOGW("BUG: object %p not found in thread's lock list\n", obj);
+        return;
+    }
+    if (prevLod == NULL) {
+        /* first item in list */
+        assert(self->pLockedObjects == lod);
+        self->pLockedObjects = lod->next;
+    } else {
+        /* middle/end of list */
+        prevLod->next = lod->next;
+    }
+
+    LOGV("+++ threadid=%d: removed %p, now %d\n",
+        self->threadId, lod, getThreadObjectCount(self));
+    /* the entry owns its raw stack trace; release both allocations */
+    free(lod->rawStackTrace);
+    free(lod);
+}
+
+/*
+ * If the specified object is already in the thread's locked object list,
+ * return the LockedObjectData struct.  Otherwise return NULL.
+ */
+LockedObjectData* dvmFindInMonitorList(const Thread* self, const Object* obj)
+{
+    LockedObjectData* cur;
+
+    /* linear scan; the per-thread lock list is expected to be short */
+    for (cur = self->pLockedObjects; cur != NULL; cur = cur->next) {
+        if (cur->obj == obj)
+            return cur;
+    }
+
+    return NULL;
+}
+#endif /*WITH_MONITOR_TRACKING*/
+
+
+/*
+ * GC helper functions
+ */
+
+static void gcScanInterpStackReferences(Thread *thread)
+{
+    const u4 *framePtr;
+
+    /* walk from the innermost frame outward via the saved prevFrame links */
+    framePtr = (const u4 *)thread->curFrame;
+    while (framePtr != NULL) {
+        const StackSaveArea *saveArea;
+        const Method *method;
+
+        saveArea = SAVEAREA_FROM_FP(framePtr);
+        method = saveArea->method;
+        if (method != NULL) {
+            int i;
+            /* conservatively treat any nonzero, 4-byte-aligned register
+             * word as a possible object pointer and let dvmMarkIfObject
+             * decide */
+            for (i = method->registersSize - 1; i >= 0; i--) {
+                u4 rval = *framePtr++;
+//TODO: wrap markifobject in a macro that does pointer checks
+                if (rval != 0 && (rval & 0x3) == 0) {
+                    dvmMarkIfObject((Object *)rval);
+                }
+            }
+        }
+        /* else this is a break frame; nothing to mark.
+         */
+
+        /* Don't fall into an infinite loop if things get corrupted.
+         */
+        assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
+               saveArea->prevFrame == NULL);
+        framePtr = saveArea->prevFrame;
+    }
+}
+
+static void gcScanReferenceTable(ReferenceTable *refTable)
+{
+    Object **op;
+
+    /* heavyweight sanity checks; candidates for removal once the table
+     * code has stabilized */
+    assert(refTable != NULL);
+    assert(refTable->table != NULL);
+    assert(refTable->nextEntry != NULL);
+    assert((uintptr_t)refTable->nextEntry >= (uintptr_t)refTable->table);
+    assert(refTable->nextEntry - refTable->table <= refTable->maxEntries);
+
+    /* mark every entry in [table, nextEntry) */
+    for (op = refTable->table; op != refTable->nextEntry; op++)
+        dvmMarkObjectNonNull(*op);
+}
+
+/*
+ * Scan a Thread and mark any objects it references.
+ */
+static void gcScanThread(Thread *thread)
+{
+    assert(thread != NULL);
+
+    /*
+     * The target thread must be suspended or in a state where it can't do
+     * any harm (e.g. in Object.wait()).  The only exception is the current
+     * thread, which will still be active and in the "running" state.
+     *
+     * (Newly-created threads shouldn't be able to shift themselves to
+     * RUNNING without a suspend-pending check, so this shouldn't cause
+     * a false-positive.)
+     */
+    assert(thread->status != THREAD_RUNNING || thread->isSuspended ||
+            thread == dvmThreadSelf());
+
+    /* the HPROF_* macros tag the marks that follow with a root type,
+     * so heap-profile dumps can attribute each root */
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_THREAD_OBJECT, thread->threadId);
+
+    dvmMarkObject(thread->threadObj);   // could be NULL, when constructing
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_NATIVE_STACK, thread->threadId);
+
+    dvmMarkObject(thread->exception);   // usually NULL
+    gcScanReferenceTable(&thread->internalLocalRefTable);
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_JNI_LOCAL, thread->threadId);
+
+    gcScanReferenceTable(&thread->jniLocalRefTable);
+
+    /* the monitor table is initialized on first use (see Thread.h), so
+     * it may legitimately be absent */
+    if (thread->jniMonitorRefTable.table != NULL) {
+        HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_JNI_MONITOR, thread->threadId);
+
+        gcScanReferenceTable(&thread->jniMonitorRefTable);
+    }
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_JAVA_FRAME, thread->threadId);
+
+    gcScanInterpStackReferences(thread);
+
+    HPROF_CLEAR_GC_SCAN_STATE();
+}
+
+static void gcScanAllThreads()
+{
+    Thread *thread;
+
+    /* Hold the thread list lock so the next/prev links stay valid
+     * for the duration of the walk.
+     */
+    dvmLockThreadList(dvmThreadSelf());
+
+    thread = gDvm.threadList;
+    while (thread != NULL) {
+        /* Scan every thread, including the current one; our own stack
+         * needs marking too.
+         */
+        gcScanThread(thread);
+        thread = thread->next;
+    }
+
+    dvmUnlockThreadList();
+}
+
+void dvmGcScanRootThreadGroups()
+{
+    /* We scan the VM's list of threads instead of going
+     * through the actual ThreadGroups, but it should be
+     * equivalent.
+     *
+     * This assumes that the ThreadGroup class object is in
+     * the root set, which should always be true; it's
+     * loaded by the built-in class loader, which is part
+     * of the root set.
+     */
+    gcScanAllThreads();
+}
+
diff --git a/vm/Thread.h b/vm/Thread.h
new file mode 100644
index 0000000..655b435
--- /dev/null
+++ b/vm/Thread.h
@@ -0,0 +1,456 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * VM thread support.
+ */
+#ifndef _DALVIK_THREAD
+#define _DALVIK_THREAD
+
+#include "jni.h"
+
+#if defined(CHECK_MUTEX) && !defined(__USE_UNIX98)
+/* Linux lacks this unless you #define __USE_UNIX98 */
+int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type);
+enum { PTHREAD_MUTEX_ERRORCHECK = PTHREAD_MUTEX_ERRORCHECK_NP };
+#endif
+
+#ifdef WITH_MONITOR_TRACKING
+struct LockedObjectData;
+#endif
+
+/*
+ * Current status; these map to JDWP constants, so don't rearrange them.
+ * (If you do alter this, update the strings in dvmDumpThread and the
+ * conversion table in VMThread.java.)
+ *
+ * Note that "suspended" is orthogonal to these values (so says JDWP).
+ */
+typedef enum ThreadStatus {
+    /* NOTE: the kStatusNames table in the thread-dump code indexes this
+     * enum directly, so keep the two in sync */
+    /* these match up with JDWP values */
+    THREAD_ZOMBIE       = 0,        /* TERMINATED */
+    THREAD_RUNNING      = 1,        /* RUNNABLE or running now */
+    THREAD_TIMED_WAIT   = 2,        /* TIMED_WAITING in Object.wait() */
+    THREAD_MONITOR      = 3,        /* BLOCKED on a monitor */
+    THREAD_WAIT         = 4,        /* WAITING in Object.wait() */
+    /* non-JDWP states */
+    THREAD_INITIALIZING = 5,        /* allocated, not yet running */
+    THREAD_STARTING     = 6,        /* started, not yet on thread list */
+    THREAD_NATIVE       = 7,        /* off in a JNI native method */
+    THREAD_VMWAIT       = 8,        /* waiting on a VM resource */
+} ThreadStatus;
+
+/* thread priorities, from java.lang.Thread */
+enum {
+    THREAD_MIN_PRIORITY     = 1,
+    THREAD_NORM_PRIORITY    = 5,
+    THREAD_MAX_PRIORITY     = 10,
+};
+
+
+/* initialization */
+bool dvmThreadStartup(void);
+bool dvmThreadObjStartup(void);
+void dvmThreadShutdown(void);
+void dvmSlayDaemons(void);
+
+
+#define kJniLocalRefMax         512     /* arbitrary; should be plenty */
+#define kInternalRefDefault     32      /* equally arbitrary */
+#define kInternalRefMax         4096    /* mainly a sanity check */
+
+#define kMinStackSize       (512 + STACK_OVERFLOW_RESERVE)
+#define kDefaultStackSize   (8*1024)    /* two 4K pages */
+#define kMaxStackSize       (256*1024 + STACK_OVERFLOW_RESERVE)
+
+/*
+ * Our per-thread data.
+ *
+ * These are allocated on the system heap.
+ */
+typedef struct Thread {
+    /* small unique integer; useful for "thin" locks and debug messages */
+    u4          threadId;
+
+    /*
+     * Thread's current status.  Can only be changed by the thread itself
+     * (i.e. don't mess with this from other threads).
+     */
+    ThreadStatus status;
+
+    /*
+     * This is the number of times the thread has been suspended.  When the
+     * count drops to zero, the thread resumes.
+     *
+     * "dbgSuspendCount" is the portion of the suspend count that the
+     * debugger is responsible for.  This has to be tracked separately so
+     * that we can recover correctly if the debugger abruptly disconnects
+     * (suspendCount -= dbgSuspendCount).  The debugger should not be able
+     * to resume GC-suspended threads, because we ignore the debugger while
+     * a GC is in progress.
+     *
+     * Both of these are guarded by gDvm.threadSuspendCountLock.
+     *
+     * (We could store both of these in the same 32-bit, using 16-bit
+     * halves, to make atomic ops possible.  In practice, you only need
+     * to read suspendCount, and we need to hold a mutex when making
+     * changes, so there's no need to merge them.  Note the non-debug
+     * component will rarely be other than 1 or 0 -- not sure it's even
+     * possible with the way mutexes are currently used.)
+     */
+    int         suspendCount;
+    int         dbgSuspendCount;
+
+    /*
+     * Set to true when the thread suspends itself, false when it wakes up.
+     * This is only expected to be set when status==THREAD_RUNNING.
+     */
+    bool        isSuspended;
+
+    /* thread handle, as reported by pthread_self() */
+    pthread_t   handle;
+
+    /* thread ID, only useful under Linux */
+    pid_t       systemTid;
+
+    /* start (high addr) of interp stack (subtract size to get malloc addr) */
+    u1*         interpStackStart;
+
+    /* current limit of stack; flexes for StackOverflowError */
+    const u1*   interpStackEnd;
+
+    /* interpreter stack size; our stacks are fixed-length */
+    int         interpStackSize;
+    /* NOTE(review): appears to flag a detected interp-stack overflow --
+     * confirm against the stack-handling code */
+    bool        stackOverflowed;
+
+    /* FP of bottom-most (currently executing) stack frame on interp stack */
+    void*       curFrame;
+
+    /* current exception, or NULL if nothing pending */
+    Object*     exception;
+
+    /* the java/lang/Thread that we are associated with */
+    Object*     threadObj;
+
+    /* the JNIEnv pointer associated with this thread */
+    JNIEnv*     jniEnv;
+
+    /* internal reference tracking */
+    ReferenceTable  internalLocalRefTable;
+
+    /* JNI local reference tracking */
+    ReferenceTable  jniLocalRefTable;
+
+    /* JNI native monitor reference tracking (initialized on first use) */
+    ReferenceTable  jniMonitorRefTable;
+
+    /* hack to make JNI_OnLoad work right */
+    Object*     classLoaderOverride;
+
+    /* pointer to the monitor lock we're currently waiting on */
+    /* (do not set or clear unless the Monitor itself is held) */
+    /* TODO: consider changing this to Object* for better JDWP interaction */
+    Monitor*    waitMonitor;
+    /* set when we confirm that the thread must be interrupted from a wait */
+    bool        interruptingWait;
+    /* thread "interrupted" status; stays raised until queried or thrown */
+    bool        interrupted;
+
+    /*
+     * Set to true when the thread is in the process of throwing an
+     * OutOfMemoryError.
+     */
+    bool        throwingOOME;
+
+    /* links to rest of thread list; grab global lock before traversing */
+    struct Thread* prev;
+    struct Thread* next;
+
+    /* JDWP invoke-during-breakpoint support */
+    DebugInvokeReq  invokeReq;
+
+#ifdef WITH_MONITOR_TRACKING
+    /* objects locked by this thread; most recent is at head of list */
+    struct LockedObjectData* pLockedObjects;
+#endif
+
+#ifdef WITH_ALLOC_LIMITS
+    /* allocation limit, for Debug.setAllocationLimit() regression testing */
+    int         allocLimit;
+#endif
+
+#ifdef WITH_PROFILER
+    /* base time for per-thread CPU timing */
+    bool        cpuClockBaseSet;
+    u8          cpuClockBase;
+
+    /* memory allocation profiling state */
+    AllocProfState allocProf;
+#endif
+
+#ifdef WITH_JNI_STACK_CHECK
+    u4          stackCrc;
+#endif
+} Thread;
+
+/* start point for an internal thread; mimics pthread args */
+typedef void* (*InternalThreadStart)(void* arg);
+
+/* args for internal thread creation */
+typedef struct InternalStartArgs {
+    /* inputs */
+    InternalThreadStart func;       /* thread entry point */
+    void*       funcArg;            /* opaque argument handed to func */
+    char*       name;
+    Object*     group;
+    bool        isDaemon;
+    /* result */
+    /* NOTE(review): presumably written by the new thread and polled by the
+     * creator, hence the volatile qualifiers -- confirm against Thread.c */
+    volatile Thread** pThread;
+    volatile int*     pCreateStatus;
+} InternalStartArgs;
+
+/* finish init */
+bool dvmPrepMainForJni(JNIEnv* pEnv);
+bool dvmPrepMainThread(void);
+
+/* utility function to get the tid */
+pid_t dvmGetSysThreadId(void);
+
+/*
+ * Get our Thread* from TLS.
+ *
+ * Returns NULL if this isn't a thread that the VM is aware of.
+ */
+Thread* dvmThreadSelf(void);
+
+/* grab the thread list global lock */
+void dvmLockThreadList(Thread* self);
+/* release the thread list global lock */
+void dvmUnlockThreadList(void);
+
+/*
+ * Thread suspend/resume, used by the GC and debugger.
+ */
+typedef enum SuspendCause {
+    SUSPEND_NOT = 0,
+    SUSPEND_FOR_GC,
+    SUSPEND_FOR_DEBUG,
+    SUSPEND_FOR_DEBUG_EVENT,
+    SUSPEND_FOR_STACK_DUMP,
+    SUSPEND_FOR_DEX_OPT,
+} SuspendCause;
+void dvmSuspendThread(Thread* thread);
+void dvmSuspendSelf(bool jdwpActivity);
+void dvmResumeThread(Thread* thread);
+void dvmSuspendAllThreads(SuspendCause why);
+void dvmResumeAllThreads(SuspendCause why);
+void dvmUndoDebuggerSuspensions(void);
+
+/*
+ * Check suspend state.  Grab threadListLock before calling.
+ */
+bool dvmIsSuspended(Thread* thread);
+
+/*
+ * Wait until a thread has suspended.  (Used by debugger support.)
+ */
+void dvmWaitForSuspend(Thread* thread);
+
+/*
+ * Check to see if we should be suspended now.  If so, suspend ourselves
+ * by sleeping on a condition variable.
+ *
+ * If "self" is NULL, this will use dvmThreadSelf().
+ */
+bool dvmCheckSuspendPending(Thread* self);
+
+/*
+ * Fast test for use in the interpreter.  If our suspend count is nonzero,
+ * do a more rigorous evaluation.
+ */
+INLINE void dvmCheckSuspendQuick(Thread* self) {
+    /* common case: no suspension pending, so a single test falls through */
+    if (self->suspendCount != 0)
+        dvmCheckSuspendPending(self);
+}
+
+/*
+ * Used when changing thread state.  Threads may only change their own.
+ * The "self" argument, which may be NULL, is accepted as an optimization.
+ *
+ * If you're calling this before waiting on a resource (e.g. THREAD_WAIT
+ * or THREAD_MONITOR), do so in the same function as the wait -- this records
+ * the current stack depth for the GC.
+ *
+ * If you're changing to THREAD_RUNNING, this will check for suspension.
+ *
+ * Returns the old status.
+ */
+ThreadStatus dvmChangeStatus(Thread* self, ThreadStatus newStatus);
+
+/*
+ * Initialize a mutex.
+ */
+INLINE void dvmInitMutex(pthread_mutex_t* pMutex)
+{
+#ifdef CHECK_MUTEX
+    /* error-checking mutexes detect recursive locks and unlock-by-wrong-
+     * thread, at some runtime cost; debug builds only */
+    pthread_mutexattr_t attr;
+    int cc;
+
+    pthread_mutexattr_init(&attr);
+    cc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK_NP);
+    assert(cc == 0);
+    pthread_mutex_init(pMutex, &attr);
+    pthread_mutexattr_destroy(&attr);
+#else
+    pthread_mutex_init(pMutex, NULL);       // default=PTHREAD_MUTEX_FAST_NP
+#endif
+}
+
+/*
+ * Grab a plain mutex.
+ */
+INLINE void dvmLockMutex(pthread_mutex_t* pMutex)
+{
+    /* failure indicates an invalid or (with CHECK_MUTEX) misused mutex;
+     * fatal in debug builds, ignored in release builds */
+    int cc = pthread_mutex_lock(pMutex);
+    assert(cc == 0);
+}
+
+/*
+ * Unlock pthread mutex.
+ */
+INLINE void dvmUnlockMutex(pthread_mutex_t* pMutex)
+{
+    /* failure indicates an invalid or (with CHECK_MUTEX) misused mutex;
+     * fatal in debug builds, ignored in release builds */
+    int cc = pthread_mutex_unlock(pMutex);
+    assert(cc == 0);
+}
+
+/*
+ * Destroy a mutex.
+ */
+INLINE void dvmDestroyMutex(pthread_mutex_t* pMutex)
+{
+    /* fails if the mutex is locked or otherwise in use; fatal in debug
+     * builds, ignored in release builds */
+    int cc = pthread_mutex_destroy(pMutex);
+    assert(cc == 0);
+}
+
+/*
+ * Create a thread as a result of java.lang.Thread.start().
+ */
+bool dvmCreateInterpThread(Object* threadObj, int reqStackSize);
+
+/*
+ * Create a thread internal to the VM.  It's visible to interpreted code,
+ * but found in the "system" thread group rather than "main".
+ */
+bool dvmCreateInternalThread(pthread_t* pHandle, const char* name,
+    InternalThreadStart func, void* funcArg);
+
+/*
+ * Attach or detach the current thread from the VM.
+ */
+bool dvmAttachCurrentThread(const JavaVMAttachArgs* pArgs, bool isDaemon);
+void dvmDetachCurrentThread(void);
+
+/*
+ * Get the "main" or "system" thread group.
+ */
+Object* dvmGetMainThreadGroup(void);
+Object* dvmGetSystemThreadGroup(void);
+
+/*
+ * Given a java/lang/VMThread object, return our Thread.
+ */
+Thread* dvmGetThreadFromThreadObject(Object* vmThreadObj);
+
+/*
+ * Sleep in a thread.  Returns when the sleep timer returns or the thread
+ * is interrupted.
+ */
+void dvmThreadSleep(u8 msec, u4 nsec);
+
+/*
+ * Interrupt a thread.  If it's waiting on a monitor, wake it up.
+ */
+void dvmThreadInterrupt(Thread* thread);
+
+/*
+ * Get the name of a thread.  (For safety, hold the thread list lock.)
+ */
+char* dvmGetThreadName(Thread* thread);
+
+/*
+ * Return true if a thread is on the internal list.  If it is, the
+ * thread is part of the GC's root set.
+ */
+bool dvmIsOnThreadList(const Thread* thread);
+ 
+/*
+ * Get/set the JNIEnv field.
+ */
+INLINE JNIEnv* dvmGetThreadJNIEnv(Thread* self) { return self->jniEnv; }
+INLINE void dvmSetThreadJNIEnv(Thread* self, JNIEnv* env) { self->jniEnv = env;}
+
+/*
+ * Update the priority value of the underlying pthread.
+ */
+void dvmChangeThreadPriority(Thread* thread, int newPriority);
+
+
+/*
+ * Debug: dump information about a single thread.
+ */
+void dvmDumpThread(Thread* thread, bool isRunning);
+void dvmDumpThreadEx(const DebugOutputTarget* target, Thread* thread,
+    bool isRunning);
+
+/*
+ * Debug: dump information about all threads.
+ */
+void dvmDumpAllThreads(bool grabLock);
+void dvmDumpAllThreadsEx(const DebugOutputTarget* target, bool grabLock);
+
+
+#ifdef WITH_MONITOR_TRACKING
+/*
+ * Track locks held by the current thread, along with the stack trace at
+ * the point the lock was acquired.
+ *
+ * At any given time the number of locks held across the VM should be
+ * fairly small, so there's no reason not to generate and store the entire
+ * stack trace.
+ */
+typedef struct LockedObjectData {
+    /* the locked object */
+    struct Object*  obj;
+
+    /* number of times it has been locked recursively (zero-based ref count) */
+    int             recursionCount;
+
+    /* stack trace at point of initial acquire */
+    /* (owned by this entry; freed by dvmRemoveFromMonitorList) */
+    u4              stackDepth;
+    int*            rawStackTrace;
+
+    /* next entry on the thread's singly-linked lock list */
+    struct LockedObjectData* next;
+} LockedObjectData;
+
+/*
+ * Add/remove/find objects from the thread's monitor list.
+ */
+void dvmAddToMonitorList(Thread* self, Object* obj, bool withTrace);
+void dvmRemoveFromMonitorList(Thread* self, Object* obj);
+LockedObjectData* dvmFindInMonitorList(const Thread* self, const Object* obj);
+#endif
+
+#endif /*_DALVIK_THREAD*/
diff --git a/vm/UtfString.c b/vm/UtfString.c
new file mode 100644
index 0000000..3248d59
--- /dev/null
+++ b/vm/UtfString.c
@@ -0,0 +1,488 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * UTF-8 and Unicode string manipulation, plus java/lang/String convenience
+ * functions.
+ *
+ * In most cases we populate the fields in the String object directly,
+ * rather than going through an instance field lookup.
+ */
+#include "Dalvik.h"
+#include <stdlib.h>
+
+/*
+ * Initialize string globals.
+ *
+ * This isn't part of the VM init sequence because it's hard to get the
+ * timing right -- we need it to happen after java/lang/String has been
+ * loaded, but before anybody wants to use a string.  It's easiest to
+ * just initialize it on first use.
+ *
+ * In some unusual circumstances (e.g. trying to throw an exception because
+ * String implements java/lang/CharSequence, but CharSequence doesn't exist)
+ * we can try to create an exception string internally before anything has
+ * really tried to use String.  In that case we basically self-destruct.
+ */
+static bool stringStartup()
+{
+    /* guard against recursive entry, e.g. an exception thrown while
+     * String itself is still being resolved */
+    if (gDvm.javaLangStringReady < 0) {
+        LOGE("ERROR: reentrant string initialization\n");
+        assert(false);
+        return false;
+    }
+    assert(gDvm.javaLangStringReady == 0);
+
+    /* mark "initialization in progress" */
+    gDvm.javaLangStringReady = -1;
+
+    if (gDvm.classJavaLangString == NULL)
+        gDvm.classJavaLangString =
+            dvmFindSystemClassNoInit("Ljava/lang/String;");
+
+    /* BUGFIX: bail out cleanly if the class lookup failed, instead of
+     * crashing inside dvmFindFieldOffset() on a NULL class pointer */
+    if (gDvm.classJavaLangString == NULL) {
+        LOGE("Unable to find java/lang/String\n");
+        return false;
+    }
+
+    /* cache the instance field offsets we poke directly */
+    gDvm.offJavaLangString_value =
+        dvmFindFieldOffset(gDvm.classJavaLangString, "value", "[C");
+    gDvm.offJavaLangString_count =
+        dvmFindFieldOffset(gDvm.classJavaLangString, "count", "I");
+    gDvm.offJavaLangString_offset =
+        dvmFindFieldOffset(gDvm.classJavaLangString, "offset", "I");
+    gDvm.offJavaLangString_hashCode =
+        dvmFindFieldOffset(gDvm.classJavaLangString, "hashCode", "I");
+
+    if (gDvm.offJavaLangString_value < 0 ||
+        gDvm.offJavaLangString_count < 0 ||
+        gDvm.offJavaLangString_offset < 0 ||
+        gDvm.offJavaLangString_hashCode < 0)
+    {
+        LOGE("VM-required field missing from java/lang/String\n");
+        return false;
+    }
+
+    gDvm.javaLangStringReady = 1;
+
+    return true;
+}
+
+/*
+ * Discard heap-allocated storage.
+ */
+void dvmStringShutdown()
+{
+    // currently unused -- stringStartup() only caches offsets in gDvm,
+    // so there is no heap-allocated storage to release
+}
+
+/*
+ * Compute a hash code on a UTF-8 string, for use with internal hash tables.
+ *
+ * This may or may not yield the same results as the java/lang/String
+ * computeHashCode() function.  (To make sure this doesn't get abused,
+ * I'm initializing the hash code to 1 so they *don't* match up.)
+ *
+ * It would be more correct to invoke dexGetUtf16FromUtf8() here and compute
+ * the hash with the result.  That way, if something encoded the same
+ * character in two different ways, the hash value would be the same.  For
+ * our purposes that isn't necessary.
+ */
+u4 dvmComputeUtf8Hash(const char* utf8Str)
+{
+    /*
+     * Classic multiply-by-31 string hash over the raw bytes.  Seeded
+     * with 1 (not 0) so the result deliberately differs from
+     * java/lang/String's hash of the same text.
+     */
+    u4 hash;
+
+    for (hash = 1; *utf8Str != '\0'; utf8Str++)
+        hash = hash * 31 + *utf8Str;
+
+    return hash;
+}
+
+/*
+ * Like "strlen", but for strings encoded with "modified" UTF-8.
+ *
+ * The value returned is the number of characters, which may or may not
+ * be the same as the number of bytes.
+ *
+ * (If this needs optimizing, try: mask against 0xa0, shift right 5,
+ * get increment {1-3} from table of 8 values.)
+ */
+int dvmUtf8Len(const char* utf8Str)
+{
+    /*
+     * Count characters in a "modified" UTF-8 string.  The lead byte
+     * tells us how many continuation bytes to skip: high bit set means
+     * at least two bytes, bit 0x20 also set means three.
+     */
+    int len = 0;
+    int ic;
+
+    while ((ic = *utf8Str++) != '\0') {
+        len++;
+        if ((ic & 0x80) == 0)
+            continue;               /* one-byte (ASCII) encoding */
+        utf8Str++;                  /* at least a two-byte encoding */
+        if ((ic & 0x20) != 0)
+            utf8Str++;              /* three-byte encoding */
+    }
+
+    return len;
+}
+
+/*
+ * Convert a "modified" UTF-8 string to UTF-16.
+ */
+void dvmConvertUtf8ToUtf16(u2* utf16Str, const char* utf8Str)
+{
+    /* output is NOT NUL-terminated; caller sizes the buffer with
+     * dvmUtf8Len() and tracks the length separately */
+    while (*utf8Str != '\0')
+        *utf16Str++ = dexGetUtf16FromUtf8(&utf8Str);
+}
+
+/*
+ * Given a UTF-16 string, compute the length of the corresponding UTF-8
+ * string in bytes.
+ */
+static int utf16_utf8ByteLen(const u2* utf16Str, int len)
+{
+    /*
+     * Modified UTF-8 sizing: plain ASCII (1..0x7f) takes one byte,
+     * anything above 0x7ff takes three, and everything else --
+     * including an embedded NUL -- takes two.
+     */
+    int utf8Len = 0;
+
+    while (len--) {
+        unsigned int uic = *utf16Str++;
+
+        if (uic >= 1 && uic <= 0x7f)
+            utf8Len += 1;           /* common case: 7-bit ASCII */
+        else if (uic > 0x07ff)
+            utf8Len += 3;           /* three-byte encoding */
+        else
+            utf8Len += 2;           /* two-byte encoding (incl. NUL) */
+    }
+    return utf8Len;
+}
+
+/*
+ * Convert a UTF-16 string to UTF-8.
+ *
+ * Make sure you allocate "utf8Str" with the result of utf16_utf8ByteLen(),
+ * not just "len".
+ */
+static void convertUtf16ToUtf8(char* utf8Str, const u2* utf16Str, int len)
+{
+    /*
+     * Emit "modified" UTF-8: ASCII passes through, chars above 0x7ff
+     * get the three-byte form, everything else (including U+0000) gets
+     * the two-byte form.  Output buffer must be sized with
+     * utf16_utf8ByteLen() plus one for the terminating NUL.
+     */
+    assert(len >= 0);
+
+    while (len--) {
+        unsigned int uic = *utf16Str++;
+
+        if (uic >= 1 && uic <= 0x7f) {
+            /* common case: 7-bit ASCII, single byte */
+            *utf8Str++ = uic;
+        } else if (uic > 0x07ff) {
+            /* three-byte encoding */
+            *utf8Str++ = (uic >> 12) | 0xe0;
+            *utf8Str++ = ((uic >> 6) & 0x3f) | 0x80;
+            *utf8Str++ = (uic & 0x3f) | 0x80;
+        } else {
+            /* two-byte encoding; NUL lands here per modified UTF-8 */
+            *utf8Str++ = (uic >> 6) | 0xc0;
+            *utf8Str++ = (uic & 0x3f) | 0x80;
+        }
+    }
+
+    *utf8Str = '\0';
+}
+
+/*
+ * Use the java/lang/String.computeHashCode() algorithm.
+ */
+static inline u4 dvmComputeUtf16Hash(const u2* utf16Str, int len)
+{
+    /* same algorithm as java/lang/String.computeHashCode(): h = h*31+c */
+    u4 hash;
+
+    for (hash = 0; len > 0; len--)
+        hash = hash * 31 + *utf16Str++;
+
+    return hash;
+}
+/*
+ * Compute the hash of a String object over its live character range.
+ */
+u4 dvmComputeStringHash(StringObject* strObj)
+{
+    ArrayObject* charArray;
+    int strOff, strLen;
+
+    /* pull the backing char[] plus offset/count out of the String */
+    charArray = (ArrayObject*) dvmGetFieldObject((Object*) strObj,
+                                gDvm.offJavaLangString_value);
+    strLen = dvmGetFieldInt((Object*) strObj, gDvm.offJavaLangString_count);
+    strOff = dvmGetFieldInt((Object*) strObj, gDvm.offJavaLangString_offset);
+
+    return dvmComputeUtf16Hash((u2*) charArray->contents + strOff, strLen);
+}
+
+/*
+ * Create a new java/lang/String object, using the string data in "utf8Str".
+ *
+ * Note that "allocFlags" affects both of the allocations here.  If you
+ * use ALLOC_DONT_TRACK in a context where a GC could happen between the
+ * two allocations, you could lose the array reference.
+ *
+ * Returns NULL and throws an exception on failure.
+ */
+StringObject* dvmCreateStringFromCstr(const char* utf8Str, int allocFlags)
+{
+    assert(utf8Str != NULL);
+
+    /* measure the UTF-16 length, then defer to the common path */
+    int utf16Length = dvmUtf8Len(utf8Str);
+    return dvmCreateStringFromCstrAndLength(utf8Str, utf16Length, allocFlags);
+}
+
+/*
+ * Create a java/lang/String from a C string, given its UTF-16 length
+ * (number of UTF-16 code points).
+ *
+ * The caller must call dvmReleaseTrackedAlloc() on the return value or
+ * use a non-default value for "allocFlags".  It is never appropriate
+ * to use ALLOC_DONT_TRACK with this function.
+ *
+ * Returns NULL and throws an exception on failure.
+ */
+StringObject* dvmCreateStringFromCstrAndLength(const char* utf8Str,
+    u4 utf16Length, int allocFlags)
+{
+    StringObject* newObj;
+    ArrayObject* chars;
+    u4 hashCode = 0;
+
+    //LOGV("Creating String from '%s'\n", utf8Str);
+    assert(allocFlags != ALLOC_DONT_TRACK);     /* don't currently need */
+    assert(utf8Str != NULL);
+
+    /* lazy-init the String class/field offsets on first use */
+    if (gDvm.javaLangStringReady <= 0) {
+        if (!stringStartup())
+            return NULL;
+    }
+
+    /* init before alloc */
+    if (!dvmIsClassInitialized(gDvm.classJavaLangString) &&
+        !dvmInitClass(gDvm.classJavaLangString))
+    {
+        return NULL;
+    }
+
+    /*
+     * "allocFlags" applies to both allocations; with tracking enabled
+     * a GC between the two can't reclaim "newObj" (see header comment).
+     */
+    newObj = (StringObject*) dvmAllocObject(gDvm.classJavaLangString,
+                allocFlags);
+    if (newObj == NULL)
+        return NULL;
+
+    chars = dvmAllocPrimitiveArray('C', utf16Length, allocFlags);
+    if (chars == NULL) {
+        dvmReleaseTrackedAllocIFN((Object*) newObj, NULL, allocFlags);
+        return NULL;
+    }
+    dvmConvertUtf8ToUtf16((u2*)chars->contents, utf8Str);
+    hashCode = dvmComputeUtf16Hash((u2*) chars->contents, utf16Length);
+
+    /* store the array ref into the String before dropping our tracked
+     * ref, so the array stays reachable through the String */
+    dvmSetFieldObject((Object*)newObj, gDvm.offJavaLangString_value,
+        (Object*)chars);
+    dvmReleaseTrackedAllocIFN((Object*) chars, NULL, allocFlags);
+    dvmSetFieldInt((Object*)newObj, gDvm.offJavaLangString_count, utf16Length);
+    dvmSetFieldInt((Object*)newObj, gDvm.offJavaLangString_hashCode, hashCode);
+    /* leave offset set to zero */
+
+    /* debugging stuff */
+    //dvmDumpObject((Object*)newObj);
+    //printHexDumpEx(ANDROID_LOG_DEBUG, chars->contents, utf16Length * 2,
+    //    kHexDumpMem);
+
+    /* caller may need to dvmReleaseTrackedAlloc(newObj) */
+    return newObj;
+}
+
+/*
+ * Create a new java/lang/String object, using the Unicode data.
+ */
+StringObject* dvmCreateStringFromUnicode(const u2* unichars, int len)
+{
+    StringObject* newObj;
+    ArrayObject* chars;
+    u4 hashCode = 0;
+
+    /* we allow a null pointer if the length is zero */
+    assert(len == 0 || unichars != NULL);
+
+    /* lazy-init the String class/field offsets on first use */
+    if (gDvm.javaLangStringReady <= 0) {
+        if (!stringStartup())
+            return NULL;
+    }
+
+    /* init before alloc */
+    if (!dvmIsClassInitialized(gDvm.classJavaLangString) &&
+        !dvmInitClass(gDvm.classJavaLangString))
+    {
+        return NULL;
+    }
+
+    /* both allocations are tracked, so a GC between them is safe */
+    newObj = (StringObject*) dvmAllocObject(gDvm.classJavaLangString,
+        ALLOC_DEFAULT);
+    if (newObj == NULL)
+        return NULL;
+
+    chars = dvmAllocPrimitiveArray('C', len, ALLOC_DEFAULT);
+    if (chars == NULL) {
+        dvmReleaseTrackedAlloc((Object*) newObj, NULL);
+        return NULL;
+    }
+    if (len > 0)
+        memcpy(chars->contents, unichars, len * sizeof(u2));
+    hashCode = dvmComputeUtf16Hash((u2*) chars->contents, len);
+
+    /* store the array ref before releasing our tracked ref to it */
+    dvmSetFieldObject((Object*)newObj, gDvm.offJavaLangString_value,
+        (Object*)chars);
+    dvmReleaseTrackedAlloc((Object*) chars, NULL);
+    dvmSetFieldInt((Object*)newObj, gDvm.offJavaLangString_count, len);
+    dvmSetFieldInt((Object*)newObj, gDvm.offJavaLangString_hashCode, hashCode);
+    /* leave offset set to zero */
+
+    /* debugging stuff */
+    //dvmDumpObject((Object*)newObj);
+    //printHexDumpEx(ANDROID_LOG_DEBUG, chars->contents, len*2, kHexDumpMem);
+
+    /* caller must dvmReleaseTrackedAlloc(newObj) */
+    return newObj;
+}
+
+/*
+ * Create a new C string from a java/lang/String object.
+ *
+ * Returns NULL if the object is NULL.
+ */
+char* dvmCreateCstrFromString(StringObject* jstr)
+{
+    char* newStr;
+    ArrayObject* chars;
+    int len, byteLen, offset;
+    const u2* data;
+
+    assert(gDvm.javaLangStringReady > 0);
+
+    if (jstr == NULL)
+        return NULL;
+
+    /* locate the live character range inside the backing char[] */
+    len = dvmGetFieldInt((Object*) jstr, gDvm.offJavaLangString_count);
+    offset = dvmGetFieldInt((Object*) jstr, gDvm.offJavaLangString_offset);
+    chars = (ArrayObject*) dvmGetFieldObject((Object*) jstr,
+                                gDvm.offJavaLangString_value);
+    data = (const u2*) chars->contents + offset;
+    assert(offset + len <= (int) chars->length);
+
+    /* size the buffer exactly; note NULL is also returned on malloc
+     * failure, not only for a NULL "jstr".  Caller must free(). */
+    byteLen = utf16_utf8ByteLen(data, len);
+    newStr = (char*) malloc(byteLen+1);
+    if (newStr == NULL)
+        return NULL;
+    convertUtf16ToUtf8(newStr, data, len);
+
+    return newStr;
+}
+
+/*
+ * Create a UTF-8 C string from a region of a java/lang/String.  (Used by
+ * the JNI GetStringUTFRegion call.)
+ */
+void dvmCreateCstrFromStringRegion(StringObject* jstr, int start, int len,
+    char* buf)
+{
+    const u2* data;
+
+    /* no bounds checking here: the caller must supply a valid
+     * [start, start+len) range and a "buf" large enough for the
+     * modified-UTF-8 output plus the terminating NUL */
+    data = dvmStringChars(jstr) + start;
+    convertUtf16ToUtf8(buf, data, len);
+}
+
+/*
+ * Compute the length, in modified UTF-8, of a java/lang/String object.
+ */
+int dvmStringUtf8ByteLen(StringObject* jstr)
+{
+    assert(gDvm.javaLangStringReady > 0);
+
+    if (jstr == NULL)
+        return 0;       // should we throw something?  assert?
+
+    /* locate the live character range inside the backing char[] */
+    int strLen = dvmGetFieldInt((Object*) jstr, gDvm.offJavaLangString_count);
+    int strOff = dvmGetFieldInt((Object*) jstr, gDvm.offJavaLangString_offset);
+    ArrayObject* charArray = (ArrayObject*) dvmGetFieldObject((Object*) jstr,
+                                gDvm.offJavaLangString_value);
+    const u2* strData = (const u2*) charArray->contents + strOff;
+
+    assert(strOff + strLen <= (int) charArray->length);
+
+    return utf16_utf8ByteLen(strData, strLen);
+}
+
+/*
+ * Get the string's length.
+ */
+int dvmStringLen(StringObject* jstr)
+{
+    /* length in UTF-16 units, straight from the "count" field */
+    return dvmGetFieldInt((Object*) jstr, gDvm.offJavaLangString_count);
+}
+
+/*
+ * Get the string's data.
+ */
+const u2* dvmStringChars(StringObject* jstr)
+{
+    /* the data begins "offset" chars into the backing array */
+    int strOff = dvmGetFieldInt((Object*) jstr, gDvm.offJavaLangString_offset);
+    ArrayObject* charArray = (ArrayObject*) dvmGetFieldObject((Object*) jstr,
+                                gDvm.offJavaLangString_value);
+
+    return (const u2*) charArray->contents + strOff;
+}
+
+
+/*
+ * Compare two String objects.
+ *
+ * This is a dvmHashTableLookup() callback.  The function has already
+ * compared their hash values; we need to do a full compare to ensure
+ * that the strings really match.
+ */
+int dvmHashcmpStrings(const void* vstrObj1, const void* vstrObj2)
+{
+    const StringObject* strObj1 = (const StringObject*) vstrObj1;
+    const StringObject* strObj2 = (const StringObject*) vstrObj2;
+    ArrayObject* chars1;
+    ArrayObject* chars2;
+    int len1, len2, offset1, offset2;
+
+    assert(gDvm.javaLangStringReady > 0);
+
+    /* get offset and length into char array; all values are in 16-bit units */
+    len1 = dvmGetFieldInt((Object*) strObj1, gDvm.offJavaLangString_count);
+    offset1 = dvmGetFieldInt((Object*) strObj1, gDvm.offJavaLangString_offset);
+    len2 = dvmGetFieldInt((Object*) strObj2, gDvm.offJavaLangString_count);
+    offset2 = dvmGetFieldInt((Object*) strObj2, gDvm.offJavaLangString_offset);
+    /* cheap reject on length before touching the character data */
+    if (len1 != len2)
+        return len1 - len2;
+
+    chars1 = (ArrayObject*) dvmGetFieldObject((Object*) strObj1,
+                                gDvm.offJavaLangString_value);
+    chars2 = (ArrayObject*) dvmGetFieldObject((Object*) strObj2,
+                                gDvm.offJavaLangString_value);
+
+    /* damage here actually indicates a broken java/lang/String */
+    assert(offset1 + len1 <= (int) chars1->length);
+    assert(offset2 + len2 <= (int) chars2->length);
+
+    /* byte-wise compare of the UTF-16 data; suitable for the equality
+     * test the hash table needs, not for lexicographic ordering */
+    return memcmp((const u2*) chars1->contents + offset1,
+                  (const u2*) chars2->contents + offset2,
+                  len1 * sizeof(u2));
+}
+
diff --git a/vm/UtfString.h b/vm/UtfString.h
new file mode 100644
index 0000000..5ca2ce6
--- /dev/null
+++ b/vm/UtfString.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * UTF-8 and Unicode string manipulation functions, plus convenience
+ * functions for working with java/lang/String.
+ */
+#ifndef _DALVIK_STRING
+#define _DALVIK_STRING
+
+/*
+ * Hash function for modified UTF-8 strings.
+ */
+u4 dvmComputeUtf8Hash(const char* str);
+
+/*
+ * Hash function for string objects.
+ */
+u4 dvmComputeStringHash(StringObject* strObj);
+
+/*
+ * Create a java/lang/String from a C string.
+ *
+ * The caller must call dvmReleaseTrackedAlloc() on the return value or
+ * use a non-default value for "allocFlags".  It is never appropriate
+ * to use ALLOC_DONT_TRACK with this function.
+ *
+ * Returns NULL and throws an exception on failure.
+ */
+StringObject* dvmCreateStringFromCstr(const char* utf8Str, int allocFlags);
+
+/*
+ * Create a java/lang/String from a C string, given its UTF-16 length
+ * (number of UTF-16 code points).
+ *
+ * The caller must call dvmReleaseTrackedAlloc() on the return value or
+ * use a non-default value for "allocFlags".  It is never appropriate
+ * to use ALLOC_DONT_TRACK with this function.
+ *
+ * Returns NULL and throws an exception on failure.
+ */
+StringObject* dvmCreateStringFromCstrAndLength(const char* utf8Str,
+    u4 utf16Length, int allocFlags);
+
+/*
+ * Compute the number of characters in a "modified UTF-8" string.  This will
+ * match the result from strlen() so long as there are no multi-byte chars.
+ */
+int dvmUtf8Len(const char* utf8Str);
+
+/*
+ * Convert a UTF-8 string to UTF-16.  "utf16Str" must have enough room
+ * to hold the output.
+ */
+void dvmConvertUtf8ToUtf16(u2* utf16Str, const char* utf8Str);
+
+/*
+ * Create a java/lang/String from a Unicode string.
+ *
+ * The caller must call dvmReleaseTrackedAlloc() on the return value.
+ */
+StringObject* dvmCreateStringFromUnicode(const u2* unichars, int len);
+
+/*
+ * Create a UTF-8 C string from a java/lang/String.  Caller must free
+ * the result.
+ *
+ * Returns NULL if "jstr" is NULL.
+ */
+char* dvmCreateCstrFromString(StringObject* jstr);
+
+/*
+ * Create a UTF-8 C string from a region of a java/lang/String.  (Used by
+ * the JNI GetStringUTFRegion call.)
+ */
+void dvmCreateCstrFromStringRegion(StringObject* jstr, int start, int len,
+    char* buf);
+
+/*
+ * Compute the length in bytes of the modified UTF-8 representation of a
+ * string.
+ */
+int dvmStringUtf8ByteLen(StringObject* jstr);
+
+/*
+ * Get the length in Unicode characters of a string.
+ */
+int dvmStringLen(StringObject* jstr);
+
+/*
+ * Get a pointer to the Unicode data.
+ */
+const u2* dvmStringChars(StringObject* jstr);
+
+/*
+ * Compare two string objects.  (This is a dvmHashTableLookup() callback.)
+ */
+int dvmHashcmpStrings(const void* vstrObj1, const void* vstrObj2);
+
+#endif /*_DALVIK_STRING*/
diff --git a/vm/alloc/Alloc.c b/vm/alloc/Alloc.c
new file mode 100644
index 0000000..e247413
--- /dev/null
+++ b/vm/alloc/Alloc.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Garbage-collecting memory allocator.
+ */
+#include "Dalvik.h"
+#include "alloc/Heap.h"
+#include "alloc/HeapInternal.h"
+
+#if WITH_HPROF && WITH_HPROF_STACK
+#include "hprof/Hprof.h"
+#endif
+
+
+/*
+ * Initialize the GC universe.
+ *
+ * We're currently using a memory-mapped arena to keep things off of the
+ * main heap.  This needs to be replaced with something real.
+ */
+bool dvmGcStartup(void)
+{
+    /* create the heap lock before the heap itself exists */
+    dvmInitMutex(&gDvm.gcHeapLock);
+
+    return dvmHeapStartup();
+}
+
+/*
+ * Post-zygote heap initialization, including starting
+ * the HeapWorker thread.
+ */
+bool dvmGcStartupAfterZygote(void)
+{
+    /* both steps must succeed; short-circuits if HeapWorker fails */
+    return dvmHeapWorkerStartup() && dvmHeapStartupAfterZygote();
+}
+
+/*
+ * Shut the GC down.
+ */
+void dvmGcShutdown(void)
+{
+    //TODO: grab and destroy the lock (gcHeapLock is never destroyed here)
+    dvmHeapShutdown();
+}
+
+/*
+ * Do any last-minute preparation before we call fork() for the first time.
+ */
+bool dvmGcPreZygoteFork(void)
+{
+    /* all of the work happens in the heap source */
+    return dvmHeapSourceStartupBeforeFork();
+}
+
+/*
+ * Create a "stock instance" of an exception class.  These won't have
+ * useful stack traces in them, but they can be thrown when everything
+ * else is not working in a container class.
+ */
+static Object* createStockException(const char* descriptor)
+{
+    ClassObject* clazz;
+    Method* init;
+    Object* obj;
+
+    clazz = dvmFindSystemClass(descriptor);
+    if (clazz == NULL) {
+        LOGE("Unable to find %s\n", descriptor);
+        return NULL;
+    }
+
+    /* locate the no-arg constructor */
+    init = dvmFindDirectMethodByDescriptor(clazz, "<init>", "()V");
+    if (init == NULL) {
+        LOGE("Unable to find nullary constructor for %s\n", descriptor);
+        return NULL;
+    }
+
+    obj = dvmAllocObject(clazz, ALLOC_DEFAULT);
+    if (obj == NULL)
+        return NULL;
+
+    Thread* self = dvmThreadSelf();
+    JValue unused;
+    dvmCallMethod(self, init, obj, &unused);
+    /* NOTE(review): on constructor failure we return NULL with the
+     * exception still pending and "obj" still in the tracked-alloc
+     * table -- confirm the caller tolerates both */
+    if (dvmCheckException(self))
+        return NULL;
+
+    /* still tracked; caller must dvmReleaseTrackedAlloc() */
+    return obj;
+}
+
+/*
+ * "Late" initialization.  We had to defer this until we were able to
+ * interpret code.
+ */
+bool dvmGcLateInit(void)
+{
+    /*
+     * Pre-allocate some throwables.  These need to be explicitly added
+     * to the root set by the GC.
+     */
+    gDvm.outOfMemoryObj = createStockException("Ljava/lang/OutOfMemoryError;");
+    /* dvmReleaseTrackedAlloc() ignores NULL, so a failed create is safe */
+    dvmReleaseTrackedAlloc(gDvm.outOfMemoryObj, NULL);
+    gDvm.internalErrorObj = createStockException("Ljava/lang/InternalError;");
+    dvmReleaseTrackedAlloc(gDvm.internalErrorObj, NULL);
+    if (gDvm.outOfMemoryObj == NULL || gDvm.internalErrorObj == NULL) {
+        LOGW("Unable to create stock exceptions\n");
+        return false;
+    }
+
+    return true;
+}
+
+
+/*
+ * Create an instance of the specified class.
+ *
+ * Returns NULL and throws an exception on failure.
+ */
+Object* dvmAllocObject(ClassObject* clazz, int flags)
+{
+    Object* newObj;
+
+    /* caller must have initialized the class (or be initializing it) */
+    assert(dvmIsClassInitialized(clazz) || dvmIsClassInitializing(clazz));
+
+    /* finalizable classes force the FINALIZABLE flag regardless of caller */
+    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISFINALIZABLE)) {
+        flags |= ALLOC_FINALIZABLE;
+    }
+
+    /* allocate on GC heap; memory is zeroed out */
+    newObj = dvmMalloc(clazz->objectSize, flags);
+    if (newObj != NULL) {
+        DVM_OBJECT_INIT(newObj, clazz);
+        LOGVV("AllocObject: %s (%d)\n", clazz->descriptor,
+            (int) clazz->objectSize);
+#if WITH_HPROF && WITH_HPROF_STACK
+        hprofFillInStackTrace(newObj);
+#endif
+        dvmTrackAllocation(clazz, clazz->objectSize);
+    }
+
+    return newObj;
+}
+
+/*
+ * Create a copy of an object, for Object.clone().
+ *
+ * We use the size actually allocated, rather than obj->clazz->objectSize,
+ * because the latter doesn't work for array objects.
+ */
+Object* dvmCloneObject(Object* obj)
+{
+    Object* copy;
+    int size;
+    int flags;
+
+    assert(dvmIsValidObject(obj));
+
+    /* Class.java shouldn't let us get here (java.lang.Class is final
+     * and does not implement Clonable), but make extra sure.
+     * A memcpy() clone will wreak havoc on a ClassObject's "innards".
+     */
+    assert(obj->clazz != gDvm.classJavaLangClass);
+
+    if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE))
+        flags = ALLOC_DEFAULT | ALLOC_FINALIZABLE;
+    else
+        flags = ALLOC_DEFAULT;
+
+//TODO: use clazz->objectSize for non-arrays
+    size = dvmObjectSizeInHeap(obj);
+
+    copy = dvmMalloc(size, flags);
+    if (copy == NULL)
+        return NULL;
+#if WITH_HPROF && WITH_HPROF_STACK
+    hprofFillInStackTrace(copy);
+#endif
+    /* BUGFIX: track the allocation unconditionally, matching
+     * dvmAllocObject(); this was previously inside the WITH_HPROF_STACK
+     * conditional, so tracking was silently skipped on normal builds */
+    dvmTrackAllocation(obj->clazz, size);
+
+    memcpy(copy, obj, size);
+    DVM_LOCK_INIT(&copy->lock);
+
+    //LOGV("CloneObject: %p->%p %s (%d)\n", obj, copy, obj->clazz->name, size);
+
+    // TODO: deal with reference classes
+
+    /* don't call dvmReleaseTrackedAlloc -- the caller must do that */
+
+    return copy;
+}
+
+
+/*
+ * Track an object that was allocated internally and isn't yet part of the
+ * VM root set.
+ *
+ * We could do this per-thread or globally.  If it's global we don't have
+ * to do the thread lookup but we do have to synchronize access to the list.
+ *
+ * NOTE: "obj" is not a fully-formed object; in particular, obj->clazz will
+ * usually be NULL since we're being called from dvmMalloc().
+ */
+void dvmAddTrackedAlloc(Object* obj, Thread* self)
+{
+    if (self == NULL)
+        self = dvmThreadSelf();
+
+    //LOGI("TRACK ADD %p\n", obj);
+
+    assert(self != NULL);
+    /* table overflow is fatal: a lost reference would let the GC
+     * reclaim a live object */
+    if (!dvmAddToReferenceTable(&self->internalLocalRefTable, obj)) {
+        LOGE("threadid=%d: unable to add %p to internal ref table\n",
+            self->threadId, obj);
+        dvmDumpThread(self, false);
+        dvmAbort();
+    }
+}
+
+/*
+ * Stop tracking an object.
+ *
+ * We allow attempts to delete NULL "obj" so that callers don't have to wrap
+ * calls with "if != NULL".
+ */
+void dvmReleaseTrackedAlloc(Object* obj, Thread* self)
+{
+    if (obj == NULL)
+        return;
+
+    if (self == NULL)
+        self = dvmThreadSelf();
+    assert(self != NULL);
+
+    //LOGI("TRACK REM %p (%s)\n", obj,
+    //    (obj->clazz != NULL) ? obj->clazz->name : "");
+
+    /* removing an object that was never tracked is a VM bug: abort */
+    if (!dvmRemoveFromReferenceTable(&self->internalLocalRefTable,
+            self->internalLocalRefTable.table, obj))
+    {
+        LOGE("threadid=%d: failed to remove %p from internal ref table\n",
+            self->threadId, obj);
+        dvmAbort();
+    }
+}
+
+
+/*
+ * Explicitly initiate garbage collection.
+ */
+void dvmCollectGarbage(bool collectSoftReferences)
+{
+    /* serialize with allocations and other GC requests */
+    dvmLockHeap();
+
+    LOGVV("Explicit GC\n");
+    dvmCollectGarbageInternal(collectSoftReferences);
+
+    dvmUnlockHeap();
+}
diff --git a/vm/alloc/Alloc.h b/vm/alloc/Alloc.h
new file mode 100644
index 0000000..0489db7
--- /dev/null
+++ b/vm/alloc/Alloc.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Garbage-collecting allocator.
+ */
+#ifndef _DALVIK_ALLOC_ALLOC
+#define _DALVIK_ALLOC_ALLOC
+
+#include <stdlib.h>
+
+/*
+ * Initialization.
+ */
+bool dvmGcStartup(void);
+bool dvmGcStartupAfterZygote(void);
+void dvmGcShutdown(void);
+bool dvmGcLateInit(void);
+
+/*
+ * Do any last-minute preparation before we call fork() for the first time.
+ */
+bool dvmGcPreZygoteFork(void);
+
+/*
+ * Basic allocation function.
+ *
+ * The new object will be added to the "tracked alloc" table unless
+ * flags is ALLOC_DONT_TRACK or ALLOC_NO_GC.
+ *
+ * Returns NULL and throws an exception on failure.
+ */
+void* dvmMalloc(size_t size, int flags);
+
+/*
+ * Allocate a new object.
+ *
+ * The new object will be added to the "tracked alloc" table unless
+ * flags is ALLOC_DONT_TRACK or ALLOC_NO_GC.
+ *
+ * Returns NULL and throws an exception on failure.
+ */
+Object* dvmAllocObject(ClassObject* clazz, int flags);
+
+/*
+ * Clear flags set by dvmMalloc.  Pass in a bit mask of the flags that
+ * should be cleared.
+ */
+void dvmClearAllocFlags(Object* obj, int mask);
+
+/* flags for dvmMalloc */
+enum {
+    ALLOC_DEFAULT       = 0x00,
+    ALLOC_NO_GC         = 0x01,     /* do not garbage collect this object */
+    ALLOC_DONT_TRACK    = 0x02,     /* don't add to internal tracking list */
+    ALLOC_FINALIZABLE   = 0x04,     /* call finalize() before freeing */
+    // ALLOC_NO_MOVE?
+};
+
+/*
+ * Call when a request is so far off that we can't call dvmMalloc().  Throws
+ * an exception with the specified message.
+ */
+void dvmThrowBadAllocException(const char* msg);
+
+/*
+ * Track an object reference that is currently only visible internally.
+ * This is called automatically by dvmMalloc() unless ALLOC_DONT_TRACK
+ * is set.
+ *
+ * The "self" argument is allowed as an optimization; it may be NULL.
+ */
+void dvmAddTrackedAlloc(Object* obj, Thread* self);
+
+/*
+ * Remove an object from the internal tracking list.
+ *
+ * Does nothing if "obj" is NULL.
+ *
+ * The "self" argument is allowed as an optimization; it may be NULL.
+ */
+void dvmReleaseTrackedAlloc(Object* obj, Thread* self);
+
+/*
+ * Like dvmReleaseTrackedAlloc, but only does the release if "allocFlags"
+ * indicates that it's necessary to do so.
+ */
+INLINE void dvmReleaseTrackedAllocIFN(Object* obj, Thread* self, int allocFlags)
+{
+    /* the object was only tracked if neither opt-out flag was set */
+    bool wasTracked = (allocFlags & (ALLOC_NO_GC | ALLOC_DONT_TRACK)) == 0;
+
+    if (wasTracked)
+        dvmReleaseTrackedAlloc(obj, self);
+}
+
+/*
+ * Returns true iff <obj> points to a valid allocated object.
+ */
+bool dvmIsValidObject(const Object* obj);
+
+/*
+ * Create a copy of an object.
+ *
+ * The new object will be added to the "tracked alloc" table.
+ */
+Object* dvmCloneObject(Object* obj);
+
+/*
+ * Validate the object pointer.  Returns "false" and throws an exception if
+ * "obj" is null or invalid.
+ *
+ * This may be used in performance critical areas as a null-pointer check;
+ * anything else here should be for debug builds only.  In particular, for
+ * "release" builds we want to skip the call to dvmIsValidObject() -- the
+ * classfile validation will screen out code that puts invalid data into
+ * object reference registers.
+ */
+INLINE int dvmValidateObject(Object* obj)
+{
+    if (obj == NULL) {
+        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        return false;
+    }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+    if (!dvmIsValidObject(obj)) {
+        //abort();
+        dvmThrowException("Ljava/lang/InternalError;",
+            "VM detected invalid object ptr");
+        return false;
+    }
+#endif
+#ifndef NDEBUG
+    /* check for heap corruption */
+    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+        /* NOTE(review): abort() is live here, so the throw and return
+         * below are unreachable -- confirm that is intentional */
+        abort();
+        dvmThrowException("Ljava/lang/InternalError;",
+            "VM detected invalid object class ptr");
+        return false;
+    }
+#endif
+    return true;
+}
+
+/*
+ * Determine the exact number of GC heap bytes used by an object.  (Internal
+ * to heap code except for debugging.)
+ */
+size_t dvmObjectSizeInHeap(const Object* obj);
+
+/*
+ * Gets the current ideal heap utilization, represented as a number
+ * between zero and one.
+ */
+float dvmGetTargetHeapUtilization(void);
+
+/*
+ * Sets the new ideal heap utilization, represented as a number
+ * between zero and one.
+ */
+void dvmSetTargetHeapUtilization(float newTarget);
+
+/*
+ * If set is true, sets the new minimum heap size to size; always
+ * returns the current (or previous) size.  If size is zero,
+ * removes the current minimum constraint (if present).
+ */
+size_t dvmMinimumHeapSize(size_t size, bool set);
+
+/*
+ * Updates the internal count of externally-allocated memory.  If there's
+ * enough room for that memory, returns true.  If not, returns false and
+ * does not update the count.
+ *
+ * May cause a GC as a side-effect.
+ */
+bool dvmTrackExternalAllocation(size_t n);
+
+/*
+ * Reduces the internal count of externally-allocated memory.
+ */
+void dvmTrackExternalFree(size_t n);
+
+/*
+ * Returns the number of externally-allocated bytes being tracked by
+ * dvmTrackExternalAllocation/Free().
+ */
+size_t dvmGetExternalBytesAllocated(void);
+
+#endif /*_DALVIK_ALLOC_ALLOC*/
diff --git a/vm/alloc/DdmHeap.c b/vm/alloc/DdmHeap.c
new file mode 100644
index 0000000..78da6cd
--- /dev/null
+++ b/vm/alloc/DdmHeap.c
@@ -0,0 +1,487 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * DDM-related heap functions
+ */
+#include <sys/time.h>
+#include <time.h>
+
+#include "Dalvik.h"
+#include "alloc/Heap.h"
+#include "alloc/HeapInternal.h"
+#include "alloc/DdmHeap.h"
+#include "alloc/HeapSource.h"
+
/* The VM reports a single heap to DDMS; it always uses this ID. */
#define DEFAULT_HEAP_ID  1

/* Values for the HPIF 'when' field; they come from the DDM server and
 * control when heap-info captures are sent (see dvmDdmHandleHpifChunk()).
 */
enum HpifWhen {
    HPIF_WHEN_NEVER = 0,        /* don't send heap info */
    HPIF_WHEN_NOW = 1,          /* send once, immediately */
    HPIF_WHEN_NEXT_GC = 2,      /* send after the next GC, then stop */
    HPIF_WHEN_EVERY_GC = 3      /* send after every GC */
};
+
+/*
+ * Chunk HPIF (client --> server)
+ * 
+ * Heap Info. General information about the heap,
+ * suitable for a summary display.
+ * 
+ *   [u4]: number of heaps
+ * 
+ *   For each heap:
+ *     [u4]: heap ID
+ *     [u8]: timestamp in ms since Unix epoch
+ *     [u1]: capture reason (same as 'when' value from server)
+ *     [u4]: max heap size in bytes (-Xmx)
+ *     [u4]: current heap size in bytes
+ *     [u4]: current number of bytes allocated
+ *     [u4]: current number of objects allocated
+ */
+#define HPIF_SIZE(numHeaps) \
+        (sizeof(u4) + (numHeaps) * (5 * sizeof(u4) + sizeof(u1) + sizeof(u8)))
+void
+dvmDdmSendHeapInfo(int reason, bool shouldLock)
+{
+    struct timeval now;
+    u8 nowMs;
+    u1 *buf, *b;
+
+    buf = (u1 *)malloc(HPIF_SIZE(1));
+    if (buf == NULL) {
+        return;
+    }
+    b = buf;
+
+    /* If there's a one-shot 'when', reset it.
+     */
+    if (reason == gDvm.gcHeap->ddmHpifWhen) {
+        if (shouldLock && ! dvmLockHeap()) {
+            LOGW("%s(): can't lock heap to clear when\n", __func__);
+            goto skip_when;
+        }
+        if (reason == gDvm.gcHeap->ddmHpifWhen) {
+            if (gDvm.gcHeap->ddmHpifWhen == HPIF_WHEN_NEXT_GC) {
+                gDvm.gcHeap->ddmHpifWhen = HPIF_WHEN_NEVER;
+            }
+        }
+        if (shouldLock) {
+            dvmUnlockHeap();
+        }
+    }
+skip_when:
+
+    /* The current time, in milliseconds since 0:00 GMT, 1/1/70.
+     */
+    if (gettimeofday(&now, NULL) < 0) {
+        nowMs = 0;
+    } else {
+        nowMs = (u8)now.tv_sec * 1000 + now.tv_usec / 1000;
+    }
+
+    /* number of heaps */
+    set4BE(b, 1); b += 4;
+
+    /* For each heap (of which there is one) */
+    {
+        /* heap ID */
+        set4BE(b, DEFAULT_HEAP_ID); b += 4;
+
+        /* timestamp */
+        set8BE(b, nowMs); b += 8;
+
+        /* 'when' value */
+        *b++ = (u1)reason;
+
+        /* max allowed heap size in bytes */
+        set4BE(b, gDvm.heapSizeMax); b += 4;
+
+        /* current heap size in bytes */
+        set4BE(b, dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0)); b += 4;
+
+        /* number of bytes allocated */
+        set4BE(b, dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0)); b += 4;
+
+        /* number of objects allocated */
+        set4BE(b, dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0)); b += 4;
+    }
+    assert((intptr_t)b == (intptr_t)buf + (intptr_t)HPIF_SIZE(1));
+
+    dvmDbgDdmSendChunk(CHUNK_TYPE("HPIF"), b - buf, buf);
+}
+
+bool
+dvmDdmHandleHpifChunk(int when)
+{
+    switch (when) {
+    case HPIF_WHEN_NOW:
+        dvmDdmSendHeapInfo(when, true);
+        break;
+    case HPIF_WHEN_NEVER:
+    case HPIF_WHEN_NEXT_GC:
+    case HPIF_WHEN_EVERY_GC:
+        if (dvmLockHeap()) {
+            gDvm.gcHeap->ddmHpifWhen = when;
+            dvmUnlockHeap();
+        } else {
+            LOGI("%s(): can't lock heap to set when\n", __func__);
+            return false;
+        }
+        break;
+    default:
+        LOGI("%s(): bad when value 0x%08x\n", __func__, when);
+        return false;
+    }
+
+    return true;
+}
+
/* "Solidity" of a heap chunk, as reported in HPSG/NHSG chunk state bytes.
 * Occupies the low three bits of the state byte (see HPSG_STATE).
 */
enum HpsgSolidity {
    SOLIDITY_FREE = 0,
    SOLIDITY_HARD = 1,
    SOLIDITY_SOFT = 2,
    SOLIDITY_WEAK = 3,
    SOLIDITY_PHANTOM = 4,
    SOLIDITY_FINALIZABLE = 5,
    SOLIDITY_SWEEP = 6,
};

/* Kind of object occupying a heap chunk.  Occupies bits 3-5 of the
 * state byte (see HPSG_STATE).
 */
enum HpsgKind {
    KIND_OBJECT = 0,
    KIND_CLASS_OBJECT = 1,
    KIND_ARRAY_1 = 2,       /* array of 1-byte elements */
    KIND_ARRAY_2 = 3,       /* array of 2-byte elements */
    KIND_ARRAY_4 = 4,       /* array of 4-byte elements (incl. object refs) */
    KIND_ARRAY_8 = 5,       /* array of 8-byte elements */
    KIND_UNKNOWN = 6,
    KIND_NATIVE = 7,
};

/* High bit of the state byte: this entry describes only part of a chunk,
 * with more entries for the same chunk to follow.
 */
#define HPSG_PARTIAL (1<<7)
/* Pack a solidity/kind pair into one state byte. */
#define HPSG_STATE(solidity, kind) \
    ((u1)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))

/* Accumulates one outgoing HPSG/HPSO/NHSG chunk while walking a heap. */
typedef struct HeapChunkContext {
    u1 *buf;                    /* start of the output buffer */
    u1 *p;                      /* current write position within buf */
    u1 *pieceLenField;          /* where to backpatch the piece length */
    size_t bufLen;              /* capacity of buf, in bytes */
    size_t totalAllocationUnits; /* units described since the last flush */
    int type;                   /* CHUNK_TYPE to send (HPSG/HPSO/NHSG) */
    bool merge;                 /* combine adjacent identical chunks */
    bool needHeader;            /* next write must emit a piece header */
} HeapChunkContext;

/* Chunk sizes are reported in units of this many bytes. */
#define ALLOCATION_UNIT_SIZE 8
+
+static void
+flush_hpsg_chunk(HeapChunkContext *ctx)
+{
+    /* Patch the "length of piece" field.
+     */
+    assert(ctx->buf <= ctx->pieceLenField &&
+            ctx->pieceLenField <= ctx->p);
+    set4BE(ctx->pieceLenField, ctx->totalAllocationUnits);
+
+    /* Send the chunk.
+     */
+    dvmDbgDdmSendChunk(ctx->type, ctx->p - ctx->buf, ctx->buf);
+
+    /* Reset the context.
+     */
+    ctx->p = ctx->buf;
+    ctx->totalAllocationUnits = 0;
+    ctx->needHeader = true;
+    ctx->pieceLenField = NULL;
+}
+
/*
 * Heap-walk callback: classify one chunk (free / object / array / native)
 * and append its run-length-encoded description to the context buffer,
 * flushing to the DDM server whenever the buffer fills.
 *
 * Output format per entry: one state byte (HPSG_STATE, possibly with
 * HPSG_PARTIAL) followed by one byte holding (length-in-units - 1);
 * runs longer than 256 units are split into multiple entries.
 */
static void
heap_chunk_callback(const void *chunkptr, size_t chunklen,
                    const void *userptr, size_t userlen, void *arg)
{
    HeapChunkContext *ctx = (HeapChunkContext *)arg;
    u1 state;

    UNUSED_PARAMETER(userlen);

    /* Chunk lengths are expected to be multiples of the allocation unit. */
    assert((chunklen & (ALLOCATION_UNIT_SIZE-1)) == 0);

    /* Make sure there's enough room left in the buffer.
     * We need to use two bytes for every fractional 256
     * allocation units used by the chunk.
     */
    {
        size_t bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < (((chunklen/ALLOCATION_UNIT_SIZE + 255) / 256) * 2)) {
            flush_hpsg_chunk(ctx);
        }
    }

//TODO: notice when there's a gap and start a new heap, or at least a new range.
    if (ctx->needHeader) {
        /*
         * Start a new HPSx chunk.
         */

        /* [u4]: heap ID */
        set4BE(ctx->p, DEFAULT_HEAP_ID); ctx->p += 4;

        /* [u1]: size of allocation unit, in bytes */
        *ctx->p++ = 8;

        /* [u4]: virtual address of segment start */
        set4BE(ctx->p, (uintptr_t)chunkptr); ctx->p += 4;

        /* [u4]: offset of this piece (relative to the virtual address) */
        set4BE(ctx->p, 0); ctx->p += 4;

        /* [u4]: length of piece, in allocation units
         * We won't know this until we're done, so save the offset
         * and stuff in a dummy value.
         */
        ctx->pieceLenField = ctx->p;
        set4BE(ctx->p, 0x55555555); ctx->p += 4;

        ctx->needHeader = false;
    }

    /* Determine the type of this chunk.
     */
    if (userptr == NULL) {
        /* It's a free chunk.
         */
        state = HPSG_STATE(SOLIDITY_FREE, 0);
    } else {
        const DvmHeapChunk *hc = (const DvmHeapChunk *)userptr;
        const Object *obj = chunk2ptr(hc);
        /* If we're looking at the native heap, we'll just return
         * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
         */
        bool native = ctx->type == CHUNK_TYPE("NHSG");

        /* It's an allocated chunk.  Figure out what it is.
         */
//TODO: if ctx.merge, see if this chunk is different from the last chunk.
//      If it's the same, we should combine them.
        if (!native && dvmIsValidObject(obj)) {
            ClassObject *clazz = obj->clazz;
            if (clazz == NULL) {
                /* The object was probably just created
                 * but hasn't been initialized yet.
                 */
                state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
            } else if (clazz == gDvm.unlinkedJavaLangClass ||
                       clazz == gDvm.classJavaLangClass)
            {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
            } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
                if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
                    /* Object references are reported as 4-byte elements. */
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                } else {
                    /* Primitive arrays: pick the kind matching the
                     * element width.
                     */
                    switch (clazz->elementClass->primitiveType) {
                    case PRIM_BOOLEAN:
                    case PRIM_BYTE:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
                        break;
                    case PRIM_CHAR:
                    case PRIM_SHORT:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
                        break;
                    case PRIM_INT:
                    case PRIM_FLOAT:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                        break;
                    case PRIM_DOUBLE:
                    case PRIM_LONG:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
                        break;
                    default:
                        assert(!"Unknown GC heap object type");
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
                        break;
                    }
                }
            } else {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
            }
        } else {
            obj = NULL; // it's not actually an object
            state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
        }
    }

    /* Write out the chunk description.
     */
    chunklen /= ALLOCATION_UNIT_SIZE;   // convert to allocation units
    ctx->totalAllocationUnits += chunklen;
    /* Emit full 256-unit "partial" entries first, then the remainder.
     * Each entry's length byte holds (units - 1), so 255 means 256 units.
     */
    while (chunklen > 256) {
        *ctx->p++ = state | HPSG_PARTIAL;
        *ctx->p++ = 255;     // length - 1
        chunklen -= 256;
    }
    *ctx->p++ = state;
    *ctx->p++ = chunklen - 1;
}
+
/* When HPSG/NHSG segment dumps should be captured. */
enum HpsgWhen {
    HPSG_WHEN_NEVER = 0,
    HPSG_WHEN_EVERY_GC = 1,
};
/* Whether adjacent identical chunks should be merged in the dump. */
enum HpsgWhat {
    HPSG_WHAT_MERGED_OBJECTS = 0,
    HPSG_WHAT_DISTINCT_OBJECTS = 1,
};

/* Output buffer size for one HPSx piece; a bit under a page. */
#define HPSx_CHUNK_SIZE (4096 - 16)

/* Provided by the dlmalloc implementation; walks the native heap. */
void dlmalloc_walk_heap(void(*)(const void*, size_t, const void*, size_t, void*),void*);
+
+static void
+walkHeap(bool merge, bool native)
+{
+    HeapChunkContext ctx;
+    
+    memset(&ctx, 0, sizeof(ctx));
+    ctx.bufLen = HPSx_CHUNK_SIZE;
+    ctx.buf = (u1 *)malloc(ctx.bufLen);
+    if (ctx.buf == NULL) {
+        return;
+    }
+
+    ctx.merge = merge;
+    if (native) {
+        ctx.type = CHUNK_TYPE("NHSG");
+    } else {
+        if (ctx.merge) {
+            ctx.type = CHUNK_TYPE("HPSG");
+        } else {
+            ctx.type = CHUNK_TYPE("HPSO");
+        }
+    }
+
+    ctx.p = ctx.buf;
+    ctx.needHeader = true;
+    if (native) {
+        dlmalloc_walk_heap(heap_chunk_callback, (void *)&ctx);
+    } else {
+        dvmHeapSourceWalk(heap_chunk_callback, (void *)&ctx);
+    }
+    if (ctx.p > ctx.buf) {
+        flush_hpsg_chunk(&ctx);
+    }
+
+    free(ctx.buf);
+}
+
+void
+dvmDdmSendHeapSegments(bool shouldLock, bool native)
+{
+    u1 heapId[sizeof(u4)];
+    GcHeap *gcHeap = gDvm.gcHeap;
+    int when, what;
+    bool merge;
+
+    /* Don't even grab the lock if there's nothing to do when we're called.
+     */
+    if (!native) {
+        when = gcHeap->ddmHpsgWhen;
+        what = gcHeap->ddmHpsgWhat;
+        if (when == HPSG_WHEN_NEVER) {
+            return;
+        }
+    } else {
+        when = gcHeap->ddmNhsgWhen;
+        what = gcHeap->ddmNhsgWhat;
+        if (when == HPSG_WHEN_NEVER) {
+            return;
+        }
+    }
+    if (shouldLock && !dvmLockHeap()) {
+        LOGW("Can't lock heap for DDM HPSx dump\n");
+        return;
+    }
+
+    /* Figure out what kind of chunks we'll be sending.
+     */
+    if (what == HPSG_WHAT_MERGED_OBJECTS) {
+        merge = true;
+    } else if (what == HPSG_WHAT_DISTINCT_OBJECTS) {
+        merge = false;
+    } else {
+        assert(!"bad HPSG.what value");
+        return;
+    }
+
+    /* First, send a heap start chunk.
+     */
+    set4BE(heapId, DEFAULT_HEAP_ID);
+    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
+        sizeof(u4), heapId);
+
+    /* Send a series of heap segment chunks.
+     */
+    walkHeap(merge, native);
+
+    /* Finally, send a heap end chunk.
+     */
+    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
+        sizeof(u4), heapId);
+
+    if (shouldLock) {
+        dvmUnlockHeap();
+    }
+}
+
+bool
+dvmDdmHandleHpsgNhsgChunk(int when, int what, bool native)
+{
+    LOGI("dvmDdmHandleHpsgChunk(when %d, what %d, heap %d)\n", when, what,
+         native);
+    switch (when) {
+    case HPSG_WHEN_NEVER:
+    case HPSG_WHEN_EVERY_GC:
+        break;
+    default:
+        LOGI("%s(): bad when value 0x%08x\n", __func__, when);
+        return false;
+    }
+
+    switch (what) {
+    case HPSG_WHAT_MERGED_OBJECTS:
+    case HPSG_WHAT_DISTINCT_OBJECTS:
+        break;
+    default:
+        LOGI("%s(): bad what value 0x%08x\n", __func__, what);
+        return false;
+    }
+
+    if (dvmLockHeap()) {
+        if (!native) {
+            gDvm.gcHeap->ddmHpsgWhen = when;
+            gDvm.gcHeap->ddmHpsgWhat = what;
+        } else {
+            gDvm.gcHeap->ddmNhsgWhen = when;
+            gDvm.gcHeap->ddmNhsgWhat = what;
+        }
+//TODO: if what says we should dump immediately, signal (or do) it from here
+        dvmUnlockHeap();
+    } else {
+        LOGI("%s(): can't lock heap to set when/what\n", __func__);
+        return false;
+    }
+
+    return true;
+}
diff --git a/vm/alloc/DdmHeap.h b/vm/alloc/DdmHeap.h
new file mode 100644
index 0000000..c3e11dc
--- /dev/null
+++ b/vm/alloc/DdmHeap.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * DDM-specific internal heap functions.
+ */
+#ifndef _DALVIK_ALLOC_DDMHEAP
+#define _DALVIK_ALLOC_DDMHEAP
+
+/*
+ * Sends the current heap info to the DDM server.
+ * Should be called after a GC when gcHeap->ddmHpifWhen
+ * is non-zero.
+ */
+void dvmDdmSendHeapInfo(int reason, bool shouldLock);
+
+/*
+ * Walks through the heap and sends a series of
+ * HPST/NHST, HPSG/HPSO/NHSG, and HPEN/NHEN chunks that describe
+ * the contents of the GC or native heap.
+ *
+ * @param shouldLock If true, grab the heap lock.  If false,
+ *                   the heap lock must already be held.
 * @param native     If false, dump the GC heap; if true, dump the
 *                   native heap.
+ */
+void dvmDdmSendHeapSegments(bool shouldLock, bool native);
+
+#endif  // _DALVIK_ALLOC_DDMHEAP
diff --git a/vm/alloc/Float12.h b/vm/alloc/Float12.h
new file mode 100644
index 0000000..324cc51
--- /dev/null
+++ b/vm/alloc/Float12.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _DALVIK_FLOAT12_H
+#define _DALVIK_FLOAT12_H
+
+/* Encodes a 32-bit number in 12 bits with +/-1.5% error,
+ * though the majority (80%) are within +/-0.25%.
+ *
+ * The encoding looks like:
+ *
 *     EEEMMMMM MMMM
 *     76543210 3210
 *
 * where EEE (bits 11..9) is a base-16 exponent and the nine M bits
 * (bits 8..0) are the mantissa.
 * The output value is (M * 16^EEE), or (M << (EEE * 4)).
+ *
+ * TODO: do this in a less brain-dead way.  I'm sure we can do
+ *       it without all of these loops.
+ */
/*
 * Encode a 32-bit unsigned value as a 12-bit "float": a 3-bit base-16
 * exponent in bits 11..9 and a 9-bit mantissa in bits 8..0, with
 * round-to-nearest on the discarded bits.
 *
 * Declared 'static inline': the original plain 'inline' in a header has
 * no external definition under C99 semantics, which can produce
 * undefined-reference link errors in unoptimized builds.
 */
static inline unsigned short intToFloat12(unsigned int val)
{
    /* Keep the original value unsigned; the original stored it in a
     * signed int, making the final right-shift implementation-defined
     * for inputs >= 2^31.
     */
    unsigned int oval = val;
    int shift = 0;

    /* Shift off the precision we don't care about.
     * Don't round here; it biases the values too high
     * (such that the encoded value is always greater
     * than the actual value)
     */
    unsigned int pval = val;
    while (val > 0x1ff) {
        pval = val;
        val >>= 1;
        shift++;
    }
    if (shift > 0 && (pval & 1)) {
        /* Round based on the last bit we shifted off.
         */
        val++;
        if (val > 0x1ff) {
            val = (val + 1) >> 1;
            shift++;
        }
    }

    /* Shift off enough bits to create a valid exponent.
     * Since we care about the bits we're losing, be sure
     * to round them.
     */
    while (shift % 4 != 0) {
        val = (val + 1) >> 1;
        shift++;
    }

    /* In the end, only round by the most-significant lost bit.
     * This centers the values around the closest match.
     * All of the rounding we did above guarantees that this
     * round won't overflow past 0x1ff.
     */
    if (shift > 0) {
        val = ((oval >> (shift - 1)) + 1) >> 1;
    }

    /* Pack the exponent (number of nibbles shifted off) into bits 11..9. */
    val |= (shift / 4) << 9;
    return val;
}
+
/*
 * Decode a 12-bit "float" produced by intToFloat12() back into an
 * unsigned int: mantissa is the low 9 bits, exponent (bits 11..9)
 * counts nibbles to shift left.
 *
 * 'static inline' for the same C99 header-linkage reason as
 * intToFloat12().
 *
 * NOTE(review): a mantissa/exponent pair whose product exceeds 32 bits
 * (e.g. exponent 7 with a large mantissa) would overflow the shift;
 * intToFloat12() never produces such pairs for in-range results.
 */
static inline unsigned int float12ToInt(unsigned short f12)
{
    return (f12 & 0x1ff) << ((f12 >> 9) * 4);
}
+
#if 0   // testing

#include <stdio.h>
#include <stdlib.h>     /* for atoi(); was missing (implicit declaration) */

/* Manual test harness: prints the relative encoding error for a sweep
 * of values between <min> and <max>.
 */
int main(int argc, char *argv[])
{
    if (argc != 3) {
        fprintf(stderr, "usage: %s <min> <max>\n", argv[0]);
        return 1;
    }

    unsigned int min = atoi(argv[1]);
    unsigned int max = atoi(argv[2]);
    if (min > max) {
        /* Swap the bounds.  The original wrote "max = min; min = t;"
         * after "t = min", which set both bounds to the smaller value
         * instead of swapping them.
         */
        unsigned int t = min;
        min = max;
        max = t;
    } else if (min == max) {
        max++;
    }

    while (min < max) {
        unsigned int out;
        unsigned short sf;

        sf = intToFloat12(min);
        out = float12ToInt(sf);
//        printf("%d 0x%03x / 0x%03x %d (%d)\n", min, min, sf, out, (int)min - (int)out);
        printf("%6.6f %d %d\n", ((float)(int)(min - out)) / (float)(int)min, min, out);
        /* Step faster through larger ranges so the sweep stays quick. */
        if (min <= 8192) {
            min++;
        } else if (min < 10000) {
            min += 10;
        } else if (min < 100000) {
            min += 1000;
        } else {
            min += 10000;
        }
    }
    return 0;
}

#endif  // testing
+
+#endif  // _DALVIK_FLOAT12_H
diff --git a/vm/alloc/GC.h b/vm/alloc/GC.h
new file mode 100644
index 0000000..62e9aa6
--- /dev/null
+++ b/vm/alloc/GC.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Garbage collector
+ */
+#ifndef _DALVIK_ALLOC_GC
+#define _DALVIK_ALLOC_GC
+
+/*
+ * Initiate garbage collection.
+ *
+ * This usually happens automatically, but can also be caused by Runtime.gc().
+ */
+void dvmCollectGarbage(bool collectSoftRefs);
+
+/****
+ **** NOTE: The functions after this point will (should) only be called
+ ****       during GC.
+ ****/
+
+/*
+ * Functions that mark an object.
+ *
+ * Currently implemented in Heap.c.
+ */
+
+/*
+ * Mark an object and schedule it to be scanned for
+ * references to other objects.
+ *
+ * @param obj must be a valid object
+ */
+void dvmMarkObjectNonNull(const Object *obj);
+
+/*
+ * Mark an object and schedule it to be scanned for
+ * references to other objects.
+ *
+ * @param obj must be a valid object or NULL
+ */
+#define dvmMarkObject(obj) \
+    do { \
+        Object *DMO_obj_ = (Object *)(obj); \
+        if (DMO_obj_ != NULL) { \
+            dvmMarkObjectNonNull(DMO_obj_); \
+        } \
+    } while (false)
+
+/*
+ * If obj points to a valid object, mark it and
+ * schedule it to be scanned for references to other
+ * objects.
+ *
+ * @param obj any pointer that may be an Object, or NULL
+TODO: check for alignment, too (would require knowledge of heap chunks)
+ */
+#define dvmMarkIfObject(obj) \
+    do { \
+        Object *DMIO_obj_ = (Object *)(obj); \
+        if (DMIO_obj_ != NULL && dvmIsValidObject(DMIO_obj_)) { \
+            dvmMarkObjectNonNull(DMIO_obj_); \
+        } \
+    } while (false)
+
+/*
+ * Functions that handle scanning various objects for references.
+ */
+
+/*
+ * Mark all class objects loaded by the root class loader;
+ * most of these are the java.* classes.
+ *
+ * Currently implemented in Class.c.
+ */
+void dvmGcScanRootClassLoader(void);
+
+/*
+ * Mark all root ThreadGroup objects, guaranteeing that
+ * all live Thread objects will eventually be scanned.
+ *
+ * NOTE: this is a misnomer, because the current implementation
+ * actually only scans the internal list of VM threads, which
+ * will mark all VM-reachable Thread objects.  Someone else
+ * must scan the root class loader, which will mark java/lang/ThreadGroup.
+ * The ThreadGroup class object has static members pointing to
+ * the root ThreadGroups, and these will be marked as a side-effect
+ * of marking the class object.
+ *
+ * Currently implemented in Thread.c.
+ */
+void dvmGcScanRootThreadGroups(void);
+
+/*
+ * Mark all interned string objects.
+ *
+ * Currently implemented in Intern.c.
+ */
+void dvmGcScanInternedStrings(void);
+
+/*
+ * Remove any unmarked interned string objects from the table.
+ *
+ * Currently implemented in Intern.c.
+ */
+void dvmGcDetachDeadInternedStrings(int (*isUnmarkedObject)(void *));
+
+/*
+ * Mark all primitive class objects.
+ *
+ * Currently implemented in Array.c.
+ */
+void dvmGcScanPrimitiveClasses(void);
+
+/*
+ * Mark all JNI global references.
+ *
+ * Currently implemented in JNI.c.
+ */
+void dvmGcMarkJniGlobalRefs(void);
+
+/*
+ * Mark all debugger references.
+ *
+ * Currently implemented in Debugger.c.
+ */
+void dvmGcMarkDebuggerRefs(void);
+
+/*
+ * Optional heap profiling.
+ */
+#if WITH_HPROF && !defined(_DALVIK_HPROF_HPROF)
+#include "hprof/Hprof.h"
+#define HPROF_SET_GC_SCAN_STATE(tag_, thread_) \
+    dvmHeapSetHprofGcScanState((tag_), (thread_))
+#define HPROF_CLEAR_GC_SCAN_STATE() \
+    dvmHeapSetHprofGcScanState(0, 0)
+#else
+#define HPROF_SET_GC_SCAN_STATE(tag_, thread_)  do {} while (false)
+#define HPROF_CLEAR_GC_SCAN_STATE()  do {} while (false)
+#endif
+
+#endif  // _DALVIK_ALLOC_GC
diff --git a/vm/alloc/Heap.c b/vm/alloc/Heap.c
new file mode 100644
index 0000000..4129a90
--- /dev/null
+++ b/vm/alloc/Heap.c
@@ -0,0 +1,1053 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Garbage-collecting memory allocator.
+ */
+#include "Dalvik.h"
+#include "alloc/HeapTable.h"
+#include "alloc/Heap.h"
+#include "alloc/HeapInternal.h"
+#include "alloc/DdmHeap.h"
+#include "alloc/HeapSource.h"
+#include "alloc/MarkSweep.h"
+
+#include "utils/threads.h"      // need Android thread priorities
+#define kInvalidPriority        10000
+
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <limits.h>
+#include <errno.h>
+
+#define kNonCollectableRefDefault   16
+#define kFinalizableRefDefault      128
+
+/*
+ * Initialize the GC heap.
+ *
+ * Returns true if successful, false otherwise.
+ */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

#if defined(WITH_ALLOC_LIMITS)
    /* Per-allocation limit checking starts out disabled. */
    gDvm.checkAllocLimits = false;
    gDvm.allocationLimit = -1;
#endif

    /* Create the heap source; it allocates and returns the GcHeap. */
    gcHeap = dvmHeapSourceStartup(gDvm.heapSizeStart, gDvm.heapSizeMax);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->heapWorkerCurrentObject = NULL;
    gcHeap->heapWorkerCurrentMethod = NULL;
    gcHeap->heapWorkerInterpStartTime = 0LL;
    /* Don't collect SoftReferences until the heap grows past the
     * starting size; see dvmHeapSizeChanged().
     */
    gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
    gcHeap->softReferenceHeapSizeThreshold = gDvm.heapSizeStart;
    /* All DDMS capture policies start out "never". */
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
#if WITH_HPROF
    gcHeap->hprofDumpOnGc = false;
    gcHeap->hprofContext = NULL;
#endif

    /* This needs to be set before we call dvmHeapInitHeapRefTable().
     */
    gDvm.gcHeap = gcHeap;

    /* Set up the table we'll use for ALLOC_NO_GC.
     */
    if (!dvmHeapInitHeapRefTable(&gcHeap->nonCollectableRefs,
                           kNonCollectableRefDefault))
    {
        LOGE_HEAP("Can't allocate GC_NO_ALLOC table\n");
        goto fail;
    }

    /* Set up the lists and lock we'll use for finalizable
     * and reference objects.
     */
    dvmInitMutex(&gDvm.heapWorkerListLock);
    gcHeap->finalizableRefs = NULL;
    gcHeap->pendingFinalizationRefs = NULL;
    gcHeap->referenceOperations = NULL;

    /* Initialize the HeapWorker locks and other state
     * that the GC uses.
     */
    dvmInitializeHeapWorkerState();

    return true;

fail:
    /* Clear the global before tearing down the source, which frees gcHeap. */
    gDvm.gcHeap = NULL;
    dvmHeapSourceShutdown(gcHeap);
    return false;
}
+
/*
 * Post-fork heap initialization for a new VM spawned from the Zygote.
 * Returns true on success.
 */
bool dvmHeapStartupAfterZygote()
{
    /* Update our idea of the last GC start time so that we
     * don't use the last time that Zygote happened to GC.
     */
    gDvm.gcHeap->gcStartTime = dvmGetRelativeTimeUsec();

    return dvmHeapSourceStartupAfterZygote();
}
+
+void dvmHeapShutdown()
+{
+//TODO: make sure we're locked
+    if (gDvm.gcHeap != NULL) {
+        GcHeap *gcHeap;
+
+        gcHeap = gDvm.gcHeap;
+        gDvm.gcHeap = NULL;
+
+        /* Tables are allocated on the native heap;
+         * they need to be cleaned up explicitly.
+         * The process may stick around, so we don't
+         * want to leak any native memory.
+         */
+        dvmHeapFreeHeapRefTable(&gcHeap->nonCollectableRefs);
+
+        dvmHeapFreeLargeTable(gcHeap->finalizableRefs);
+        gcHeap->finalizableRefs = NULL;
+
+        dvmHeapFreeLargeTable(gcHeap->pendingFinalizationRefs);
+        gcHeap->pendingFinalizationRefs = NULL;
+
+        dvmHeapFreeLargeTable(gcHeap->referenceOperations);
+        gcHeap->referenceOperations = NULL;
+
+        /* Destroy the heap.  Any outstanding pointers
+         * will point to unmapped memory (unless/until
+         * someone else maps it).  This frees gcHeap
+         * as a side-effect.
+         */
+        dvmHeapSourceShutdown(gcHeap);
+    }
+}
+
/*
 * We've been asked to allocate something we can't, e.g. an array so
 * large that (length * elementWidth) is larger than 2^31.  We want to
 * throw an OutOfMemoryError, but doing so implies that certain other
 * actions have taken place (like clearing soft references).
 *
 * TODO: for now we just throw an InternalError.
 *
 * @param msg  detail message for the thrown exception
 */
void dvmThrowBadAllocException(const char* msg)
{
    dvmThrowException("Ljava/lang/InternalError;", msg);
}
+
+/*
+ * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
+ * we're going to have to wait on the mutex.
+ */
+bool dvmLockHeap()
+{
+    if (pthread_mutex_trylock(&gDvm.gcHeapLock) != 0) {
+        Thread *self;
+        ThreadStatus oldStatus;
+        int cc;
+
+        self = dvmThreadSelf();
+        if (self != NULL) {
+            oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
+        } else {
+            oldStatus = -1; // shut up gcc
+        }
+
+        cc = pthread_mutex_lock(&gDvm.gcHeapLock);
+        assert(cc == 0);
+
+        if (self != NULL) {
+            dvmChangeStatus(self, oldStatus);
+        }
+    }
+
+    return true;
+}
+
/*
 * Release the GC heap lock acquired by dvmLockHeap().
 */
void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}
+
/* Pop an object from the list of pending finalizations and
 * reference clears/enqueues, and return the object.
 * The caller must call dvmReleaseTrackedAlloc()
 * on the object when finished.
 *
 * The kind of work to perform is stored through *op.
 * Returns NULL (leaving *op untouched) when no work is pending.
 *
 * Typically only called by the heap worker thread.
 */
Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op)
{
    Object *obj;
    LargeHeapRefTable *table;
    GcHeap *gcHeap = gDvm.gcHeap;

    assert(op != NULL);

    obj = NULL;

    dvmLockMutex(&gDvm.heapWorkerListLock);

    /* We must handle reference operations before finalizations.
     * If:
     *     a) Someone subclasses WeakReference and overrides clear()
     *     b) A reference of this type is the last reference to
     *        a finalizable object
     * then we need to guarantee that the overridden clear() is called
     * on the reference before finalize() is called on the referent.
     * Both of these operations will always be scheduled at the same
     * time, so handling reference operations first will guarantee
     * the required order.
     */
    obj = dvmHeapGetNextObjectFromLargeTable(&gcHeap->referenceOperations);
    if (obj != NULL) {
        uintptr_t workBits;

        /* The work type is smuggled in the low bits of the pointer;
         * extract it, then strip it to recover the real object address.
         */
        workBits = (uintptr_t)obj & (WORKER_CLEAR | WORKER_ENQUEUE);
        assert(workBits != 0);
        obj = (Object *)((uintptr_t)obj & ~(WORKER_CLEAR | WORKER_ENQUEUE));

        *op = workBits;
    } else {
        /* No reference operations pending; look for finalizations. */
        obj = dvmHeapGetNextObjectFromLargeTable(
                &gcHeap->pendingFinalizationRefs);
        if (obj != NULL) {
            *op = WORKER_FINALIZE;
        }
    }

    if (obj != NULL) {
        /* Don't let the GC collect the object until the
         * worker thread is done with it.
         *
         * This call is safe;  it uses thread-local storage
         * and doesn't acquire any locks.
         */
        dvmAddTrackedAlloc(obj, NULL);
    }

    dvmUnlockMutex(&gDvm.heapWorkerListLock);

    return obj;
}
+
+/* Used for a heap size change hysteresis to avoid collecting
+ * SoftReferences when the heap only grows by a small amount.
+ */
+#define SOFT_REFERENCE_GROWTH_SLACK (128 * 1024)
+
+/* Whenever the effective heap size may have changed,
+ * this function must be called.
+ */
+void dvmHeapSizeChanged()
+{
+    GcHeap *gcHeap = gDvm.gcHeap;
+    size_t currentHeapSize;
+
+    currentHeapSize = dvmHeapSourceGetIdealFootprint();
+
+    /* See if the heap size has changed enough that we should care
+     * about it.
+     */
+    /* NOTE(review): the subtraction below is done in size_t; if the
+     * threshold is ever smaller than 4 * SOFT_REFERENCE_GROWTH_SLACK
+     * (512KB) the value wraps around to a huge number and this branch
+     * is taken spuriously -- confirm the threshold is always seeded
+     * above that before this is called.
+     */
+    if (currentHeapSize <= gcHeap->softReferenceHeapSizeThreshold -
+            4 * SOFT_REFERENCE_GROWTH_SLACK)
+    {
+        /* The heap has shrunk enough that we'll use this as a new
+         * threshold.  Since we're doing better on space, there's
+         * no need to collect any SoftReferences.
+         *
+         * This is 4x the growth hysteresis because we don't want
+         * to snap down so easily after a shrink.  If we just cleared
+         * up a bunch of SoftReferences, we don't want to disallow
+         * any new ones from being created.
+         * TODO: determine if the 4x is important, needed, or even good
+         */
+        gcHeap->softReferenceHeapSizeThreshold = currentHeapSize;
+        gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
+    } else if (currentHeapSize >= gcHeap->softReferenceHeapSizeThreshold +
+            SOFT_REFERENCE_GROWTH_SLACK)
+    {
+        /* The heap has grown enough to warrant collecting SoftReferences.
+         */
+        gcHeap->softReferenceHeapSizeThreshold = currentHeapSize;
+        gcHeap->softReferenceCollectionState = SR_COLLECT_SOME;
+    }
+}
+
+
+/* Do a full garbage collection, which may grow the
+ * heap as a side-effect if the live set is large.
+ */
+static void gcForMalloc(bool collectSoftReferences)
+{
+    /* Count this GC in the allocation profiler, both globally and for
+     * the calling thread (if one is attached to the VM).
+     */
+#ifdef WITH_PROFILER
+    if (gDvm.allocProf.enabled) {
+        Thread* self = dvmThreadSelf();
+        gDvm.allocProf.gcCount++;
+        if (self != NULL) {
+            self->allocProf.gcCount++;
+        }
+    }
+#endif
+    /* This may adjust the soft limit as a side-effect.
+     */
+    LOGD_HEAP("dvmMalloc initiating GC%s\n",
+            collectSoftReferences ? "(collect SoftReferences)" : "");
+    dvmCollectGarbageInternal(collectSoftReferences);
+}
+
+/* Try as hard as possible to allocate some memory.
+ */
+static DvmHeapChunk *tryMalloc(size_t size)
+{
+    DvmHeapChunk *hc;
+
+    /* Don't try too hard if there's no way the allocation is
+     * going to succeed.  We have to collect SoftReferences before
+     * throwing an OOME, though.
+     */
+    if (size >= gDvm.heapSizeMax) {
+        LOGW_HEAP("dvmMalloc(%zu/0x%08zx): "
+                "someone's allocating a huge buffer\n", size, size);
+        hc = NULL;
+        goto collect_soft_refs;
+    }
+
+//TODO: figure out better heuristics
+//    There will be a lot of churn if someone allocates a bunch of
+//    big objects in a row, and we hit the frag case each time.
+//    A full GC for each.
+//    Maybe we grow the heap in bigger leaps
+//    Maybe we skip the GC if the size is large and we did one recently
+//      (number of allocations ago) (watch for thread effects)
+//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
+//      (or, at least, there are only 0-5 objects swept each time)
+
+    /* Attempt 1: plain allocation, no collection. */
+    hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
+    if (hc != NULL) {
+        return hc;
+    }
+
+    /* The allocation failed.  Free up some space by doing
+     * a full garbage collection.  This may grow the heap
+     * if the live set is sufficiently large.
+     */
+    gcForMalloc(false);
+    hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
+    if (hc != NULL) {
+        return hc;
+    }
+
+    /* Even that didn't work;  this is an exceptional state.
+     * Try harder, growing the heap if necessary.
+     */
+    hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
+    dvmHeapSizeChanged();
+    if (hc != NULL) {
+        size_t newHeapSize;
+
+        newHeapSize = dvmHeapSourceGetIdealFootprint();
+//TODO: may want to grow a little bit more so that the amount of free
+//      space is equal to the old free space + the utilization slop for
+//      the new allocation.
+        /* NOTE(review): FRACTIONAL_MB(x) presumably expands to the two
+         * arguments consumed by "%zu.%03zu" -- confirm against its
+         * definition. */
+        LOGI_HEAP("Grow heap (frag case) to "
+                "%zu.%03zuMB for %zu-byte allocation\n",
+                FRACTIONAL_MB(newHeapSize), size);
+        return hc;
+    }
+
+    /* Most allocations should have succeeded by now, so the heap
+     * is really full, really fragmented, or the requested size is
+     * really big.  Do another GC, collecting SoftReferences this
+     * time.  The VM spec requires that all SoftReferences have
+     * been collected and cleared before throwing an OOME.
+     */
+//TODO: wait for the finalizers from the previous GC to finish
+collect_soft_refs:
+    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation\n",
+            size);
+    gcForMalloc(true);
+    hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
+    dvmHeapSizeChanged();
+    if (hc != NULL) {
+        return hc;
+    }
+//TODO: maybe wait for finalizers and try one last time
+
+    /* NOTE(review): size is a size_t but "%zd" is the ssize_t
+     * specifier; "%zu" would be the exact match. */
+    LOGE_HEAP("Out of memory on a %zd-byte allocation.\n", size);
+//TODO: tell the HeapSource to dump its state
+    dvmDumpThread(dvmThreadSelf(), false);
+
+    return NULL;
+}
+
+/* Throw an OutOfMemoryError if there's a thread to attach it to.
+ * Avoid recursing.
+ *
+ * The caller must not be holding the heap lock, or else the allocations
+ * in dvmThrowException() will deadlock.
+ */
+static void throwOOME()
+{
+    Thread *self;
+
+    if ((self = dvmThreadSelf()) != NULL) {
+        /* If the current (failing) dvmMalloc() happened as part of thread
+         * creation/attachment before the thread became part of the root set,
+         * we can't rely on the thread-local trackedAlloc table, so
+         * we can't keep track of a real allocated OOME object.  But, since
+         * the thread is in the process of being created, it won't have
+         * a useful stack anyway, so we may as well make things easier
+         * by throwing the (stackless) pre-built OOME.
+         */
+        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
+            /* Let ourselves know that we tried to throw an OOM
+             * error in the normal way in case we run out of
+             * memory trying to allocate it inside dvmThrowException().
+             */
+            self->throwingOOME = true;
+
+            /* Don't include a description string;
+             * one fewer allocation.
+             */
+            dvmThrowException("Ljava/lang/OutOfMemoryError;", NULL);
+        } else {
+            /*
+             * This thread has already tried to throw an OutOfMemoryError,
+             * which probably means that we're running out of memory
+             * while recursively trying to throw.
+             *
+             * To avoid any more allocation attempts, "throw" a pre-built
+             * OutOfMemoryError object (which won't have a useful stack trace).
+             *
+             * Note that since this call can't possibly allocate anything,
+             * we don't care about the state of self->throwingOOME
+             * (which will usually already be set).
+             */
+            dvmSetException(self, gDvm.outOfMemoryObj);
+        }
+        /* We're done with the possible recursion.
+         * (The reset is unconditional; clearing an already-false flag
+         * in the pre-built-object branch is harmless.)
+         */
+        self->throwingOOME = false;
+    }
+}
+
+/*
+ * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
+ *
+ * The new storage is zeroed out.
+ *
+ * Note that, in rare cases, this could get called while a GC is in
+ * progress.  If a non-VM thread tries to attach itself through JNI,
+ * it will need to allocate some objects.  If this becomes annoying to
+ * deal with, we can block it at the source, but holding the allocation
+ * mutex should be enough.
+ *
+ * In rare circumstances (JNI AttachCurrentThread) we can be called
+ * from a non-VM thread.
+ *
+ * We implement ALLOC_NO_GC by maintaining an internal list of objects
+ * that should not be collected.  This requires no actual flag storage in
+ * the object itself, which is good, but makes flag queries expensive.
+ *
+ * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
+ * (because it's being done for the interpreter "new" operation and will
+ * be part of the root set immediately) or we can't (because this allocation
+ * is for a brand new thread).
+ *
+ * Returns NULL and throws an exception on failure.
+ *
+ * TODO: don't do a GC if the debugger thinks all threads are suspended
+ */
+void* dvmMalloc(size_t size, int flags)
+{
+    GcHeap *gcHeap = gDvm.gcHeap;
+    DvmHeapChunk *hc;
+    void *ptr;
+    bool triedGc, triedGrowing;
+    /* NOTE(review): triedGc and triedGrowing are never read or written
+     * anywhere in this function -- candidates for removal (they draw an
+     * unused-variable warning). */
+
+#if 0
+    /* handy for spotting large allocations */
+    if (size >= 100000) {
+        LOGI("dvmMalloc(%d):\n", size);
+        dvmDumpThread(dvmThreadSelf(), false);
+    }
+#endif
+
+#if defined(WITH_ALLOC_LIMITS)
+    /*
+     * See if they've exceeded the allocation limit for this thread.
+     *
+     * A limit value of -1 means "no limit".
+     *
+     * This is enabled at compile time because it requires us to do a
+     * TLS lookup for the Thread pointer.  This has enough of a performance
+     * impact that we don't want to do it if we don't have to.  (Now that
+     * we're using gDvm.checkAllocLimits we may want to reconsider this,
+     * but it's probably still best to just compile the check out of
+     * production code -- one less thing to hit on every allocation.)
+     */
+    if (gDvm.checkAllocLimits) {
+        Thread* self = dvmThreadSelf();
+        if (self != NULL) {
+            int count = self->allocLimit;
+            if (count > 0) {
+                self->allocLimit--;
+            } else if (count == 0) {
+                /* fail! */
+                assert(!gDvm.initializing);
+                self->allocLimit = -1;
+                dvmThrowException("Ldalvik/system/AllocationLimitError;",
+                    "thread allocation limit exceeded");
+                return NULL;
+            }
+        }
+    }
+
+    if (gDvm.allocationLimit >= 0) {
+        assert(!gDvm.initializing);
+        gDvm.allocationLimit = -1;
+        dvmThrowException("Ldalvik/system/AllocationLimitError;",
+            "global allocation limit exceeded");
+        return NULL;
+    }
+#endif
+
+    dvmLockHeap();
+
+    /* Try as hard as possible to allocate some memory.
+     */
+    hc = tryMalloc(size);
+    if (hc != NULL) {
+/* NOTE(review): no goto targets this label within the function as it
+ * stands, so it draws an unused-label warning. */
+alloc_succeeded:
+        /* We've got the memory.
+         */
+        if ((flags & ALLOC_FINALIZABLE) != 0) {
+            /* This object is an instance of a class that
+             * overrides finalize().  Add it to the finalizable list.
+             *
+             * Note that until DVM_OBJECT_INIT() is called on this
+             * object, its clazz will be NULL.  Since the object is
+             * in this table, it will be scanned as part of the root
+             * set.  scanObject() explicitly deals with the NULL clazz.
+             */
+            if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs,
+                                    (Object *)hc->data))
+            {
+                LOGE_HEAP("dvmMalloc(): no room for any more "
+                        "finalizable objects\n");
+                dvmAbort();
+            }
+        }
+
+/* NOTE(review): "#if" (not "#ifdef") -- an undefined macro evaluates to
+ * 0 here, so this block is compiled out unless the build defines
+ * WITH_OBJECT_HEADERS non-zero. */
+#if WITH_OBJECT_HEADERS
+        hc->header = OBJECT_HEADER;
+        hc->birthGeneration = gGeneration;
+#endif
+        ptr = hc->data;
+
+        /* The caller may not want us to collect this object.
+         * If not, throw it in the nonCollectableRefs table, which
+         * will be added to the root set when we GC.
+         *
+         * Note that until DVM_OBJECT_INIT() is called on this
+         * object, its clazz will be NULL.  Since the object is
+         * in this table, it will be scanned as part of the root
+         * set.  scanObject() explicitly deals with the NULL clazz.
+         */
+        if ((flags & ALLOC_NO_GC) != 0) {
+            if (!dvmHeapAddToHeapRefTable(&gcHeap->nonCollectableRefs, ptr)) {
+                LOGE_HEAP("dvmMalloc(): no room for any more "
+                        "ALLOC_NO_GC objects: %zd\n",
+                        dvmHeapNumHeapRefTableEntries(
+                                &gcHeap->nonCollectableRefs));
+                dvmAbort();
+            }
+        }
+
+#ifdef WITH_PROFILER
+        if (gDvm.allocProf.enabled) {
+            Thread* self = dvmThreadSelf();
+            gDvm.allocProf.allocCount++;
+            gDvm.allocProf.allocSize += size;
+            if (self != NULL) {
+                self->allocProf.allocCount++;
+                self->allocProf.allocSize += size;
+            }
+        }
+#endif
+    } else {
+        /* The allocation failed.
+         */
+        ptr = NULL;
+
+#ifdef WITH_PROFILER
+        if (gDvm.allocProf.enabled) {
+            Thread* self = dvmThreadSelf();
+            gDvm.allocProf.failedAllocCount++;
+            gDvm.allocProf.failedAllocSize += size;
+            if (self != NULL) {
+                self->allocProf.failedAllocCount++;
+                self->allocProf.failedAllocSize += size;
+            }
+        }
+#endif
+    }
+
+    dvmUnlockHeap();
+
+    if (ptr != NULL) {
+        /*
+         * If this block is immediately GCable, and they haven't asked us not
+         * to track it, add it to the internal tracking list.
+         *
+         * If there's no "self" yet, we can't track it.  Calls made before
+         * the Thread exists should use ALLOC_NO_GC.
+         */
+        if ((flags & (ALLOC_DONT_TRACK | ALLOC_NO_GC)) == 0) {
+            dvmAddTrackedAlloc(ptr, NULL);
+        }
+    } else {
+        /* 
+         * The allocation failed; throw an OutOfMemoryError.
+         */
+        throwOOME();
+    }
+
+    return ptr;
+}
+
+/*
+ * Returns true iff <obj> points to a valid allocated object.
+ */
+bool dvmIsValidObject(const Object* obj)
+{
+    const DvmHeapChunk *hc;
+
+    /* Don't bother if it's NULL or not 8-byte aligned.
+     */
+    hc = ptr2chunk(obj);
+    /* NOTE(review): ptr2chunk() is applied before the NULL check; if it
+     * performs pointer arithmetic that is technically undefined for a
+     * NULL obj, though the result is only tested below, never
+     * dereferenced. */
+    if (obj != NULL && ((uintptr_t)hc & (8-1)) == 0) {
+        /* Even if the heap isn't locked, this shouldn't return
+         * any false negatives.  The only mutation that could
+         * be happening is allocation, which means that another
+         * thread could be in the middle of a read-modify-write
+         * to add a new bit for a new object.  However, that
+         * RMW will have completed by the time any other thread
+         * could possibly see the new pointer, so there is no
+         * danger of dvmIsValidObject() being called on a valid
+         * pointer whose bit isn't set.
+         *
+         * Freeing will only happen during the sweep phase, which
+         * only happens while the heap is locked.
+         */
+        return dvmHeapSourceContains(hc);
+    }
+    return false;
+}
+
+/*
+ * Clear flags that were passed into dvmMalloc() et al.
+ * e.g., ALLOC_NO_GC, ALLOC_DONT_TRACK.
+ */
+void dvmClearAllocFlags(Object *obj, int mask)
+{
+    /* Clearing ALLOC_NO_GC: the object was pinned via the
+     * nonCollectableRefs table; drop its entry so it becomes
+     * collectable again.  Aborts if the entry is missing, since
+     * that indicates table corruption or a mismatched clear.
+     */
+    if ((mask & ALLOC_NO_GC) != 0) {
+        dvmLockHeap();
+        if (dvmIsValidObject(obj)) {
+            if (!dvmHeapRemoveFromHeapRefTable(&gDvm.gcHeap->nonCollectableRefs,
+                                               obj))
+            {
+                /* Fixed: this message previously said "dvmMalloc()",
+                 * which pointed anyone reading the log at the wrong
+                 * function.
+                 */
+                LOGE_HEAP("dvmClearAllocFlags(): failed to remove "
+                        "ALLOC_NO_GC bit from object 0x%08x\n",
+                        (uintptr_t)obj);
+                dvmAbort();
+            }
+//TODO: shrink if the table is very empty
+        }
+        dvmUnlockHeap();
+    }
+
+    /* Clearing ALLOC_DONT_TRACK: drop the internal tracking reference
+     * for this object.
+     */
+    if ((mask & ALLOC_DONT_TRACK) != 0) {
+        dvmReleaseTrackedAlloc(obj, NULL);
+    }
+}
+
+/* Returns a byte count >= the object's size: the size of the heap
+ * chunk containing it, minus the DvmHeapChunk header.
+ */
+size_t dvmObjectSizeInHeap(const Object *obj)
+{
+    return dvmHeapSourceChunkSize(ptr2chunk(obj)) - sizeof(DvmHeapChunk);
+}
+
+/*
+ * Initiate garbage collection.
+ *
+ * NOTES:
+ * - If we don't hold gDvm.threadListLock, it's possible for a thread to
+ *   be added to the thread list while we work.  The thread should NOT
+ *   start executing, so this is only interesting when we start chasing
+ *   thread stacks.  (Before we do so, grab the lock.)
+ *
+ * We are not allowed to GC when the debugger has suspended the VM, which
+ * is awkward because debugger requests can cause allocations.  The easiest
+ * way to enforce this is to refuse to GC on an allocation made by the
+ * JDWP thread -- we have to expand the heap or fail.
+ */
+void dvmCollectGarbageInternal(bool collectSoftReferences)
+{
+    GcHeap *gcHeap = gDvm.gcHeap;
+    Object *softReferences;
+    Object *weakReferences;
+    Object *phantomReferences;
+
+    u8 now;
+    s8 timeSinceLastGc;
+    s8 gcElapsedTime;
+    /* Filled in by dvmHeapSweepUnmarkedObjects() below. */
+    int numFreed;
+    size_t sizeFreed;
+
+#if DVM_TRACK_HEAP_MARKING
+    /* Since weak and soft references are always cleared,
+     * they don't require any marking.
+     * (Soft are lumped into strong when they aren't cleared.)
+     */
+    size_t strongMarkCount = 0;
+    size_t strongMarkSize = 0;
+    size_t finalizeMarkCount = 0;
+    size_t finalizeMarkSize = 0;
+    size_t phantomMarkCount = 0;
+    size_t phantomMarkSize = 0;
+#endif
+
+    /* The heap lock must be held.
+     */
+
+    if (gcHeap->gcRunning) {
+        LOGW_HEAP("Attempted recursive GC\n");
+        return;
+    }
+    gcHeap->gcRunning = true;
+    now = dvmGetRelativeTimeUsec();
+    if (gcHeap->gcStartTime != 0) {
+        timeSinceLastGc = (now - gcHeap->gcStartTime) / 1000;
+    } else {
+        timeSinceLastGc = 0;
+    }
+    gcHeap->gcStartTime = now;
+
+    LOGV_HEAP("GC starting -- suspending threads\n");
+
+    dvmSuspendAllThreads(SUSPEND_FOR_GC);
+
+    /* Get the priority (the "nice" value) of the current thread.  The
+     * getpriority() call can legitimately return -1, so we have to
+     * explicitly test errno.
+     */
+    errno = 0;
+    int oldThreadPriority = kInvalidPriority;
+    int priorityResult = getpriority(PRIO_PROCESS, 0);
+    if (errno != 0) {
+        LOGI_HEAP("getpriority(self) failed: %s\n", strerror(errno));
+    } else if (priorityResult > ANDROID_PRIORITY_NORMAL) {
+        /* Current value is numerically greater than "normal", which
+         * in backward UNIX terms means lower priority.
+         */
+        if (setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL) != 0) {
+            LOGI_HEAP("Unable to elevate priority from %d to %d\n",
+                priorityResult, ANDROID_PRIORITY_NORMAL);
+        } else {
+            /* priority elevated; save value so we can restore it later */
+            LOGD_HEAP("Elevating priority from %d to %d\n",
+                priorityResult, ANDROID_PRIORITY_NORMAL);
+            oldThreadPriority = priorityResult;
+        }
+    }
+
+    /* Wait for the HeapWorker thread to block.
+     * (It may also already be suspended in interp code,
+     * in which case it's not holding heapWorkerLock.)
+     */
+    dvmLockMutex(&gDvm.heapWorkerLock);
+
+    /* Make sure that the HeapWorker thread hasn't become
+     * wedged inside interp code.  If it has, this call will
+     * print a message and abort the VM.
+     */
+    dvmAssertHeapWorkerThreadRunning();
+
+    /* Lock the pendingFinalizationRefs list.
+     *
+     * Acquire the lock after suspending so the finalizer
+     * thread can't block in the RUNNING state while
+     * we try to suspend.
+     */
+    dvmLockMutex(&gDvm.heapWorkerListLock);
+
+#ifdef WITH_PROFILER
+    dvmMethodTraceGCBegin();
+#endif
+
+#if WITH_HPROF
+
+/* Set DUMP_HEAP_ON_DDMS_UPDATE to 1 to enable heap dumps
+ * whenever DDMS requests a heap update (HPIF chunk).
+ * The output files will appear in /data/misc, which must
+ * already exist.
+ * You must define "WITH_HPROF := true" in your buildspec.mk
+ * and recompile libdvm for this to work.
+ *
+ * To enable stack traces for each allocation, define
+ * "WITH_HPROF_STACK := true" in buildspec.mk.  This option slows down
+ * allocations and also requires 8 additional bytes per object on the
+ * GC heap.
+ */
+#define DUMP_HEAP_ON_DDMS_UPDATE 0
+#if DUMP_HEAP_ON_DDMS_UPDATE
+    gcHeap->hprofDumpOnGc |= (gcHeap->ddmHpifWhen != 0);
+#endif
+
+    if (gcHeap->hprofDumpOnGc) {
+        gcHeap->hprofContext = hprofStartup("/data/misc");
+        if (gcHeap->hprofContext != NULL) {
+            hprofStartHeapDump(gcHeap->hprofContext);
+        }
+        gcHeap->hprofDumpOnGc = false;
+    }
+#endif
+
+    if (timeSinceLastGc < 10000) {
+        LOGD_HEAP("GC! (%dms since last GC)\n",
+                (int)timeSinceLastGc);
+    } else {
+        LOGD_HEAP("GC! (%d sec since last GC)\n",
+                (int)(timeSinceLastGc / 1000));
+    }
+#if DVM_TRACK_HEAP_MARKING
+    gcHeap->markCount = 0;
+    gcHeap->markSize = 0;
+#endif
+
+    /* Set up the marking context.
+     */
+    dvmHeapBeginMarkStep();
+
+    /* Mark the set of objects that are strongly reachable from the roots.
+     */
+    LOGD_HEAP("Marking...");
+    dvmHeapMarkRootSet();
+
+    /* dvmHeapScanMarkedObjects() will build the lists of known
+     * instances of the Reference classes.
+     */
+    gcHeap->softReferences = NULL;
+    gcHeap->weakReferences = NULL;
+    gcHeap->phantomReferences = NULL;
+
+    /* Make sure that we don't hard-mark the referents of Reference
+     * objects by default.
+     */
+    gcHeap->markAllReferents = false;
+
+    /* Don't mark SoftReferences if our caller wants us to collect them.
+     * This has to be set before calling dvmHeapScanMarkedObjects().
+     */
+    if (collectSoftReferences) {
+        gcHeap->softReferenceCollectionState = SR_COLLECT_ALL;
+    }
+
+    /* Recursively mark any objects that marked objects point to strongly.
+     * If we're not collecting soft references, soft-reachable
+     * objects will also be marked.
+     */
+    LOGD_HEAP("Recursing...");
+    dvmHeapScanMarkedObjects();
+#if DVM_TRACK_HEAP_MARKING
+    strongMarkCount = gcHeap->markCount;
+    strongMarkSize = gcHeap->markSize;
+    gcHeap->markCount = 0;
+    gcHeap->markSize = 0;
+#endif
+
+    /* Latch these so that the other calls to dvmHeapScanMarkedObjects() don't
+     * mess with them.
+     */
+    softReferences = gcHeap->softReferences;
+    weakReferences = gcHeap->weakReferences;
+    phantomReferences = gcHeap->phantomReferences;
+
+    /* All strongly-reachable objects have now been marked.
+     */
+    if (gcHeap->softReferenceCollectionState != SR_COLLECT_NONE) {
+        LOGD_HEAP("Handling soft references...");
+        dvmHeapHandleReferences(softReferences, REF_SOFT);
+        // markCount always zero
+
+        /* Now that we've tried collecting SoftReferences,
+         * fall back to not collecting them.  If the heap
+         * grows, we will start collecting again.
+         */
+        gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
+    } // else dvmHeapScanMarkedObjects() already marked the soft-reachable set
+    LOGD_HEAP("Handling weak references...");
+    dvmHeapHandleReferences(weakReferences, REF_WEAK);
+    // markCount always zero
+
+    /* Once all weak-reachable objects have been taken
+     * care of, any remaining unmarked objects can be finalized.
+     */
+    LOGD_HEAP("Finding finalizations...");
+    dvmHeapScheduleFinalizations();
+#if DVM_TRACK_HEAP_MARKING
+    finalizeMarkCount = gcHeap->markCount;
+    finalizeMarkSize = gcHeap->markSize;
+    gcHeap->markCount = 0;
+    gcHeap->markSize = 0;
+#endif
+
+    /* Any remaining objects that are not pending finalization
+     * could be phantom-reachable.  This will mark any phantom-reachable
+     * objects, as well as enqueue their references.
+     */
+    LOGD_HEAP("Handling phantom references...");
+    dvmHeapHandleReferences(phantomReferences, REF_PHANTOM);
+#if DVM_TRACK_HEAP_MARKING
+    phantomMarkCount = gcHeap->markCount;
+    phantomMarkSize = gcHeap->markSize;
+    gcHeap->markCount = 0;
+    gcHeap->markSize = 0;
+#endif
+
+//TODO: take care of JNI weak global references
+
+#if DVM_TRACK_HEAP_MARKING
+    /* NOTE(review): the sizes are size_t but printed with "%d";
+     * "%zu" would be the exact specifier. */
+    LOGI_HEAP("Marked objects: %dB strong, %dB final, %dB phantom\n",
+            strongMarkSize, finalizeMarkSize, phantomMarkSize);
+#endif
+
+#ifdef WITH_DEADLOCK_PREDICTION
+    dvmDumpMonitorInfo("before sweep");
+#endif
+    LOGD_HEAP("Sweeping...");
+    dvmHeapSweepUnmarkedObjects(&numFreed, &sizeFreed);
+#ifdef WITH_DEADLOCK_PREDICTION
+    dvmDumpMonitorInfo("after sweep");
+#endif
+
+    LOGD_HEAP("Cleaning up...");
+    dvmHeapFinishMarkStep();
+
+    LOGD_HEAP("Done.");
+
+    /* Now's a good time to adjust the heap size, since
+     * we know what our utilization is.
+     *
+     * This doesn't actually resize any memory;
+     * it just lets the heap grow more when necessary.
+     */
+    dvmHeapSourceGrowForUtilization();
+    dvmHeapSizeChanged();
+
+#if WITH_HPROF
+    if (gcHeap->hprofContext != NULL) {
+        hprofFinishHeapDump(gcHeap->hprofContext);
+//TODO: write a HEAP_SUMMARY record
+        hprofShutdown(gcHeap->hprofContext);
+        gcHeap->hprofContext = NULL;
+    }
+#endif
+
+    /* Now that we've freed up the GC heap, return any large
+     * free chunks back to the system.  They'll get paged back
+     * in the next time they're used.  Don't do it immediately,
+     * though;  if the process is still allocating a bunch of
+     * memory, we'll be taking a ton of page faults that we don't
+     * necessarily need to.
+     *
+     * Cancel any old scheduled trims, and schedule a new one.
+     */
+    dvmScheduleHeapSourceTrim(5);  // in seconds
+
+#ifdef WITH_PROFILER
+    dvmMethodTraceGCEnd();
+#endif
+    LOGV_HEAP("GC finished -- resuming threads\n");
+
+    gcHeap->gcRunning = false;
+
+    /* Release in the reverse of acquisition order: the list lock,
+     * then the worker lock.
+     */
+    dvmUnlockMutex(&gDvm.heapWorkerListLock);
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+
+    dvmResumeAllThreads(SUSPEND_FOR_GC);
+    if (oldThreadPriority != kInvalidPriority) {
+        if (setpriority(PRIO_PROCESS, 0, oldThreadPriority) != 0) {
+            LOGW_HEAP("Unable to reset priority to %d: %s\n",
+                oldThreadPriority, strerror(errno));
+        } else {
+            LOGD_HEAP("Reset priority to %d\n", oldThreadPriority);
+        }
+    }
+    gcElapsedTime = (dvmGetRelativeTimeUsec() - gcHeap->gcStartTime) / 1000;
+    /* NOTE(review): sizeFreed is a size_t but is printed with "%zd"
+     * (ssize_t) below -- "%zu" would be the exact match. */
+    if (gcElapsedTime < 10000) {
+        LOGD("GC freed %d objects / %zd bytes in %dms\n",
+                numFreed, sizeFreed, (int)gcElapsedTime);
+    } else {
+        LOGD("GC freed %d objects / %zd bytes in %d sec\n",
+                numFreed, sizeFreed, (int)(gcElapsedTime / 1000));
+    }
+    dvmLogGcStats(numFreed, sizeFreed, gcElapsedTime);
+
+    if (gcHeap->ddmHpifWhen != 0) {
+        LOGD_HEAP("Sending VM heap info to DDM\n");
+        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
+    }
+    if (gcHeap->ddmHpsgWhen != 0) {
+        LOGD_HEAP("Dumping VM heap to DDM\n");
+        dvmDdmSendHeapSegments(false, false);
+    }
+    if (gcHeap->ddmNhsgWhen != 0) {
+        LOGD_HEAP("Dumping native heap to DDM\n");
+        dvmDdmSendHeapSegments(false, true);
+    }
+}
+
+#if WITH_HPROF
+/* Trigger a GC that writes an hprof heap dump: set the dump flag and
+ * run a collection under the heap lock.
+ */
+void hprofDumpHeap()
+{
+    dvmLockMutex(&gDvm.gcHeapLock);
+
+    gDvm.gcHeap->hprofDumpOnGc = true;
+    dvmCollectGarbageInternal(false);
+
+    dvmUnlockMutex(&gDvm.gcHeapLock);
+}
+
+/* Forward the current GC scan state (root tag plus owning thread) to
+ * the active hprof context, if a dump is in progress.
+ */
+void dvmHeapSetHprofGcScanState(hprof_heap_tag_t state, u4 threadSerialNumber)
+{
+    if (gDvm.gcHeap->hprofContext != NULL) {
+        hprofSetGcScanState(gDvm.gcHeap->hprofContext, state,
+                threadSerialNumber);
+    }
+}
+#endif
diff --git a/vm/alloc/Heap.h b/vm/alloc/Heap.h
new file mode 100644
index 0000000..cc29c40
--- /dev/null
+++ b/vm/alloc/Heap.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Internal heap functions
+ */
+#ifndef _DALVIK_ALLOC_HEAP
+#define _DALVIK_ALLOC_HEAP
+
+/*
+ * Initialize the GC heap.
+ *
+ * Returns true if successful, false otherwise.
+ */
+bool dvmHeapStartup(void);
+
+/*
+ * Initialization that needs to wait until after leaving zygote mode.
+ * This needs to be called before the first allocation or GC that
+ * happens after forking.
+ */
+bool dvmHeapStartupAfterZygote(void);
+
+/*
+ * Tear down the GC heap.
+ *
+ * Frees all memory allocated via dvmMalloc() as
+ * a side-effect.
+ */
+void dvmHeapShutdown(void);
+
+#if 0       // needs to be in Alloc.h so debug code can find it.
+/*
+ * Returns a number of bytes greater than or
+ * equal to the size of the named object in the heap.
+ *
+ * Specifically, it returns the size of the heap
+ * chunk which contains the object.
+ */
+size_t dvmObjectSizeInHeap(const Object *obj);
+#endif
+
+/*
+ * Run the garbage collector without doing any locking.
+ */
+void dvmCollectGarbageInternal(bool collectSoftReferences);
+
+#endif  // _DALVIK_ALLOC_HEAP
diff --git a/vm/alloc/HeapBitmap.c b/vm/alloc/HeapBitmap.c
new file mode 100644
index 0000000..2c75678
--- /dev/null
+++ b/vm/alloc/HeapBitmap.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "HeapBitmap.h"
+#include "clz.h"
+#include <limits.h>     // for ULONG_MAX
+#include <sys/mman.h>   // for madvise(), mmap()
+#include <cutils/ashmem.h>
+
+#define HB_ASHMEM_NAME "dalvik-heap-bitmap"
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+#define ALIGN_UP_TO_PAGE_SIZE(p) \
+    (((size_t)(p) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))
+
+#define LIKELY(exp)     (__builtin_expect((exp) != 0, true))
+#define UNLIKELY(exp)   (__builtin_expect((exp) != 0, false))
+
+/*
+ * Initialize a HeapBitmap so that it points to a bitmap large
+ * enough to cover a heap at <base> of <maxSize> bytes, where
+ * objects are guaranteed to be HB_OBJECT_ALIGNMENT-aligned.
+ */
+bool
+dvmHeapBitmapInit(HeapBitmap *hb, const void *base, size_t maxSize,
+        const char *name)
+{
+    void *bits;
+    size_t bitsLen;
+    size_t allocLen;
+    int fd;
+    char nameBuf[ASHMEM_NAME_LEN] = HB_ASHMEM_NAME;
+
+    assert(hb != NULL);
+
+    bitsLen = HB_OFFSET_TO_INDEX(maxSize) * sizeof(*hb->bits);
+    allocLen = ALIGN_UP_TO_PAGE_SIZE(bitsLen);   // required by ashmem
+
+    if (name != NULL) {
+        snprintf(nameBuf, sizeof(nameBuf), HB_ASHMEM_NAME "/%s", name);
+    }
+    fd = ashmem_create_region(nameBuf, allocLen);
+    if (fd < 0) {
+        LOGE("Could not create %zu-byte ashmem region \"%s\" to cover "
+                "%zu-byte heap (%d)\n",
+                allocLen, nameBuf, maxSize, fd);
+        return false;
+    }
+
+    bits = mmap(NULL, bitsLen, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+    close(fd);
+    if (bits == MAP_FAILED) {
+        LOGE("Could not mmap %d-byte ashmem region \"%s\"\n",
+                bitsLen, nameBuf);
+        return false;
+    }
+
+    memset(hb, 0, sizeof(*hb));
+    hb->bits = bits;
+    hb->bitsLen = bitsLen;
+    hb->base = (uintptr_t)base;
+    hb->max = hb->base - 1;
+
+    return true;
+}
+
+/*
+ * Initialize <hb> so that it covers the same extent as <templateBitmap>.
+ */
+bool
+dvmHeapBitmapInitFromTemplate(HeapBitmap *hb, const HeapBitmap *templateBitmap,
+        const char *name)
+{
+    return dvmHeapBitmapInit(hb,
+            (void *)templateBitmap->base, HB_MAX_OFFSET(templateBitmap), name);
+}
+
+/*
+ * Initialize the bitmaps in <out> so that they cover the same extent as
+ * the corresponding bitmaps in <templates>.
+ */
+bool
+dvmHeapBitmapInitListFromTemplates(HeapBitmap out[], HeapBitmap templates[],
+    size_t numBitmaps, const char *name)
+{
+    size_t i;
+    char fullName[PATH_MAX];
+
+    fullName[sizeof(fullName)-1] = '\0';
+    for (i = 0; i < numBitmaps; i++) {
+        bool ok;
+
+        /* If two ashmem regions have the same name, only one gets
+         * the name when looking at the maps.
+         */
+        snprintf(fullName, sizeof(fullName)-1, "%s/%zd", name, i);
+        
+        ok = dvmHeapBitmapInitFromTemplate(&out[i], &templates[i], fullName);
+        if (!ok) {
+            dvmHeapBitmapDeleteList(out, i);
+            return false;
+        }
+    }
+    return true;
+}
+
+/*
+ * Clean up any resources associated with the bitmap.
+ */
+void
+dvmHeapBitmapDelete(HeapBitmap *hb)
+{
+    assert(hb != NULL);
+
+    if (hb->bits != NULL) {
+        // Re-calculate the size we passed to mmap().
+        size_t allocLen = ALIGN_UP_TO_PAGE_SIZE(hb->bitsLen);
+        munmap((char *)hb->bits, allocLen);
+    }
+    memset(hb, 0, sizeof(*hb));
+}
+
+/*
+ * Clean up any resources associated with the bitmaps.
+ */
+void
+dvmHeapBitmapDeleteList(HeapBitmap hbs[], size_t numBitmaps)
+{
+    size_t i;
+
+    for (i = 0; i < numBitmaps; i++) {
+        dvmHeapBitmapDelete(&hbs[i]);
+    }
+}
+
+/*
+ * Fill the bitmap with zeroes.  Returns the bitmap's memory to
+ * the system as a side-effect.
+ */
+void
+dvmHeapBitmapZero(HeapBitmap *hb)
+{
+    assert(hb != NULL);
+
+    if (hb->bits != NULL) {
+        /* This returns the memory to the system.
+         * Successive page faults will return zeroed memory.
+         */
+        madvise(hb->bits, hb->bitsLen, MADV_DONTNEED);
+        hb->max = hb->base - 1;
+    }
+}
+
/*
 * Walk through the bitmaps in increasing address order, and find the
 * object pointers that correspond to places where the bitmaps differ.
 * Call <callback> zero or more times with lists of these object pointers.
 *
 * The <finger> argument to the callback indicates the next-highest
 * address that hasn't been visited yet; setting bits for objects whose
 * addresses are less than <finger> are not guaranteed to be seen by
 * the current XorWalk.  <finger> will be set to ULONG_MAX when the
 * end of the bitmap is reached.
 *
 * Returns false if the two bitmaps describe different heaps or have
 * different sizes, or if the callback ever returns false; returns
 * true otherwise.
 */
bool
dvmHeapBitmapXorWalk(const HeapBitmap *hb1, const HeapBitmap *hb2,
        bool (*callback)(size_t numPtrs, void **ptrs,
                         const void *finger, void *arg),
        void *callbackArg)
{
    /* Object pointers are accumulated here and handed to the callback
     * in batches of at most kPointerBufSize.
     */
    static const size_t kPointerBufSize = 128;
    void *pointerBuf[kPointerBufSize];
    void **pb = pointerBuf;
    size_t index;
    size_t i;

/* Hand the pointers accumulated so far to the callback with <finger_>
 * as the resume point, then reset the buffer.  Returns false from the
 * enclosing function if the callback fails.
 */
#define FLUSH_POINTERBUF(finger_) \
    do { \
        if (!callback(pb - pointerBuf, (void **)pointerBuf, \
                (void *)(finger_), callbackArg)) \
        { \
            LOGW("dvmHeapBitmapXorWalk: callback failed\n"); \
            return false; \
        } \
        pb = pointerBuf; \
    } while (false)

/* Convert each set bit in <bits_> (highest bit == lowest address, per
 * HB_OFFSET_TO_MASK) into an object pointer and append it to the
 * buffer, flushing when a full word might not fit.  Uses <i> from the
 * enclosing scope as the current word index.
 */
#define DECODE_BITS(hb_, bits_, update_index_) \
    do { \
        if (UNLIKELY(bits_ != 0)) { \
            static const unsigned long kHighBit = \
                    (unsigned long)1 << (HB_BITS_PER_WORD - 1); \
            const uintptr_t ptrBase = HB_INDEX_TO_OFFSET(i) + hb_->base; \
/*TODO: hold onto ptrBase so we can shrink max later if possible */ \
/*TODO: see if this is likely or unlikely */ \
            while (bits_ != 0) { \
                const int rshift = CLZ(bits_); \
                bits_ &= ~(kHighBit >> rshift); \
                *pb++ = (void *)(ptrBase + rshift * HB_OBJECT_ALIGNMENT); \
            } \
            /* Make sure that there are always enough slots available */ \
            /* for an entire word of 1s. */ \
            if (kPointerBufSize - (pb - pointerBuf) < HB_BITS_PER_WORD) { \
                FLUSH_POINTERBUF(ptrBase + \
                        HB_BITS_PER_WORD * HB_OBJECT_ALIGNMENT); \
                if (update_index_) { \
                    /* The callback may have caused hb_->max to grow. */ \
                    index = HB_OFFSET_TO_INDEX(hb_->max - hb_->base); \
                } \
            } \
        } \
    } while (false)

    assert(hb1 != NULL);
    assert(hb1->bits != NULL);
    assert(hb2 != NULL);
    assert(hb2->bits != NULL);
    assert(callback != NULL);

    /* The bitmaps must describe the same heap extent. */
    if (hb1->base != hb2->base) {
        LOGW("dvmHeapBitmapXorWalk: bitmaps cover different heaps "
                "(0x%08x != 0x%08x)\n",
                (uintptr_t)hb1->base, (uintptr_t)hb2->base);
        return false;
    }
    if (hb1->bitsLen != hb2->bitsLen) {
        LOGW("dvmHeapBitmapXorWalk: size of bitmaps differ (%zd != %zd)\n",
                hb1->bitsLen, hb2->bitsLen);
        return false;
    }
    if (hb1->max < hb1->base && hb2->max < hb2->base) {
        /* Easy case; both are obviously empty.
         */
        return true;
    }

    /* First, walk along the section of the bitmaps that may be the same.
     */
    if (hb1->max >= hb1->base && hb2->max >= hb2->base) {
        unsigned long int *p1, *p2;
        uintptr_t offset;

        /* Only walk up to the lower of the two maxes; anything past it
         * is handled by the single-bitmap walk below.
         */
        offset = ((hb1->max < hb2->max) ? hb1->max : hb2->max) - hb1->base;
//TODO: keep track of which (and whether) one is longer for later
        index = HB_OFFSET_TO_INDEX(offset);

        p1 = hb1->bits;
        p2 = hb2->bits;
        for (i = 0; i <= index; i++) {
//TODO: unroll this. pile up a few in locals?
            unsigned long int diff = *p1++ ^ *p2++;
            DECODE_BITS(hb1, diff, false);
//BUG: if the callback was called, either max could have changed.
        }
        /* The next index to look at.
         */
        index++;
    } else {
        /* One of the bitmaps is empty.
         */
        index = 0;
    }

    /* If one bitmap's max is larger, walk through the rest of the
     * set bits.
     */
    /* NOTE(review): these two declarations are deliberately left
     * un-indented; they are mid-block declarations (C99) covering the
     * remainder of the function.
     */
const HeapBitmap *longHb;
unsigned long int *p;
//TODO: may be the same size, in which case this is wasted work
    longHb = (hb1->max > hb2->max) ? hb1 : hb2;
    i = index;
    index = HB_OFFSET_TO_INDEX(longHb->max - longHb->base);
    p = longHb->bits + i;
    for (/* i = i */; i <= index; i++) {
//TODO: unroll this
        unsigned long bits = *p++;
        DECODE_BITS(longHb, bits, true);
    }

    if (pb > pointerBuf) {
        /* Set the finger to the end of the heap (rather than longHb->max)
         * so that the callback doesn't expect to be called again
         * if it happens to change the current max.
         */
        FLUSH_POINTERBUF(longHb->base + HB_MAX_OFFSET(longHb));
    }

    return true;

#undef FLUSH_POINTERBUF
#undef DECODE_BITS
}
+
+/*
+ * Fills outIndexList with indices so that for all i:
+ *
+ *   hb[outIndexList[i]].base < hb[outIndexList[i+1]].base
+ */
+static void
+createSortedBitmapIndexList(const HeapBitmap hbs[], size_t numBitmaps,
+        size_t outIndexList[])
+{
+    int i, j;
+
+    /* numBitmaps is usually 2 or 3, so use a simple sort */
+    for (i = 0; i < (int) numBitmaps; i++) {
+        outIndexList[i] = i;
+        for (j = 0; j < i; j++) {
+            if (hbs[j].base > hbs[i].base) {
+                int tmp = outIndexList[i];
+                outIndexList[i] = outIndexList[j];
+                outIndexList[j] = tmp;
+            }
+        }
+    }
+}
+
+/*
+ * Similar to dvmHeapBitmapXorWalk(), but compare multiple bitmaps.
+ * Regardless of the order of the arrays, the bitmaps will be visited
+ * in address order, so that finger will increase monotonically.
+ */
+bool
+dvmHeapBitmapXorWalkLists(const HeapBitmap hbs1[], const HeapBitmap hbs2[],
+        size_t numBitmaps,
+        bool (*callback)(size_t numPtrs, void **ptrs,
+                         const void *finger, void *arg),
+        void *callbackArg)
+{
+    size_t indexList[numBitmaps];
+    size_t i;
+
+    /* Sort the bitmaps by address.  Assume that the two lists contain
+     * congruent bitmaps.
+     */
+    createSortedBitmapIndexList(hbs1, numBitmaps, indexList);
+
+    /* Walk each pair of bitmaps, lowest address first.
+     */
+    for (i = 0; i < numBitmaps; i++) {
+        bool ok;
+
+        ok = dvmHeapBitmapXorWalk(&hbs1[indexList[i]], &hbs2[indexList[i]],
+                callback, callbackArg);
+        if (!ok) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+/*
+ * Similar to dvmHeapBitmapXorWalk(), but visit the set bits
+ * in a single bitmap.
+ */
+bool
+dvmHeapBitmapWalk(const HeapBitmap *hb,
+        bool (*callback)(size_t numPtrs, void **ptrs,
+                         const void *finger, void *arg),
+        void *callbackArg)
+{
+    /* Create an empty bitmap with the same extent as <hb>.
+     * Don't actually allocate any memory.
+     */
+    HeapBitmap emptyHb = *hb;
+    emptyHb.max = emptyHb.base - 1; // empty
+    emptyHb.bits = (void *)1;       // non-NULL but intentionally bad
+
+    return dvmHeapBitmapXorWalk(hb, &emptyHb, callback, callbackArg);
+}
+
+/*
+ * Similar to dvmHeapBitmapXorWalkList(), but visit the set bits
+ * in a single list of bitmaps.  Regardless of the order of the array,
+ * the bitmaps will be visited in address order, so that finger will
+ * increase monotonically.
+ */
+bool dvmHeapBitmapWalkList(const HeapBitmap hbs[], size_t numBitmaps,
+        bool (*callback)(size_t numPtrs, void **ptrs,
+                         const void *finger, void *arg),
+        void *callbackArg)
+{
+    size_t indexList[numBitmaps];
+    size_t i;
+
+    /* Sort the bitmaps by address.
+     */
+    createSortedBitmapIndexList(hbs, numBitmaps, indexList);
+
+    /* Walk each bitmap, lowest address first.
+     */
+    for (i = 0; i < numBitmaps; i++) {
+        bool ok;
+
+        ok = dvmHeapBitmapWalk(&hbs[indexList[i]], callback, callbackArg);
+        if (!ok) {
+            return false;
+        }
+    }
+
+    return true;
+}
diff --git a/vm/alloc/HeapBitmap.h b/vm/alloc/HeapBitmap.h
new file mode 100644
index 0000000..0994600
--- /dev/null
+++ b/vm/alloc/HeapBitmap.h
@@ -0,0 +1,339 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _DALVIK_HEAP_BITMAP
+#define _DALVIK_HEAP_BITMAP
+
+#include <stdint.h>
+
+#define HB_OBJECT_ALIGNMENT 8
+#define HB_BITS_PER_WORD    (sizeof (unsigned long int) * 8)
+
+/* <offset> is the difference from .base to a pointer address.
+ * <index> is the index of .bits that contains the bit representing
+ *         <offset>.
+ */
+#define HB_OFFSET_TO_INDEX(offset_) \
+    ((uintptr_t)(offset_) / HB_OBJECT_ALIGNMENT / HB_BITS_PER_WORD)
+#define HB_INDEX_TO_OFFSET(index_) \
+    ((uintptr_t)(index_) * HB_OBJECT_ALIGNMENT * HB_BITS_PER_WORD)
+
+/* Pack the bits in backwards so they come out in address order
+ * when using CLZ.
+ */
+#define HB_OFFSET_TO_MASK(offset_) \
+    (1 << \
+        (31-(((uintptr_t)(offset_) / HB_OBJECT_ALIGNMENT) % HB_BITS_PER_WORD)))
+
+/* Return the maximum offset (exclusive) that <hb> can represent.
+ */
+#define HB_MAX_OFFSET(hb_) \
+    HB_INDEX_TO_OFFSET((hb_)->bitsLen / sizeof(*(hb_)->bits))
+
+#define HB_INLINE_PROTO(p) \
+    static inline p __attribute__((always_inline)); \
+    static inline p
+
+
+typedef struct {
+    /* The bitmap data, which points to an mmap()ed area of zeroed
+     * anonymous memory.
+     */
+    unsigned long int *bits;
+
+    /* The size of the memory pointed to by bits, in bytes.
+     */
+    size_t bitsLen;
+
+    /* The base address, which corresponds to the first bit in
+     * the bitmap.
+     */
+    uintptr_t base;
+
+    /* The highest pointer value ever returned by an allocation
+     * from this heap.  I.e., the highest address that may correspond
+     * to a set bit.  If there are no bits set, (max < base).
+     */
+    uintptr_t max;
+} HeapBitmap;
+
+
/*
 * Initialize a HeapBitmap so that it points to a bitmap large
 * enough to cover a heap at <base> of <maxSize> bytes, where
 * objects are guaranteed to be HB_OBJECT_ALIGNMENT-aligned.
 *
 * Returns true on success, false if the bitmap storage could not
 * be created.
 */
bool dvmHeapBitmapInit(HeapBitmap *hb, const void *base, size_t maxSize,
        const char *name);

/*
 * Initialize <hb> so that it covers the same extent as <templateBitmap>.
 *
 * Returns true on success, false otherwise.
 */
bool dvmHeapBitmapInitFromTemplate(HeapBitmap *hb,
        const HeapBitmap *templateBitmap, const char *name);

/*
 * Initialize the bitmaps in <out> so that they cover the same extent as
 * the corresponding bitmaps in <templates>.
 *
 * Returns true on success; on failure, deletes any bitmaps already
 * created and returns false.
 */
bool dvmHeapBitmapInitListFromTemplates(HeapBitmap out[],
    HeapBitmap templates[], size_t numBitmaps, const char *name);

/*
 * Clean up any resources associated with the bitmap.
 */
void dvmHeapBitmapDelete(HeapBitmap *hb);

/*
 * Clean up any resources associated with the bitmaps.
 */
void dvmHeapBitmapDeleteList(HeapBitmap hbs[], size_t numBitmaps);

/*
 * Fill the bitmap with zeroes.  Returns the bitmap's memory to
 * the system as a side-effect.
 */
void dvmHeapBitmapZero(HeapBitmap *hb);

/*
 * Walk through the bitmaps in increasing address order, and find the
 * object pointers that correspond to places where the bitmaps differ.
 * Call <callback> zero or more times with lists of these object pointers.
 *
 * The <finger> argument to the callback indicates the next-highest
 * address that hasn't been visited yet; setting bits for objects whose
 * addresses are less than <finger> are not guaranteed to be seen by
 * the current XorWalk.  <finger> will be set to ULONG_MAX when the
 * end of the bitmap is reached.
 *
 * Returns false if the bitmaps are incompatible or the callback ever
 * returns false; true otherwise.
 */
bool dvmHeapBitmapXorWalk(const HeapBitmap *hb1, const HeapBitmap *hb2,
        bool (*callback)(size_t numPtrs, void **ptrs,
                         const void *finger, void *arg),
        void *callbackArg);

/*
 * Similar to dvmHeapBitmapXorWalk(), but compare multiple bitmaps.
 * Regardless of the order of the arrays, the bitmaps will be visited
 * in address order, so that finger will increase monotonically.
 */
bool dvmHeapBitmapXorWalkLists(const HeapBitmap hbs1[], const HeapBitmap hbs2[],
        size_t numBitmaps,
        bool (*callback)(size_t numPtrs, void **ptrs,
                         const void *finger, void *arg),
        void *callbackArg);

/*
 * Similar to dvmHeapBitmapXorWalk(), but visit the set bits
 * in a single bitmap.
 */
bool dvmHeapBitmapWalk(const HeapBitmap *hb,
        bool (*callback)(size_t numPtrs, void **ptrs,
                         const void *finger, void *arg),
        void *callbackArg);

/*
 * Similar to dvmHeapBitmapXorWalkList(), but visit the set bits
 * in a single list of bitmaps.  Regardless of the order of the array,
 * the bitmaps will be visited in address order, so that finger will
 * increase monotonically.
 */
bool dvmHeapBitmapWalkList(const HeapBitmap hbs[], size_t numBitmaps,
        bool (*callback)(size_t numPtrs, void **ptrs,
                         const void *finger, void *arg),
        void *callbackArg);
+/*
+ * Return true iff <obj> is within the range of pointers that
+ * have had corresponding bits set in this bitmap.
+ */
+HB_INLINE_PROTO(
+    bool
+    dvmHeapBitmapMayContainObject(const HeapBitmap *hb,
+            const void *obj)
+)
+{
+    const uintptr_t p = (const uintptr_t)obj;
+
+    assert((p & (HB_OBJECT_ALIGNMENT - 1)) == 0);
+
+    return p >= hb->base && p <= hb->max;
+}
+
+/*
+ * Return true iff <obj> is within the range of pointers that this
+ * bitmap could potentially cover, even if a bit has not been set
+ * for it.
+ */
+HB_INLINE_PROTO(
+    bool
+    dvmHeapBitmapCoversAddress(const HeapBitmap *hb, const void *obj)
+)
+{
+    assert(hb != NULL);
+
+    if (obj != NULL) {
+        const uintptr_t offset = (uintptr_t)obj - hb->base;
+        const size_t index = HB_OFFSET_TO_INDEX(offset);
+        return index < hb->bitsLen / sizeof(*hb->bits);
+    }
+    return false;
+}
+
+/*
+ * Internal function; do not call directly.
+ */
+HB_INLINE_PROTO(
+    unsigned long int
+    _heapBitmapModifyObjectBit(HeapBitmap *hb, const void *obj,
+            bool setBit, bool returnOld)
+)
+{
+    const uintptr_t offset = (uintptr_t)obj - hb->base;
+    const size_t index = HB_OFFSET_TO_INDEX(offset);
+    const unsigned long int mask = HB_OFFSET_TO_MASK(offset);
+
+#ifndef NDEBUG
+    assert(hb->bits != NULL);
+    assert((uintptr_t)obj >= hb->base);
+    assert(index < hb->bitsLen / sizeof(*hb->bits));
+#endif
+
+    if (setBit) {
+        if ((uintptr_t)obj > hb->max) {
+            hb->max = (uintptr_t)obj;
+        }
+        if (returnOld) {
+            unsigned long int *p = hb->bits + index;
+            const unsigned long int word = *p;
+            *p |= mask;
+            return word & mask;
+        } else {
+            hb->bits[index] |= mask;
+        }
+    } else {
+        hb->bits[index] &= ~mask;
+    }
+    return false;
+}
+
+/*
+ * Sets the bit corresponding to <obj>, and returns the previous value
+ * of that bit (as zero or non-zero). Does no range checking to see if
+ * <obj> is outside of the coverage of the bitmap.
+ *
+ * NOTE: casting this value to a bool is dangerous, because higher
+ * set bits will be lost.
+ */
+HB_INLINE_PROTO(
+    unsigned long int
+    dvmHeapBitmapSetAndReturnObjectBit(HeapBitmap *hb, const void *obj)
+)
+{
+    return _heapBitmapModifyObjectBit(hb, obj, true, true);
+}
+
+/*
+ * Like dvmHeapBitmapSetAndReturnObjectBit(), but sets/returns the bit
+ * in the appropriate bitmap.  Results are undefined if <obj> is not
+ * covered by any bitmap.
+ */
+HB_INLINE_PROTO(
+    unsigned long int
+    dvmHeapBitmapSetAndReturnObjectBitInList(HeapBitmap hbs[],
+            size_t numBitmaps, const void *obj)
+)
+{
+    size_t i;
+
+    for (i = 0; i < numBitmaps; i++) {
+        if (dvmHeapBitmapCoversAddress(&hbs[i], obj)) {
+            return dvmHeapBitmapSetAndReturnObjectBit(&hbs[i], obj);
+        }
+    }
+
+    assert(!"object not covered by any bitmap");
+    return false;
+}
+
+/*
+ * Sets the bit corresponding to <obj>, and widens the range of seen
+ * pointers if necessary.  Does no range checking.
+ */
+HB_INLINE_PROTO(
+    void
+    dvmHeapBitmapSetObjectBit(HeapBitmap *hb, const void *obj)
+)
+{
+    (void)_heapBitmapModifyObjectBit(hb, obj, true, false);
+}
+
+/*
+ * Clears the bit corresponding to <obj>.  Does no range checking.
+ */
+HB_INLINE_PROTO(
+    void
+    dvmHeapBitmapClearObjectBit(HeapBitmap *hb, const void *obj)
+)
+{
+    (void)_heapBitmapModifyObjectBit(hb, obj, false, false);
+}
+
+/*
+ * Returns the current value of the bit corresponding to <obj>,
+ * as zero or non-zero.  Does no range checking.
+ *
+ * NOTE: casting this value to a bool is dangerous, because higher
+ * set bits will be lost.
+ */
+HB_INLINE_PROTO(
+    unsigned long int
+    dvmHeapBitmapIsObjectBitSet(const HeapBitmap *hb, const void *obj)
+)
+{
+    assert(dvmHeapBitmapCoversAddress(hb, obj));
+    assert(hb->bits != NULL);
+    assert((uintptr_t)obj >= hb->base);
+
+    if ((uintptr_t)obj <= hb->max) {
+        const uintptr_t offset = (uintptr_t)obj - hb->base;
+        return hb->bits[HB_OFFSET_TO_INDEX(offset)] & HB_OFFSET_TO_MASK(offset);
+    } else {
+        return 0;
+    }
+}
+
+/*
+ * Looks through the list of bitmaps and returns the current value of the
+ * bit corresponding to <obj>, which may be covered by any of the bitmaps.
+ * Does no range checking.
+ */
+HB_INLINE_PROTO(
+    long
+    dvmHeapBitmapIsObjectBitSetInList(const HeapBitmap hbs[], size_t numBitmaps,
+            const void *obj)
+)
+{
+    size_t i;
+
+    for (i = 0; i < numBitmaps; i++) {
+        if (dvmHeapBitmapCoversAddress(&hbs[i], obj)) {
+            return dvmHeapBitmapIsObjectBitSet(&hbs[i], obj);
+        }
+    }
+    return false;
+}
+
+#undef HB_INLINE_PROTO
+
+#endif  // _DALVIK_HEAP_BITMAP
diff --git a/vm/alloc/HeapDebug.c b/vm/alloc/HeapDebug.c
new file mode 100644
index 0000000..fc3655f
--- /dev/null
+++ b/vm/alloc/HeapDebug.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fcntl.h>
+#include <malloc.h>
+
+#include "Dalvik.h"
+#include "HeapInternal.h"
+#include "HeapSource.h"
+#include "Float12.h"
+
+int dvmGetHeapDebugInfo(HeapDebugInfoType info)
+{
+    switch (info) {
+    case kVirtualHeapSize:
+        return (int)dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);
+    case kVirtualHeapAllocated:
+        return (int)dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
+    default:
+        return -1;
+    }
+}
+
/* Looks up the cmdline for the process and tries to find
 * the most descriptive five characters, then inserts the
 * short name into the provided event value.
 *
 * Bits [62-24] of *ep are replaced with the five name characters;
 * all other bits are preserved.
 */
#define PROC_NAME_LEN 5
static void insertProcessName(long long *ep)
{
    static bool foundRealName = false;
    static char name[PROC_NAME_LEN] = { 'X', 'X', 'X', 'X', 'X' };
    long long event = *ep;

    if (!foundRealName) {
        /* fd 0 is a valid descriptor, so test with >= 0 (was > 0). */
        int fd = open("/proc/self/cmdline", O_RDONLY);
        if (fd >= 0) {
            char buf[128];
            ssize_t n = read(fd, buf, sizeof(buf) - 1);
            close(fd);
            if (n > 0) {
                /* NUL-terminate up front: strcmp()/strrchr()/strlen()
                 * below require it even when the whole name fits in
                 * PROC_NAME_LEN bytes (previously the short-name path
                 * left buf unterminated and strcmp() read past the
                 * valid bytes).
                 */
                buf[n] = '\0';
                memset(name, 0, sizeof(name));
                if (n <= PROC_NAME_LEN) {
                    // The whole name fits.
                    memcpy(name, buf, n);
                } else {
                    /* We need to truncate.  The name will look something
                     * like "com.android.home".  Favor the characters
                     * immediately following the last dot.
                     */
                    char *dot = strrchr(buf, '.');
                    if (dot == NULL) {
                        /* Or, look for a slash, in case it's something like
                         * "/system/bin/runtime".
                         */
                        dot = strrchr(buf, '/');
                    }
                    if (dot != NULL) {
                        dot++;  // Skip the dot
                        size_t dotlen = strlen(dot);
                        if (dotlen < PROC_NAME_LEN) {
                            /* Use all available characters.  We know that
                             * n > PROC_NAME_LEN from the check above.
                             */
                            dot -= PROC_NAME_LEN - dotlen;
                        }
                        strncpy(name, dot, PROC_NAME_LEN);
                    } else {
                        // No dot; just use the leading characters.
                        memcpy(name, buf, PROC_NAME_LEN);
                    }
                }
                if (strcmp(buf, "zygote") != 0) {
                    /* If the process is no longer called "zygote",
                     * cache this name.
                     */
                    foundRealName = true;
                }
            }
        }
    }

    /* Clear bits [62-24] (and 63) and pack the five name characters in.
     * Casting through unsigned char prevents a high-bit character from
     * sign-extending across the event word on targets where char is
     * signed.
     */
    event &= ~(0xffffffffffLL << 24);
    event |= (long long)(unsigned char)name[0] << 56;
    event |= (long long)(unsigned char)name[1] << 48;
    event |= (long long)(unsigned char)name[2] << 40;
    event |= (long long)(unsigned char)name[3] << 32;
    event |= (long long)(unsigned char)name[4] << 24;

    *ep = event;
}
+
+// See device/data/etc/event-log-tags
+#define EVENT_LOG_TAG_dvm_gc_info 20001
+#define EVENT_LOG_TAG_dvm_gc_madvise_info 20002
+
+void dvmLogGcStats(size_t numFreed, size_t sizeFreed, size_t gcTimeMs)
+{
+    const GcHeap *gcHeap = gDvm.gcHeap;
+    size_t perHeapActualSize[HEAP_SOURCE_MAX_HEAP_COUNT],
+           perHeapAllowedSize[HEAP_SOURCE_MAX_HEAP_COUNT],
+           perHeapNumAllocated[HEAP_SOURCE_MAX_HEAP_COUNT],
+           perHeapSizeAllocated[HEAP_SOURCE_MAX_HEAP_COUNT];
+    unsigned char eventBuf[1 + (1 + sizeof(long long)) * 4];
+    size_t actualSize, allowedSize, numAllocated, sizeAllocated;
+    size_t i;
+    size_t softLimit = dvmHeapSourceGetIdealFootprint();
+    size_t nHeaps = dvmHeapSourceGetNumHeaps();
+
+    /* Enough to quiet down gcc for unitialized variable check */
+    perHeapActualSize[0] = perHeapAllowedSize[0] = perHeapNumAllocated[0] =
+                           perHeapSizeAllocated[0] = 0;
+    actualSize = dvmHeapSourceGetValue(HS_FOOTPRINT, perHeapActualSize, 
+                                       HEAP_SOURCE_MAX_HEAP_COUNT);
+    allowedSize = dvmHeapSourceGetValue(HS_ALLOWED_FOOTPRINT, 
+                      perHeapAllowedSize, HEAP_SOURCE_MAX_HEAP_COUNT);
+    numAllocated = dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED,
+                      perHeapNumAllocated, HEAP_SOURCE_MAX_HEAP_COUNT);
+    sizeAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED,
+                      perHeapSizeAllocated, HEAP_SOURCE_MAX_HEAP_COUNT);
+
+    /* 
+     * Construct the the first 64-bit value to write to the log. 
+     * Global information:
+     *
+     * [63   ] Must be zero
+     * [62-24] ASCII process identifier
+     * [23-12] GC time in ms
+     * [11- 0] Bytes freed
+     *
+     */
+    long long event0;
+    event0 = 0LL << 63 |
+            (long long)intToFloat12(gcTimeMs) << 12 |
+            (long long)intToFloat12(sizeFreed);
+    insertProcessName(&event0);
+
+    /*
+     * Aggregated heap stats:
+     *
+     * [63-62] 10
+     * [61-60] Reserved; must be zero
+     * [59-48] Objects freed
+     * [47-36] Actual size (current footprint)
+     * [35-24] Allowed size (current hard max)
+     * [23-12] Objects allocated
+     * [11- 0] Bytes allocated
+     */
+    long long event1;
+    event1 = 2LL << 62 |
+            (long long)intToFloat12(numFreed) << 48 |
+            (long long)intToFloat12(actualSize) << 36 |
+            (long long)intToFloat12(allowedSize) << 24 |
+            (long long)intToFloat12(numAllocated) << 12 |
+            (long long)intToFloat12(sizeAllocated);
+
+    /*
+     * Report the current state of the zygote heap(s).
+     *
+     * The active heap is always heap[0].  We can be in one of three states
+     * at present:
+     *
+     *  (1) Still in the zygote.  Zygote using heap[0].
+     *  (2) In the zygote, when the first child is started.  We created a
+     *      new heap just before the first fork() call, so the original
+     *      "zygote heap" is now heap[1], and we have a small heap[0] for
+     *      anything we do from here on.
+     *  (3) In an app process.  The app gets a new heap[0], and can also
+     *      see the two zygote heaps [1] and [2] (probably unwise to
+     *      assume any specific ordering).
+     *
+     * So if nHeaps == 1, we want the stats from heap[0]; else we want
+     * the sum of the values from heap[1] to heap[nHeaps-1].
+     *
+     *
+     * Zygote heap stats (except for the soft limit, which belongs to the
+     * active heap):
+     *
+     * [63-62] 11
+     * [61-60] Reserved; must be zero
+     * [59-48] Soft Limit (for the active heap)
+     * [47-36] Actual size (current footprint)
+     * [35-24] Allowed size (current hard max)
+     * [23-12] Objects allocated
+     * [11- 0] Bytes allocated
+     */
+    long long event2;
+    size_t zActualSize, zAllowedSize, zNumAllocated, zSizeAllocated;
+    int firstHeap = (nHeaps == 1) ? 0 : 1;
+    size_t hh;
+
+    zActualSize = zAllowedSize = zNumAllocated = zSizeAllocated = 0;
+    for (hh = firstHeap; hh < nHeaps; hh++) {
+        zActualSize += perHeapActualSize[hh];
+        zAllowedSize += perHeapAllowedSize[hh];
+        zNumAllocated += perHeapNumAllocated[hh];
+        zSizeAllocated += perHeapSizeAllocated[hh];
+    }
+    event2 = 3LL << 62 |
+            (long long)intToFloat12(softLimit) << 48 |
+            (long long)intToFloat12(zActualSize) << 36 |
+            (long long)intToFloat12(zAllowedSize) << 24 |
+            (long long)intToFloat12(zNumAllocated) << 12 |
+            (long long)intToFloat12(zSizeAllocated);
+
+    /*
+     * Report the current external allocation stats and the native heap
+     * summary.
+     *
+     * [63-48] Reserved; must be zero (TODO: put new data in these slots)
+     * [47-36] dlmalloc_footprint
+     * [35-24] mallinfo: total allocated space
+     * [23-12] External byte limit
+     * [11- 0] External bytes allocated
+     */
+    long long event3;
+    size_t externalLimit, externalBytesAllocated;
+    size_t uordblks, footprint;
+
+#if 0
+    /*
+     * This adds 2-5msec to the GC cost on a DVT, or about 2-3% of the cost
+     * of a GC, so it's not horribly expensive but it's not free either.
+     */
+    extern size_t dlmalloc_footprint(void);
+    struct mallinfo mi;
+    //u8 start, end;
+
+    //start = dvmGetRelativeTimeNsec();
+    mi = mallinfo();
+    uordblks = mi.uordblks;
+    footprint = dlmalloc_footprint();
+    //end = dvmGetRelativeTimeNsec();
+    //LOGD("mallinfo+footprint took %dusec; used=%zd footprint=%zd\n",
+    //    (int)((end - start) / 1000), mi.uordblks, footprint);
+#else
+    uordblks = footprint = 0;
+#endif
+
+    externalLimit =
+            dvmHeapSourceGetValue(HS_EXTERNAL_LIMIT, NULL, 0);
+    externalBytesAllocated =
+            dvmHeapSourceGetValue(HS_EXTERNAL_BYTES_ALLOCATED, NULL, 0);
+    event3 =
+            (long long)intToFloat12(footprint) << 36 |
+            (long long)intToFloat12(uordblks) << 24 |
+            (long long)intToFloat12(externalLimit) << 12 |
+            (long long)intToFloat12(externalBytesAllocated);
+
+    /* Build the event data.
+     * [ 0: 0] item count (4)
+     * [ 1: 1] EVENT_TYPE_LONG
+     * [ 2: 9] event0
+     * [10:10] EVENT_TYPE_LONG
+     * [11:18] event1
+     * [19:19] EVENT_TYPE_LONG
+     * [20:27] event2
+     * [28:28] EVENT_TYPE_LONG
+     * [29:36] event3
+     */
+    unsigned char *c = eventBuf;
+    *c++ = 4;
+    *c++ = EVENT_TYPE_LONG;
+    memcpy(c, &event0, sizeof(event0));
+    c += sizeof(event0);
+    *c++ = EVENT_TYPE_LONG;
+    memcpy(c, &event1, sizeof(event1));
+    c += sizeof(event1);
+    *c++ = EVENT_TYPE_LONG;
+    memcpy(c, &event2, sizeof(event2));
+    c += sizeof(event2);
+    *c++ = EVENT_TYPE_LONG;
+    memcpy(c, &event3, sizeof(event3));
+
+    (void) android_btWriteLog(EVENT_LOG_TAG_dvm_gc_info, EVENT_TYPE_LIST,
+            eventBuf, sizeof(eventBuf));
+}
+
+/* Logs the number of bytes released back to the OS via madvise() after
+ * a GC, as a two-int event: the total across all heaps, and the portion
+ * attributable to the zygote heap(s).
+ *
+ * madvisedSizes[i] holds the madvised byte count for heap i;
+ * arrayLen must be at least dvmHeapSourceGetNumHeaps().
+ */
+void dvmLogMadviseStats(size_t madvisedSizes[], size_t arrayLen)
+{
+    unsigned char eventBuf[1 + (1 + sizeof(int)) * 2];
+    size_t total, zyg;
+    size_t firstHeap, i;
+    size_t nHeaps = dvmHeapSourceGetNumHeaps();
+
+    assert(arrayLen >= nHeaps);
+
+    /* heap[0] is the active heap; when there is more than one heap,
+     * heaps [1..nHeaps-1] are the zygote heaps.  With a single heap,
+     * heap[0] is itself the zygote heap, so zyg == total.
+     */
+    firstHeap = nHeaps > 1 ? 1 : 0;
+    total = 0;
+    zyg = 0;
+    for (i = 0; i < nHeaps; i++) {
+        total += madvisedSizes[i];
+        if (i >= firstHeap) {
+            zyg += madvisedSizes[i];
+        }
+    }
+
+    /* Build the event data.
+     * [ 0: 0] item count (2)
+     * [ 1: 1] EVENT_TYPE_INT
+     * [ 2: 5] total madvise byte count
+     * [ 6: 6] EVENT_TYPE_INT
+     * [ 7:10] zygote heap madvise byte count
+     *
+     * NOTE(review): the memcpy below copies sizeof(size_t) bytes into
+     * slots sized by sizeof(int); this assumes sizeof(size_t) ==
+     * sizeof(int) (true on 32-bit targets) -- confirm if ported.
+     */
+    unsigned char *c = eventBuf;
+    *c++ = 2;
+    *c++ = EVENT_TYPE_INT;
+    memcpy(c, &total, sizeof(total));
+    c += sizeof(total);
+    *c++ = EVENT_TYPE_INT;
+    memcpy(c, &zyg, sizeof(zyg));
+    c += sizeof(zyg);
+
+    (void) android_btWriteLog(EVENT_LOG_TAG_dvm_gc_madvise_info,
+            EVENT_TYPE_LIST, eventBuf, sizeof(eventBuf));
+}
+
+#if 0
+#include <errno.h>
+#include <stdio.h>
+
+typedef struct HeapDumpContext {
+    FILE *fp;
+    void *chunkStart;
+    size_t chunkLen;
+    bool chunkFree;
+} HeapDumpContext;
+
+/* Writes one coalesced run of heap chunks to the dump file as
+ * "address length FREE|USED".
+ */
+static void
+dump_context(const HeapDumpContext *ctx)
+{
+    /* NOTE(review): "%08x" paired with a uintptr_t argument assumes
+     * 32-bit pointers; "%" PRIxPTR would be the portable spelling.
+     */
+    fprintf(ctx->fp, "0x%08x %12.12zd %s\n", (uintptr_t)ctx->chunkStart,
+            ctx->chunkLen, ctx->chunkFree ? "FREE" : "USED");
+}
+
+/* dvmHeapSourceWalk() callback: coalesces adjacent chunks of the same
+ * kind (free vs. used) into runs, emitting the previous run via
+ * dump_context() whenever a new run begins.  A chunk with no user
+ * pointer is considered free.  The final run is never emitted here;
+ * dvmDumpHeapToFile() flushes it after the walk.
+ */
+static void
+heap_chunk_callback(const void *chunkptr, size_t chunklen,
+                    const void *userptr, size_t userlen, void *arg)
+{
+    HeapDumpContext *ctx = (HeapDumpContext *)arg;
+    bool chunkFree = (userptr == NULL);
+
+    if (chunkFree != ctx->chunkFree ||
+            ((char *)ctx->chunkStart + ctx->chunkLen) != chunkptr)
+    {
+        /* The new chunk is of a different type or isn't
+         * contiguous with the current chunk.  Dump the
+         * old one and start a new one.
+         */
+        if (ctx->chunkStart != NULL) {
+            /* It's not the first chunk. */
+            dump_context(ctx);
+        }
+        ctx->chunkStart = (void *)chunkptr;
+        ctx->chunkLen = chunklen;
+        ctx->chunkFree = chunkFree;
+    } else {
+        /* Extend the current chunk.
+         */
+        ctx->chunkLen += chunklen;
+    }
+}
+
+/* Dumps free and used ranges, as text, to the named file.
+ * Logs (but otherwise ignores) a failure to open the file.
+ */
+void dvmDumpHeapToFile(const char *fileName)
+{
+    HeapDumpContext ctx;
+    FILE *fp;
+
+    fp = fopen(fileName, "w+");
+    if (fp == NULL) {
+        LOGE("Can't open %s for writing: %s\n", fileName, strerror(errno));
+        return;
+    }
+    LOGW("Dumping heap to %s...\n", fileName);
+
+    fprintf(fp, "==== Dalvik heap dump ====\n");
+    memset(&ctx, 0, sizeof(ctx));
+    ctx.fp = fp;
+    dvmHeapSourceWalk(heap_chunk_callback, (void *)&ctx);
+    /* Flush the last coalesced run; the walk callback only emits a run
+     * when the next one begins.
+     */
+    dump_context(&ctx);
+    fprintf(fp, "==== end heap dump ====\n");
+
+    LOGW("Dumped heap to %s.\n", fileName);
+
+    fclose(fp);
+}
+#endif
diff --git a/vm/alloc/HeapDebug.h b/vm/alloc/HeapDebug.h
new file mode 100644
index 0000000..19f4b45
--- /dev/null
+++ b/vm/alloc/HeapDebug.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _DALVIK_HEAPDEBUG
+#define _DALVIK_HEAPDEBUG
+
+typedef enum HeapDebugInfoType {
+    kVirtualHeapSize = 0,
+    kNativeHeapSize = 1,
+    kVirtualHeapAllocated = 2,
+    kNativeHeapAllocated = 3,
+} HeapDebugInfoType;
+
+/* Return the specified value.
+ * Returns -1 if the type is unknown.
+ */
+int dvmGetHeapDebugInfo(HeapDebugInfoType info);
+
+#endif  // _DALVIK_HEAPDEBUG
diff --git a/vm/alloc/HeapInternal.h b/vm/alloc/HeapInternal.h
new file mode 100644
index 0000000..e3b212d
--- /dev/null
+++ b/vm/alloc/HeapInternal.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Types and macros used internally by the heap.
+ */
+#ifndef _DALVIK_ALLOC_HEAP_INTERNAL
+#define _DALVIK_ALLOC_HEAP_INTERNAL
+
+#include <time.h>  // for struct timespec
+
+#include "HeapTable.h"
+#include "MarkSweep.h"
+
+#define SCHEDULED_REFERENCE_MAGIC   ((Object*)0x87654321)
+
+#define ptr2chunk(p)    (((DvmHeapChunk *)(p)) - 1)
+#define chunk2ptr(p)    ((void *)(((DvmHeapChunk *)(p)) + 1))
+
+#define WITH_OBJECT_HEADERS 0
+#if WITH_OBJECT_HEADERS
+#define OBJECT_HEADER   0x11335577
+extern u2 gGeneration;
+#endif
+
+/* Per-allocation header that precedes every chunk handed out by the
+ * heap; see the ptr2chunk()/chunk2ptr() macros above.  With all of the
+ * optional debug features disabled the struct is empty and the payload
+ * starts at offset zero.
+ */
+typedef struct DvmHeapChunk {
+#if WITH_OBJECT_HEADERS
+    u4 header;
+    const Object *parent;
+    const Object *parentOld;
+    const Object *markFinger;
+    const Object *markFingerOld;
+    u2 birthGeneration;
+    u2 markCount;
+    u2 scanCount;
+    u2 oldMarkGeneration;
+    u2 markGeneration;
+    u2 oldScanGeneration;
+    u2 scanGeneration;
+#endif
+#if WITH_HPROF && WITH_HPROF_STACK
+    u4 stackTraceSerialNumber;
+#endif
+    /* The allocation's payload follows the (possibly empty) header.
+     * NOTE(review): "data[0]" is a zero-length-array extension; the
+     * C99 spelling would be a flexible array member, "u8 data[]".
+     */
+    u8 data[0];
+} DvmHeapChunk;
+
+/* Top-level state for the garbage-collected heap.  A single instance
+ * is allocated out of the heap source itself at startup (see
+ * dvmHeapSourceStartup in HeapSource.c).
+ */
+struct GcHeap {
+    HeapSource      *heapSource;
+
+    /* List of heap objects that the GC should never collect.
+     * These should be included in the root set of objects.
+     */
+    HeapRefTable    nonCollectableRefs;
+
+    /* List of heap objects that will require finalization when
+     * collected.  I.e., instance objects
+     *
+     *     a) whose class definitions override java.lang.Object.finalize()
+     *
+     * *** AND ***
+     *
+     *     b) that have never been finalized.
+     *
+     * Note that this does not exclude non-garbage objects;  this
+     * is not the list of pending finalizations, but of objects that
+     * potentially have finalization in their futures.
+     */
+    LargeHeapRefTable  *finalizableRefs;
+
+    /* The list of objects that need to have finalize() called
+     * on themselves.  These references are part of the root set.
+     *
+     * This table is protected by gDvm.heapWorkerListLock, which must
+     * be acquired after the heap lock.
+     */
+    LargeHeapRefTable  *pendingFinalizationRefs;
+
+    /* Linked lists of subclass instances of java/lang/ref/Reference
+     * that we find while recursing.  The "next" pointers are hidden
+     * in the objects' <code>int Reference.vmData</code> fields.
+     * These lists are cleared and rebuilt each time the GC runs.
+     */
+    Object         *softReferences;
+    Object         *weakReferences;
+    Object         *phantomReferences;
+
+    /* The list of Reference objects that need to be cleared and/or
+     * enqueued.  The bottom two bits of the object pointers indicate
+     * whether they should be cleared and/or enqueued.
+     *
+     * This table is protected by gDvm.heapWorkerListLock, which must
+     * be acquired after the heap lock.
+     */
+    LargeHeapRefTable  *referenceOperations;
+
+    /* If non-null, the method that the HeapWorker is currently
+     * executing.
+     */
+    Object *heapWorkerCurrentObject;
+    Method *heapWorkerCurrentMethod;
+
+    /* If heapWorkerCurrentObject is non-null, this gives the time when
+     * HeapWorker started executing that method.  The time value must come
+     * from dvmGetRelativeTimeUsec().
+     *
+     * The "Cpu" entry tracks the per-thread CPU timer (when available).
+     */
+    u8 heapWorkerInterpStartTime;
+    u8 heapWorkerInterpCpuStartTime;
+
+    /* If any fields are non-zero, indicates the next (absolute) time that
+     * the HeapWorker thread should call dvmHeapSourceTrim().
+     */
+    struct timespec heapWorkerNextTrim;
+
+    /* The current state of the mark step.
+     * Only valid during a GC.
+     */
+    GcMarkContext   markContext;
+
+    /* Set to dvmGetRelativeTimeUsec() whenever a GC begins.
+     * The value is preserved between GCs, so it can be used
+     * to determine the time between successive GCs.
+     * Initialized to zero before the first GC.
+     */
+    u8              gcStartTime;
+
+    /* Is the GC running?  Used to avoid recursive calls to GC.
+     */
+    bool            gcRunning;
+
+    /* Set at the end of a GC to indicate the collection policy
+     * for SoftReferences during the following GC.
+     */
+    enum { SR_COLLECT_NONE, SR_COLLECT_SOME, SR_COLLECT_ALL }
+                    softReferenceCollectionState;
+
+    /* The size of the heap is compared against this value
+     * to determine when to start collecting SoftReferences.
+     */
+    size_t          softReferenceHeapSizeThreshold;
+
+    /* A value that will increment every time we see a SoftReference
+     * whose referent isn't marked (during SR_COLLECT_SOME).
+     * The absolute value is meaningless, and does not need to
+     * be reset or initialized at any point.
+     */
+    int             softReferenceColor;
+
+    /* Indicates whether or not the object scanner should bother
+     * keeping track of any references.  If markAllReferents is
+     * true, referents will be hard-marked.  If false, normal
+     * reference following is used.
+     */
+    bool            markAllReferents;
+
+#if DVM_TRACK_HEAP_MARKING
+    /* Every time an unmarked object becomes marked, markCount
+     * is incremented and markSize increases by the size of
+     * that object.
+     */
+    size_t          markCount;
+    size_t          markSize;
+#endif
+
+    /*
+     * Debug control values
+     */
+
+    int             ddmHpifWhen;
+    int             ddmHpsgWhen;
+    int             ddmHpsgWhat;
+    int             ddmNhsgWhen;
+    int             ddmNhsgWhat;
+
+#if WITH_HPROF
+    bool            hprofDumpOnGc;
+    hprof_context_t *hprofContext;
+#endif
+};
+
+bool dvmLockHeap(void);
+void dvmUnlockHeap(void);
+void dvmLogGcStats(size_t numFreed, size_t sizeFreed, size_t gcTimeMs);
+void dvmLogMadviseStats(size_t madvisedSizes[], size_t arrayLen);
+void dvmHeapSizeChanged(void);
+
+/*
+ * Logging helpers
+ */
+
+#define HEAP_LOG_TAG      LOG_TAG "-heap"
+
+#if LOG_NDEBUG
+#define LOGV_HEAP(...)    ((void)0)
+#define LOGD_HEAP(...)    ((void)0)
+#else
+#define LOGV_HEAP(...)    LOG(LOG_VERBOSE, HEAP_LOG_TAG, __VA_ARGS__)
+#define LOGD_HEAP(...)    LOG(LOG_DEBUG, HEAP_LOG_TAG, __VA_ARGS__)
+#endif
+#define LOGI_HEAP(...)    LOG(LOG_INFO, HEAP_LOG_TAG, __VA_ARGS__)
+#define LOGW_HEAP(...)    LOG(LOG_WARN, HEAP_LOG_TAG, __VA_ARGS__)
+#define LOGE_HEAP(...)    LOG(LOG_ERROR, HEAP_LOG_TAG, __VA_ARGS__)
+
+#define QUIET_ZYGOTE_GC 1
+#if QUIET_ZYGOTE_GC
+#undef LOGI_HEAP
+#define LOGI_HEAP(...) \
+    do { \
+        if (!gDvm.zygote) { \
+            LOG(LOG_INFO, HEAP_LOG_TAG, __VA_ARGS__); \
+        } \
+    } while (false)
+#endif
+
+#define FRACTIONAL_MB(n)    (n) / (1024 * 1024), \
+                            ((((n) % (1024 * 1024)) / 1024) * 1000) / 1024
+#define FRACTIONAL_PCT(n,max)    ((n) * 100) / (max), \
+                                 (((n) * 1000) / (max)) % 10
+
+#endif  // _DALVIK_ALLOC_HEAP_INTERNAL
diff --git a/vm/alloc/HeapSource.c b/vm/alloc/HeapSource.c
new file mode 100644
index 0000000..58952e8
--- /dev/null
+++ b/vm/alloc/HeapSource.c
@@ -0,0 +1,1574 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cutils/mspace.h>
+#include <limits.h>     // for INT_MAX
+#include <sys/mman.h>
+
+#include "Dalvik.h"
+#include "alloc/Heap.h"
+#include "alloc/HeapInternal.h"
+#include "alloc/HeapSource.h"
+#include "alloc/HeapBitmap.h"
+
+// TODO: find a real header file for these.
+extern int dlmalloc_trim(size_t);
+extern void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);
+
+static void snapIdealFootprint(void);
+static void setIdealFootprint(size_t max);
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+#define ALIGN_UP_TO_PAGE_SIZE(p) \
+    (((size_t)(p) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))
+#define ALIGN_DOWN_TO_PAGE_SIZE(p) \
+    ((size_t)(p) & ~(PAGE_SIZE - 1))
+
+#define HEAP_UTILIZATION_MAX        1024
+#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
+#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
+#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)
+
+#define HS_BOILERPLATE() \
+    do { \
+        assert(gDvm.gcHeap != NULL); \
+        assert(gDvm.gcHeap->heapSource != NULL); \
+        assert(gHs == gDvm.gcHeap->heapSource); \
+    } while (0)
+
+#define DEBUG_HEAP_SOURCE 0
+#if DEBUG_HEAP_SOURCE
+#define HSTRACE(...)  LOG(LOG_INFO, LOG_TAG "-hs", __VA_ARGS__)
+#else
+#define HSTRACE(...)  /**/
+#endif
+
+/*
+=======================================================
+=======================================================
+=======================================================
+
+How will this be used?
+allocating/freeing: Heap.c just wants to say "alloc(n)" and get a ptr
+    - if allocating in large doesn't work, try allocating from small
+Heap.c will use HeapSource.h; HeapSource.c will do the right thing
+    between small and large
+    - some operations should be abstracted; put in a structure
+
+How do we manage the size trade-offs?
+- keep mspace max footprint clamped to actual footprint
+- if small-alloc returns null, adjust large vs. small ratio
+    - give small all available slack and retry
+    - success or fail, snap back to actual footprint and give rest to large
+
+managed as "small actual" + "large actual" + "delta to allowed total footprint"
+- when allocating from one source or the other, give the delta to the
+    active source, but snap back afterwards
+- that may not work so great for a gc heap, because small will always consume.
+    - but we need to use the memory, and the current max is the amount we
+      need to fill before a GC.
+
+Find a way to permanently steal pages from the middle of the heap
+    - segment tricks?
+
+Allocate String and char[] in a separate heap?
+
+Maybe avoid growing small heap, even if there's slack?  Look at
+live ratio of small heap after a gc; scale it based on that.
+
+=======================================================
+=======================================================
+=======================================================
+*/
+
+typedef struct {
+    /* The mspace to allocate from.
+     */
+    mspace *msp;
+
+    /* The bitmap that keeps track of where objects are in the heap.
+     */
+    HeapBitmap objectBitmap;
+
+    /* The largest size that this heap is allowed to grow to.
+     */
+    size_t absoluteMaxSize;
+
+    /* Number of bytes allocated from this mspace for objects,
+     * including any overhead.  This value is NOT exact, and
+     * should only be used as an input for certain heuristics.
+     */
+    size_t bytesAllocated;
+
+    /* Number of objects currently allocated from this mspace.
+     */
+    size_t objectsAllocated;
+} Heap;
+
+struct HeapSource {
+    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
+     */
+    size_t targetUtilization;
+
+    /* Requested minimum heap size, or zero if there is no minimum.
+     */
+    size_t minimumSize;
+
+    /* The starting heap size.
+     */
+    size_t startSize;
+
+    /* The largest that the heap source as a whole is allowed to grow.
+     */
+    size_t absoluteMaxSize;
+
+    /* The desired max size of the heap source as a whole.
+     */
+    size_t idealSize;
+
+    /* The maximum number of bytes allowed to be allocated from the
+     * active heap before a GC is forced.  This is used to "shrink" the
+     * heap in lieu of actual compaction.
+     */
+    size_t softLimit;
+
+    /* The heaps; heaps[0] is always the active heap,
+     * which new objects should be allocated from.
+     */
+    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];
+
+    /* The current number of heaps.
+     */
+    size_t numHeaps;
+
+    /* External allocation count.
+     */
+    size_t externalBytesAllocated;
+
+    /* The maximum number of external bytes that may be allocated.
+     */
+    size_t externalLimit;
+
+    /* True if zygote mode was active when the HeapSource was created.
+     */
+    bool sawZygote;
+};
+
+#define hs2heap(hs_) (&((hs_)->heaps[0]))
+
+/*
+ * Returns true iff a soft limit is in effect for the active heap.
+ * (softLimit is set to INT_MAX -- i.e., no limit -- at startup.)
+ */
+static inline bool
+softLimited(const HeapSource *hs)
+{
+    /* softLimit will be either INT_MAX or the limit for the
+     * active mspace.  idealSize can be greater than softLimit
+     * if there is more than one heap.  If there is only one
+     * heap, a non-INT_MAX softLimit should always be the same
+     * as idealSize.
+     */
+    return hs->softLimit <= hs->idealSize;
+}
+
+/*
+ * Returns the current footprint of all heaps.  If includeActive
+ * is false, don't count the heap at index 0 (i.e., return only
+ * the overhead of the older, read-mostly heaps).
+ */
+static inline size_t
+oldHeapOverhead(const HeapSource *hs, bool includeActive)
+{
+    size_t footprint = 0;
+    size_t i;
+
+    if (includeActive) {
+        i = 0;
+    } else {
+        i = 1;
+    }
+    for (/* i = i */; i < hs->numHeaps; i++) {
+//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
+        footprint += mspace_footprint(hs->heaps[i].msp);
+    }
+    return footprint;
+}
+
+/*
+ * Returns the heap that <ptr> could have come from, or NULL
+ * if it could not have come from any heap.
+ *
+ * NOTE(review): dvmHeapBitmapMayContainObject presumably tests only
+ * the bitmap's covered address range, not whether the object bit is
+ * actually set -- hence "could have come from"; verify against
+ * HeapBitmap.h.
+ */
+static inline Heap *
+ptr2heap(const HeapSource *hs, const void *ptr)
+{
+    const size_t numHeaps = hs->numHeaps;
+    size_t i;
+
+//TODO: unroll this to HEAP_SOURCE_MAX_HEAP_COUNT
+    if (ptr != NULL) {
+        for (i = 0; i < numHeaps; i++) {
+            const Heap *const heap = &hs->heaps[i];
+            
+            if (dvmHeapBitmapMayContainObject(&heap->objectBitmap, ptr)) {
+                return (Heap *)heap;
+            }
+        }
+    }
+    return NULL;
+}
+
+/*
+ * Functions to update heapSource->bytesAllocated when an object
+ * is allocated or freed.  mspace_usable_size() will give
+ * us a much more accurate picture of heap utilization than
+ * the requested byte sizes would.
+ *
+ * These aren't exact, and should not be treated as such.
+ */
+static inline void
+countAllocation(Heap *heap, const void *ptr, bool isObj)
+{
+    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
+
+    /* Charge the chunk's usable size plus per-chunk bookkeeping
+     * overhead against this heap.
+     */
+    heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
+            HEAP_SOURCE_CHUNK_OVERHEAD;
+    /* Only real objects are recorded in the object bitmap; internal
+     * allocations (e.g., the GcHeap/HeapSource descriptors) pass
+     * isObj == false and are counted but not marked.
+     */
+    if (isObj) {
+        heap->objectsAllocated++;
+        dvmHeapBitmapSetObjectBit(&heap->objectBitmap, ptr);
+    }
+
+    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
+}
+
+/* Inverse of countAllocation(): deducts the chunk's usable size plus
+ * overhead from the heap's byte count, and clears the object bitmap
+ * bit for real objects.  Both counters clamp at zero because the
+ * accounting is approximate.
+ */
+static inline void
+countFree(Heap *heap, const void *ptr, bool isObj)
+{
+    size_t delta;
+
+    delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
+    assert(delta > 0);
+    if (delta < heap->bytesAllocated) {
+        heap->bytesAllocated -= delta;
+    } else {
+        heap->bytesAllocated = 0;
+    }
+    if (isObj) {
+        dvmHeapBitmapClearObjectBit(&heap->objectBitmap, ptr);
+        if (heap->objectsAllocated > 0) {
+            heap->objectsAllocated--;
+        }
+    }
+}
+
+static HeapSource *gHs = NULL;
+
+/* Creates a named, contiguous, unlocked dlmalloc mspace for use as a
+ * heap.  <id> distinguishes the ashmem region names when there are
+ * multiple heaps.  Returns NULL on failure.
+ */
+static mspace *
+createMspace(size_t startSize, size_t absoluteMaxSize, size_t id)
+{
+    mspace *msp;
+    char name[PATH_MAX];
+
+    /* If two ashmem regions have the same name, only one gets
+     * the name when looking at the maps.
+     */
+    snprintf(name, sizeof(name)-1, "dalvik-heap%s/%zd",
+        gDvm.zygote ? "/zygote" : "", id);
+    /* snprintf already NUL-terminates; this is belt-and-suspenders for
+     * the sizeof(name)-1 bound above.
+     */
+    name[sizeof(name)-1] = '\0';
+
+    /* Create an unlocked dlmalloc mspace to use as
+     * a small-object heap source.
+     *
+     * We start off reserving heapSizeStart/2 bytes but
+     * letting the heap grow to heapSizeStart.  This saves
+     * memory in the case where a process uses even less
+     * than the starting size.
+     */
+    LOGV_HEAP("Creating VM heap of size %u\n", startSize);
+    msp = create_contiguous_mspace_with_name(startSize/2,
+            absoluteMaxSize, /*locked=*/false, name);
+    if (msp != NULL) {
+        /* Don't let the heap grow past the starting size without
+         * our intervention.
+         */
+        mspace_set_max_allowed_footprint(msp, startSize);
+    } else {
+        LOGE_HEAP("Can't create VM heap of size %u\n", startSize/2);
+    }
+
+    return msp;
+}
+
+/* Installs a new heap at heaps[0], shifting the existing heaps down
+ * and freezing the footprint of the previously-active heap.
+ *
+ * If <msp> is non-NULL it is adopted as the new heap's mspace with the
+ * given absolute max size; otherwise a fresh mspace is created in
+ * whatever room remains under hs->absoluteMaxSize.
+ *
+ * Returns false if no heap could be added; calls dvmAbort() if the
+ * heap-count limit would be exceeded.
+ */
+static bool
+addNewHeap(HeapSource *hs, mspace *msp, size_t mspAbsoluteMaxSize)
+{
+    Heap heap;
+
+    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
+        LOGE("Attempt to create too many heaps (%zd >= %zd)\n",
+                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
+        dvmAbort();
+        return false;
+    }
+
+    memset(&heap, 0, sizeof(heap));
+
+    if (msp != NULL) {
+        heap.msp = msp;
+        heap.absoluteMaxSize = mspAbsoluteMaxSize;
+    } else {
+        size_t overhead;
+
+        /* The new heap may only use what the existing heaps haven't
+         * already claimed of the overall budget.
+         */
+        overhead = oldHeapOverhead(hs, true);
+        if (overhead + HEAP_MIN_FREE >= hs->absoluteMaxSize) {
+            LOGE_HEAP("No room to create any more heaps "
+                    "(%zd overhead, %zd max)\n",
+                    overhead, hs->absoluteMaxSize);
+            return false;
+        }
+        heap.absoluteMaxSize = hs->absoluteMaxSize - overhead;
+        heap.msp = createMspace(HEAP_MIN_FREE, heap.absoluteMaxSize,
+                hs->numHeaps);
+        if (heap.msp == NULL) {
+            return false;
+        }
+    }
+    /* NOTE(review): the bitmap base is the mspace pointer rounded down
+     * to a page boundary -- presumably the start of the contiguous
+     * region backing the mspace; verify against cutils/mspace.h.
+     */
+    if (!dvmHeapBitmapInit(&heap.objectBitmap,
+                           (void *)ALIGN_DOWN_TO_PAGE_SIZE(heap.msp),
+                           heap.absoluteMaxSize,
+                           "objects"))
+    {
+        LOGE_HEAP("Can't create objectBitmap\n");
+        goto fail;
+    }
+
+    /* Don't let the soon-to-be-old heap grow any further.
+     */
+    if (hs->numHeaps > 0) {
+        mspace *msp = hs->heaps[0].msp;
+        mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));
+    }
+
+    /* Put the new heap in the list, at heaps[0].
+     * Shift existing heaps down.
+     */
+    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
+    hs->heaps[0] = heap;
+    hs->numHeaps++;
+
+    return true;
+
+fail:
+    /* Only destroy the mspace if we created it in this call. */
+    if (msp == NULL) {
+        destroy_contiguous_mspace(heap.msp);
+    }
+    return false;
+}
+
+/*
+ * Initializes the heap source; must be called before any other
+ * dvmHeapSource*() functions.  Returns a GcHeap structure
+ * allocated from the heap source itself, or NULL on failure.
+ *
+ * startSize must not exceed absoluteMaxSize.  The GcHeap and
+ * HeapSource descriptors are carved out of the first mspace, so they
+ * are reclaimed when that heap is destroyed (see
+ * dvmHeapSourceShutdown).
+ */
+GcHeap *
+dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
+{
+    GcHeap *gcHeap;
+    HeapSource *hs;
+    mspace msp;
+
+    assert(gHs == NULL);
+
+    if (startSize > absoluteMaxSize) {
+        /* %zd matches the size_t arguments (the original "%d" invoked
+         * undefined behavior on a format mismatch).
+         */
+        LOGE("Bad heap parameters (start=%zd, max=%zd)\n",
+           startSize, absoluteMaxSize);
+        return NULL;
+    }
+
+    /* Create an unlocked dlmalloc mspace to use as
+     * the small object heap source.
+     */
+    msp = createMspace(startSize, absoluteMaxSize, 0);
+    if (msp == NULL) {
+        /* Was "return false", which is the wrong type for a pointer-
+         * returning function (it only worked because false == 0).
+         */
+        return NULL;
+    }
+
+    /* Allocate a descriptor from the heap we just created.
+     */
+    gcHeap = mspace_malloc(msp, sizeof(*gcHeap));
+    if (gcHeap == NULL) {
+        LOGE_HEAP("Can't allocate heap descriptor\n");
+        goto fail;
+    }
+    memset(gcHeap, 0, sizeof(*gcHeap));
+
+    hs = mspace_malloc(msp, sizeof(*hs));
+    if (hs == NULL) {
+        LOGE_HEAP("Can't allocate heap source\n");
+        goto fail;
+    }
+    memset(hs, 0, sizeof(*hs));
+
+    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
+    hs->minimumSize = 0;
+    hs->startSize = startSize;
+    hs->absoluteMaxSize = absoluteMaxSize;
+    hs->idealSize = startSize;
+    hs->softLimit = INT_MAX;    // no soft limit at first
+    hs->numHeaps = 0;
+    hs->sawZygote = gDvm.zygote;
+    if (!addNewHeap(hs, msp, absoluteMaxSize)) {
+        LOGE_HEAP("Can't add initial heap\n");
+        goto fail;
+    }
+
+    gcHeap->heapSource = hs;
+
+    /* The two descriptors live on the heap themselves; count them as
+     * (non-object) allocations.
+     */
+    countAllocation(hs2heap(hs), gcHeap, false);
+    countAllocation(hs2heap(hs), hs, false);
+
+    gHs = hs;
+    return gcHeap;
+
+fail:
+    destroy_contiguous_mspace(msp);
+    return NULL;
+}
+
+/*
+ * If the HeapSource was created while in zygote mode, this
+ * will create a new heap for post-zygote allocations.
+ * Having a separate heap should maximize the number of pages
+ * that a given app_process shares with the zygote process.
+ *
+ * Returns true on success (including the case where no new heap
+ * is needed), false if the new heap could not be created.
+ */
+bool
+dvmHeapSourceStartupAfterZygote()
+{
+    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"
+
+    HS_BOILERPLATE();
+
+    assert(!gDvm.zygote);
+
+    if (hs->sawZygote) {
+        /* Create a new heap for post-zygote allocations.
+         */
+        return addNewHeap(hs, NULL, 0);
+    }
+    return true;
+}
+
+/*
+ * This is called while in zygote mode, right before we fork() for the
+ * first time.  We create a heap for all future zygote process allocations,
+ * in an attempt to avoid touching pages in the zygote heap.  (This would
+ * probably be unnecessary if we had a compacting GC -- the source of our
+ * troubles is small allocations filling in the gaps from larger ones.)
+ *
+ * Returns true on success or if the split was already attempted;
+ * false if creating the new heap failed on the one attempt made.
+ */
+bool
+dvmHeapSourceStartupBeforeFork()
+{
+    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"
+
+    HS_BOILERPLATE();
+
+    assert(gDvm.zygote);
+
+    if (!gDvm.newZygoteHeapAllocated) {
+        /* Create a new heap for post-fork zygote allocations.  We only
+         * try once, even if it fails.
+         */
+        LOGI("Splitting out new zygote heap\n");
+        gDvm.newZygoteHeapAllocated = true;
+        return addNewHeap(hs, NULL, 0);
+    }
+    return true;
+}
+
+/*
+ * Tears down the heap source and frees any resources associated with it.
+ * Safe to call with a NULL gcHeap or one that has no heap source.
+ */
+void
+dvmHeapSourceShutdown(GcHeap *gcHeap)
+{
+    if (gcHeap != NULL && gcHeap->heapSource != NULL) {
+        HeapSource *hs;
+        size_t numHeaps;
+        size_t i;
+
+        hs = gcHeap->heapSource;
+        gHs = NULL;
+
+        /* Cache numHeaps because hs will be invalid after the last
+         * heap is freed.
+         */
+        numHeaps = hs->numHeaps;
+
+        for (i = 0; i < numHeaps; i++) {
+            Heap *heap = &hs->heaps[i];
+
+            dvmHeapBitmapDelete(&heap->objectBitmap);
+            destroy_contiguous_mspace(heap->msp);
+        }
+        /* The last heap is the original one, which contains the
+         * HeapSource object itself -- so hs (and gcHeap, which lives
+         * there too) must not be dereferenced after this point.
+         */
+    }
+}
+
+/*
+ * Returns the requested value. If the per-heap stats are requested, fill
+ * them as well.
+ *
+ * For HS_EXTERNAL_* specs the value is global (not per-heap) and
+ * perHeapStats is ignored; for the other specs the return value is
+ * the sum over all heaps.
+ *
+ * Caller must hold the heap lock.
+ */
+size_t
+dvmHeapSourceGetValue(enum HeapSourceValueSpec spec, size_t perHeapStats[],
+                      size_t arrayLen)
+{
+    HeapSource *hs = gHs;
+    size_t value = 0;
+    size_t total = 0;
+    size_t i;
+
+    HS_BOILERPLATE();
+
+    switch (spec) {
+    case HS_EXTERNAL_BYTES_ALLOCATED:
+        return hs->externalBytesAllocated;
+    case HS_EXTERNAL_LIMIT:
+        return hs->externalLimit;
+    default:
+        // look at all heaps.
+        ;
+    }
+
+    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
+    for (i = 0; i < hs->numHeaps; i++) {
+        Heap *const heap = &hs->heaps[i];
+
+        switch (spec) {
+        case HS_FOOTPRINT:
+            value = mspace_footprint(heap->msp);
+            break;
+        case HS_ALLOWED_FOOTPRINT:
+            value = mspace_max_allowed_footprint(heap->msp);
+            break;
+        case HS_BYTES_ALLOCATED:
+            value = heap->bytesAllocated;
+            break;
+        case HS_OBJECTS_ALLOCATED:
+            value = heap->objectsAllocated;
+            break;
+        default:
+            // quiet gcc
+            break;
+        }
+        if (perHeapStats) {
+            perHeapStats[i] = value;
+        }
+        total += value;
+    }
+    return total;
+}
+
+/*
+ * Writes shallow copies of the currently-used bitmaps into outBitmaps,
+ * returning the number of bitmaps written.  Returns <0 if the array
+ * was not long enough.
+ */
+ssize_t
+dvmHeapSourceGetObjectBitmaps(HeapBitmap outBitmaps[], size_t maxBitmaps)
+{
+    HeapSource *hs = gHs;
+
+    HS_BOILERPLATE();
+
+    if (maxBitmaps >= hs->numHeaps) {
+        size_t i;
+
+        for (i = 0; i < hs->numHeaps; i++) {
+            outBitmaps[i] = hs->heaps[i].objectBitmap;
+        }
+        /* i == hs->numHeaps here: the number of bitmaps written. */
+        return i;
+    }
+    return -1;
+}
+
+/*
+ * Replaces the object location HeapBitmaps with the elements of
+ * <objectBitmaps>.  The elements of <objectBitmaps> are overwritten
+ * with shallow copies of the old bitmaps.
+ *
+ * Returns false if the number of bitmaps doesn't match the number
+ * of heaps.
+ */
+bool
+dvmHeapSourceReplaceObjectBitmaps(HeapBitmap objectBitmaps[], size_t nBitmaps)
+{
+    HeapSource *hs = gHs;
+    size_t i;
+
+    HS_BOILERPLATE();
+
+    if (nBitmaps != hs->numHeaps) {
+        return false;
+    }
+
+    for (i = 0; i < hs->numHeaps; i++) {
+        Heap *heap = &hs->heaps[i];
+        HeapBitmap swap;
+
+        /* Swap rather than overwrite, so the caller gets the old
+         * bitmaps back in objectBitmaps[].
+         */
+        swap = heap->objectBitmap;
+        heap->objectBitmap = objectBitmaps[i];
+        objectBitmaps[i] = swap;
+    }
+    return true;
+}
+
/*
 * Allocates <n> bytes of zeroed data from the active heap.
 *
 * Returns NULL if the allocation would push the active heap past the
 * soft limit, or if dlmalloc itself fails; the caller decides whether
 * to GC and/or grow in response.
 */
void *
dvmHeapSourceAlloc(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    if (heap->bytesAllocated + n <= hs->softLimit) {
// TODO: allocate large blocks (>64k?) as separate mmap regions so that
//       they don't increase the high-water mark when they're freed.
// TODO: zero out large objects using madvise
        ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr != NULL) {
            countAllocation(heap, ptr, true);
        }
    } else {
        /* This allocation would push us over the soft limit;
         * act as if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation\n",
                FRACTIONAL_MB(hs->softLimit), n);
        ptr = NULL;
    }
    return ptr;
}
+
/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void *
heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    void *ptr;
    size_t max;

    /* Grow as much as possible, but don't let the real footprint
     * plus external allocations go over the absolute max.
     */
    max = heap->absoluteMaxSize;
    if (max > hs->externalBytesAllocated) {
        max -= hs->externalBytesAllocated;

        mspace_set_max_allowed_footprint(heap->msp, max);
        ptr = dvmHeapSourceAlloc(n);

        /* Shrink back down as small as possible.  Our caller may
         * readjust max_allowed to a more appropriate value.
         */
        mspace_set_max_allowed_footprint(heap->msp,
                mspace_footprint(heap->msp));
    } else {
        /* External allocations already consume the whole budget;
         * there is no room left to grow into.
         */
        ptr = NULL;
    }

    return ptr;
}
+
/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 *
 * Escalation order: plain allocation, then dropping the soft limit,
 * then growing the real footprint via heapAllocAndGrow().  On total
 * failure the original ideal size is restored.
 */
void *
dvmHeapSourceAllocAndGrow(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;
    size_t oldIdealSize;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    /* Fast path: the request fits under the current limits. */
    ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    oldIdealSize = hs->idealSize;
    if (softLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = INT_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at INT_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}
+
/*
 * Frees the memory pointed to by <ptr>, which may be NULL.
 *
 * A pointer that doesn't belong to any heap (including NULL) is
 * silently ignored.
 */
void
dvmHeapSourceFree(void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        /* The accounting is updated for every heap, even when the
         * chunk itself is left in place below.
         */
        countFree(heap, ptr, true);
        /* Only free objects that are in the active heap.
         * Touching old heaps would pull pages into this process.
         */
        if (heap == gHs->heaps) {
            mspace_free(heap->msp, ptr);
        }
    }
}
+
+/*
+ * Returns true iff <ptr> was allocated from the heap source.
+ */
+bool
+dvmHeapSourceContains(const void *ptr)
+{
+    Heap *heap;
+
+    HS_BOILERPLATE();
+
+    heap = ptr2heap(gHs, ptr);
+    if (heap != NULL) {
+        return dvmHeapBitmapIsObjectBitSet(&heap->objectBitmap, ptr) != 0;
+    }
+    return false;
+}
+
+/*
+ * Returns the value of the requested flag.
+ */
+bool
+dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
+{
+    if (ptr == NULL) {
+        return false;
+    }
+
+    if (flag == HS_CONTAINS) {
+        return dvmHeapSourceContains(ptr);
+    } else if (flag == HS_ALLOCATED_IN_ZYGOTE) {
+        HeapSource *hs = gHs;
+
+        HS_BOILERPLATE();
+
+        if (hs->sawZygote) {
+            Heap *heap;
+
+            heap = ptr2heap(hs, ptr);
+            if (heap != NULL) {
+                /* If the object is not in the active heap, we assume that
+                 * it was allocated as part of zygote.
+                 */
+                return heap != hs->heaps;
+            }
+        }
+        /* The pointer is outside of any known heap, or we are not
+         * running in zygote mode.
+         */
+        return false;
+    }
+
+    return false;
+}
+
+/*
+ * Returns the number of usable bytes in an allocated chunk; the size
+ * may be larger than the size passed to dvmHeapSourceAlloc().
+ */
+size_t
+dvmHeapSourceChunkSize(const void *ptr)
+{
+    Heap *heap;
+
+    HS_BOILERPLATE();
+
+    heap = ptr2heap(gHs, ptr);
+    if (heap != NULL) {
+        return mspace_usable_size(heap->msp, ptr);
+    }
+    return 0;
+}
+
/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t
dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    /* The second arg presumably selects "include the active heap"
     * (getSoftFootprint() passes false and adds the active heap's
     * usage separately) -- confirm against oldHeapOverhead().
     */
    return oldHeapOverhead(gHs, true);
}
+
/*
 * Return the real bytes used by old heaps and external memory
 * plus the soft usage of the current heap.  When a soft limit
 * is in effect, this is effectively what it's compared against
 * (though, in practice, it only looks at the current heap).
 */
static size_t
getSoftFootprint(bool includeActive)
{
    HeapSource *hs = gHs;
    size_t ret;

    HS_BOILERPLATE();

    /* Old-heap overhead plus tracked external allocations... */
    ret = oldHeapOverhead(hs, false) + hs->externalBytesAllocated;
    /* ...optionally plus the bytes allocated in the active heap
     * (heaps[0] is always the active heap).
     */
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}
+
/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 *
 * Caller must hold the heap lock, since gHs caches a field of
 * gDvm.gcHeap.
 */
size_t
dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}
+
/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 *
 * A softLimit of INT_MAX means "no soft limit" (see softLimited()).
 */
static void
setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace *msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_max_allowed_footprint(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_max_allowed_footprint(msp, softLimit);
        hs->softLimit = INT_MAX;
    }
}
+
/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 *
 * Note the clamp order: absoluteMaxSize wins over minimumSize if
 * the two constraints ever conflict.
 */
static void
setIdealFootprint(size_t max)
{
#if DEBUG_HEAP_SOURCE
    /* These locals only exist for the HSTRACE below; HSTRACE must
     * expand to nothing when DEBUG_HEAP_SOURCE is off, or the
     * references would not compile.
     */
    HeapSource *hs = gHs;
    HeapSource oldHs = *hs;
    mspace *msp = hs->heaps[0].msp;
    size_t oldAllowedFootprint =
            mspace_max_allowed_footprint(msp);
#else
    HeapSource *hs = gHs;
#endif

    HS_BOILERPLATE();

    if (max > hs->absoluteMaxSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB\n",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(hs->absoluteMaxSize));
        max = hs->absoluteMaxSize;
    } else if (max < hs->minimumSize) {
        max = hs->minimumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps and external allocations will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;

    HSTRACE("IDEAL %zd->%zd (%d), soft %zd->%zd (%d), allowed %zd->%zd (%d), "
            "ext %zd\n",
            oldHs.idealSize, hs->idealSize, hs->idealSize - oldHs.idealSize,
            oldHs.softLimit, hs->softLimit, hs->softLimit - oldHs.softLimit,
            oldAllowedFootprint, mspace_max_allowed_footprint(msp),
            mspace_max_allowed_footprint(msp) - oldAllowedFootprint,
            hs->externalBytesAllocated);

}
+
/*
 * Make the ideal footprint equal to the current footprint
 * (old heaps + external allocations + active-heap usage).
 */
static void
snapIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}
+
/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 *
 * Internally the ratio is stored as a fixed-point fraction of
 * HEAP_UTILIZATION_MAX.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}
+
/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.  Out-of-range values are clamped to
 * [0.2, 0.8] before being converted to fixed point.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;
    size_t newUtilization;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    /* Store as a fixed-point fraction of HEAP_UTILIZATION_MAX. */
    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    LOGV("Set heap target utilization to %zd/%d (%f)\n", 
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}
+
/*
 * If set is true, sets the new minimum heap size to size; always
 * returns the current (or previous) size.
 *
 * NOTE(review): an earlier contract said "if size is negative,
 * removes the current minimum constraint", but size is size_t and
 * can never be negative.  Passing size == 0 with set == true has
 * the same effect -- confirm this is the intended API.
 */
size_t
dvmMinimumHeapSize(size_t size, bool set)
{
    HeapSource *hs = gHs;
    size_t oldMinimumSize;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.  We also need the
     * lock for the call to setIdealFootprint().
     */
    dvmLockHeap();

    HS_BOILERPLATE();

    oldMinimumSize = hs->minimumSize;

    if (set) {
        /* Don't worry about external allocations right now.
         * setIdealFootprint() will take them into account when
         * minimumSize is used, and it's better to hold onto the
         * intended minimumSize than to clamp it arbitrarily based
         * on the current allocations.
         */
        if (size > hs->absoluteMaxSize) {
            size = hs->absoluteMaxSize;
        }
        hs->minimumSize = size;
        if (size > hs->idealSize) {
            /* Force a snap to the minimum value, which we just set
             * and which setIdealFootprint() will take into consideration.
             */
            setIdealFootprint(hs->idealSize);
        }
        /* Otherwise we'll just keep it in mind the next time
         * setIdealFootprint() is called.
         */
    }

    dvmUnlockHeap();

    return oldMinimumSize;
}
+
/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 *
 * The hs parameter is currently unused; it is kept so callers can
 * pass context if the policy ever needs per-source state.
 */
static size_t
getUtilizationTarget(const HeapSource *hs,
        size_t liveSize, size_t targetUtilization)
{
    size_t targetSize;

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Integer division rounds the quotient down before scaling,
     * so the result slightly under-estimates the exact ratio.
     */
    targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}
+
/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HeapSource *hs = gHs;
    Heap *heap;
    size_t targetHeapSize;
    size_t currentHeapUsed;
    size_t oldIdealSize;
    size_t newHeapMax;
    size_t overhead;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    currentHeapUsed = heap->bytesAllocated;
#define LET_EXTERNAL_INFLUENCE_UTILIZATION 1
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    /* This is a hack to deal with the side-effects of moving
     * bitmap data out of the Dalvik heap.  Since the amount
     * of free space after a GC scales with the size of the
     * live set, many apps expected the large free space that
     * appeared along with megabytes' worth of bitmaps.  When
     * the bitmaps were removed, the free size shrank significantly,
     * and apps started GCing constantly.  This makes it so the
     * post-GC free space is the same size it would have been
     * if the bitmaps were still in the Dalvik heap.
     */
    currentHeapUsed += hs->externalBytesAllocated;
#endif
    targetHeapSize =
            getUtilizationTarget(hs, currentHeapUsed, hs->targetUtilization);
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    /* Undo the hack so the numbers below describe the Dalvik heap
     * alone again.
     */
    currentHeapUsed -= hs->externalBytesAllocated;
    targetHeapSize -= hs->externalBytesAllocated;
#endif

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    overhead = getSoftFootprint(false);
    oldIdealSize = hs->idealSize;
    setIdealFootprint(targetHeapSize + overhead);

    /* Log the outcome; the two branches differ only in whether the
     * soft limit or the real max is the binding constraint.
     */
    newHeapMax = mspace_max_allowed_footprint(heap->msp);
    if (softLimited(hs)) {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB soft max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext, "
                "%zd.%03zdMB real max)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(hs->softLimit),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated),
                FRACTIONAL_MB(newHeapMax));
    } else {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB real max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(newHeapMax),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated));
    }
}
+
/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */

/* Releases every whole page inside [start, end) back to the kernel
 * and adds the number of released bytes to *(size_t *)nbytes.
 * Used as the callback for the dlmalloc free-page walkers.
 */
static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)((size_t)end & ~(PAGE_SIZE - 1));
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        /* MADV_DONTNEED lets the kernel reclaim the backing pages;
         * the range stays mapped and reads back zero-filled for
         * private anonymous memory.
         */
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}
+
/*
 * Return unused memory to the system if possible.
 *
 * bytesTrimmed[i] receives the number of bytes released from heap i;
 * arrayLen must be at least the number of heaps.
 */
void
dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t nativeBytes, heapBytes;
    size_t i;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps);

    heapBytes = 0;
    for (i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        bytesTrimmed[i] = 0;
        mspace_walk_free_pages(heap->msp, releasePagesInRange, 
                               &bytesTrimmed[i]);
        heapBytes += bytesTrimmed[i];
    }

    /* Same for the native heap.
     */
    dlmalloc_trim(0);
    nativeBytes = 0;
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes\n",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}
+
+/*
+ * Walks over the heap source and passes every allocated and
+ * free chunk to the callback.
+ */
+void
+dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
+                                      const void *userptr, size_t userlen,
+                                      void *arg),
+                  void *arg)
+{
+    HeapSource *hs = gHs;
+    size_t i;
+
+    HS_BOILERPLATE();
+
+    /* Walk the heaps from oldest to newest.
+     */
+//TODO: do this in address order
+    for (i = hs->numHeaps; i > 0; --i) {
+        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
+    }
+}
+
/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t
dvmHeapSourceGetNumHeaps()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->numHeaps;
}
+
+
+/*
+ * External allocation tracking
+ *
+ * In some situations, memory outside of the heap is tied to the
+ * lifetime of objects in the heap.  Since that memory is kept alive
+ * by heap objects, it should provide memory pressure that can influence
+ * GCs.
+ */
+
+
/* Returns true if an external allocation of <n> bytes could ever
 * succeed, i.e. the allowed footprint of the active heap plus all
 * tracked external bytes plus <n> fits under the absolute max.
 * Does not modify any state.
 */
static bool
externalAllocPossible(const HeapSource *hs, size_t n)
{
    const Heap *heap;
    size_t currentHeapSize;

    /* Make sure that this allocation is even possible.
     * Don't let the external size plus the actual heap size
     * go over the absolute max.  This essentially treats
     * external allocations as part of the active heap.
     *
     * Note that this will fail "mysteriously" if there's
     * a small softLimit but a large heap footprint.
     */
    heap = hs2heap(hs);
    currentHeapSize = mspace_max_allowed_footprint(heap->msp);
    if (currentHeapSize + hs->externalBytesAllocated + n <=
            heap->absoluteMaxSize)
    {
        return true;
    }
    HSTRACE("externalAllocPossible(): "
            "footprint %zu + extAlloc %zu + n %zu >= max %zu (space for %zu)\n",
            currentHeapSize, hs->externalBytesAllocated, n,
            heap->absoluteMaxSize,
            heap->absoluteMaxSize -
                    (currentHeapSize + hs->externalBytesAllocated));
    return false;
}
+
+#define EXTERNAL_TARGET_UTILIZATION 820  // 80%
+
/*
 * Tries to update the internal count of externally-allocated memory.
 * If there's enough room for that memory, returns true.  If not, returns
 * false and does not update the count.
 *
 * If <grow> is true, the external limit is raised (via the utilization
 * target) instead of failing.
 *
 * The caller must ensure externalAllocPossible(hs, n) == true.
 */
static bool
externalAlloc(HeapSource *hs, size_t n, bool grow)
{
    Heap *heap;
    size_t currentHeapSize;
    size_t newTotal;
    size_t max;
    bool grew;

    assert(hs->externalLimit >= hs->externalBytesAllocated);

    HSTRACE("externalAlloc(%zd%s)\n", n, grow ? ", grow" : "");
    assert(externalAllocPossible(hs, n));  // The caller must ensure this.

    /* External allocations have their own "free space" that they
     * can allocate from without causing a GC.
     */
    if (hs->externalBytesAllocated + n <= hs->externalLimit) {
        hs->externalBytesAllocated += n;
#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.externalAllocCount++;
            gDvm.allocProf.externalAllocSize += n;
            if (self != NULL) {
                self->allocProf.externalAllocCount++;
                self->allocProf.externalAllocSize += n;
            }
        }
#endif
        return true;
    }
    if (!grow) {
        return false;
    }

    /* GROW */
    hs->externalBytesAllocated += n;
    hs->externalLimit = getUtilizationTarget(hs,
            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
    HSTRACE("EXTERNAL grow limit to %zd\n", hs->externalLimit);
    return true;
}
+
/* Runs a garbage collection on behalf of an external allocation,
 * counting the GC in the allocation profiler when it's enabled.
 */
static void
gcForExternalAlloc(bool collectSoftReferences)
{
#ifdef WITH_PROFILER  // even if !PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
#endif
    dvmCollectGarbageInternal(collectSoftReferences);
}
+
/*
 * Updates the internal count of externally-allocated memory.  If there's
 * enough room for that memory, returns true.  If not, returns false and
 * does not update the count.
 *
 * May cause a GC as a side-effect.
 *
 * Escalation order: free space under the external limit, then a GC
 * without SoftReferences, then growing the limit, then a final GC
 * that also collects SoftReferences.
 */
bool
dvmTrackExternalAllocation(size_t n)
{
    HeapSource *hs = gHs;
    size_t overhead;
    bool ret = false;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();

    HS_BOILERPLATE();
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    if (!externalAllocPossible(hs, n)) {
        LOGE_HEAP("%zd-byte external allocation "
                "too large for this process.\n", n);
        goto out;
    }

    /* Try "allocating" using the existing "free space".
     */
    HSTRACE("EXTERNAL alloc %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);
    if (externalAlloc(hs, n, false)) {
        ret = true;
        goto out;
    }

    /* The "allocation" failed.  Free up some space by doing
     * a full garbage collection.  This may grow the heap source
     * if the live set is sufficiently large.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 1\n", n);
    gcForExternalAlloc(false);  // don't collect SoftReferences
    if (externalAlloc(hs, n, false)) {
        ret = true;
        goto out;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap source if necessary.
     */
    HSTRACE("EXTERNAL alloc %zd: frag\n", n);
    ret = externalAlloc(hs, n, true);
    dvmHeapSizeChanged();
    if (ret) {
        goto out;
    }

    /* We couldn't even grow enough to satisfy the request.
     * Try one last GC, collecting SoftReferences this time.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 2\n", n);
    gcForExternalAlloc(true);  // collect SoftReferences
    ret = externalAlloc(hs, n, true);
    dvmHeapSizeChanged();
    if (!ret) {
        LOGE_HEAP("Out of external memory on a %zu-byte allocation.\n", n);
    }

#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.failedExternalAllocCount++;
        gDvm.allocProf.failedExternalAllocSize += n;
        if (self != NULL) {
            self->allocProf.failedExternalAllocCount++;
            self->allocProf.failedExternalAllocSize += n;
        }
    }
#endif

out:
    dvmUnlockHeap();

    return ret;
}
+
+/*
+ * Reduces the internal count of externally-allocated memory.
+ */
+void
+dvmTrackExternalFree(size_t n)
+{
+    HeapSource *hs = gHs;
+    size_t newIdealSize;
+    size_t newExternalLimit;
+    size_t oldExternalBytesAllocated;
+
+    HSTRACE("EXTERNAL free %zu (%zu < %zu)\n",
+            n, hs->externalBytesAllocated, hs->externalLimit);
+
+    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
+     * heap lock if we're going to look at it.
+     */
+    dvmLockHeap();
+
+    HS_BOILERPLATE();
+    assert(hs->externalLimit >= hs->externalBytesAllocated);
+
+    oldExternalBytesAllocated = hs->externalBytesAllocated;
+    if (n <= hs->externalBytesAllocated) {
+        hs->externalBytesAllocated -= n;
+    } else {
+        n = hs->externalBytesAllocated;
+        hs->externalBytesAllocated = 0;
+    }
+
+#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
+    if (gDvm.allocProf.enabled) {
+        Thread* self = dvmThreadSelf();
+        gDvm.allocProf.externalFreeCount++;
+        gDvm.allocProf.externalFreeSize += n;
+        if (self != NULL) {
+            self->allocProf.externalFreeCount++;
+            self->allocProf.externalFreeSize += n;
+        }
+    }
+#endif
+
+    /* Shrink as quickly as we can.
+     */
+    newExternalLimit = getUtilizationTarget(hs,
+            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
+    if (newExternalLimit < oldExternalBytesAllocated) {
+        /* Make sure that the remaining free space is at least
+         * big enough to allocate something of the size that was
+         * just freed.  This makes it more likely that
+         *     externalFree(N); externalAlloc(N);
+         * will work without causing a GC.
+         */
+        HSTRACE("EXTERNAL free preserved %zu extra free bytes\n",
+                oldExternalBytesAllocated - newExternalLimit);
+        newExternalLimit = oldExternalBytesAllocated;
+    }
+    if (newExternalLimit < hs->externalLimit) {
+        hs->externalLimit = newExternalLimit;
+    }
+
+    dvmUnlockHeap();
+}
+
/*
 * Returns the number of externally-allocated bytes being tracked by
 * dvmTrackExternalAllocation/Free().
 */
size_t
dvmGetExternalBytesAllocated()
{
    const HeapSource *hs = gHs;
    size_t ret;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.  (The old comment
     * also mentioned setIdealFootprint(), but this function never
     * calls it.)
     */
    dvmLockHeap();
    HS_BOILERPLATE();
    ret = hs->externalBytesAllocated;
    dvmUnlockHeap();

    return ret;
}
diff --git a/vm/alloc/HeapSource.h b/vm/alloc/HeapSource.h
new file mode 100644
index 0000000..3007d4f
--- /dev/null
+++ b/vm/alloc/HeapSource.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _DALVIK_HEAP_SOURCE
+#define _DALVIK_HEAP_SOURCE
+
+#include "alloc/HeapInternal.h" // for GcHeap
+
+/* dlmalloc uses one size_t per allocated chunk.
+ */
+#define HEAP_SOURCE_CHUNK_OVERHEAD         (1 * sizeof (size_t))
+#define HEAP_SOURCE_WORST_CHUNK_OVERHEAD   (32 * sizeof (size_t))
+
+/* The largest number of separate heaps we can handle.
+ */
+#define HEAP_SOURCE_MAX_HEAP_COUNT 3
+
+/*
+ * Initializes the heap source; must be called before any other
+ * dvmHeapSource*() functions.
+ */
+GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize);
+
+/*
+ * If the HeapSource was created while in zygote mode, this
+ * will create a new heap for post-zygote allocations.
+ * Having a separate heap should maximize the number of pages
+ * that a given app_process shares with the zygote process.
+ */
+bool dvmHeapSourceStartupAfterZygote(void);
+
+/*
+ * If the HeapSource was created while in zygote mode, this
+ * will create an additional zygote heap before the first fork().
+ * Having a separate heap should reduce the number of shared
+ * pages subsequently touched by the zygote process.
+ */
+bool dvmHeapSourceStartupBeforeFork(void);
+
+/*
+ * Tears down the heap source and frees any resources associated with it.
+ */
+void dvmHeapSourceShutdown(GcHeap *gcHeap);
+
+/*
+ * Writes shallow copies of the currently-used bitmaps into outBitmaps,
+ * returning the number of bitmaps written.  Returns <0 if the array
+ * was not long enough.
+ */
+ssize_t dvmHeapSourceGetObjectBitmaps(HeapBitmap outBitmaps[],
+        size_t maxBitmaps);
+
+/*
+ * Replaces the object location HeapBitmaps with the elements of
+ * <objectBitmaps>.  The elements of <objectBitmaps> are overwritten
+ * with shallow copies of the old bitmaps.
+ *
+ * Returns false if the number of bitmaps doesn't match the number
+ * of heaps.
+ */
+bool dvmHeapSourceReplaceObjectBitmaps(HeapBitmap objectBitmaps[],
+        size_t nBitmaps);
+
+/*
+ * Returns the requested value. If the per-heap stats are requested, fill
+ * them as well.
+ */
+enum HeapSourceValueSpec {
+    HS_FOOTPRINT,
+    HS_ALLOWED_FOOTPRINT,
+    HS_BYTES_ALLOCATED,
+    HS_OBJECTS_ALLOCATED,
+    HS_EXTERNAL_BYTES_ALLOCATED,
+    HS_EXTERNAL_LIMIT
+};
+size_t dvmHeapSourceGetValue(enum HeapSourceValueSpec spec, 
+                             size_t perHeapStats[], size_t arrayLen);
+
+/*
+ * Allocates <n> bytes of zeroed data.
+ */
+void *dvmHeapSourceAlloc(size_t n);
+
+/*
+ * Allocates <n> bytes of zeroed data, growing up to absoluteMaxSize
+ * if necessary.
+ */
+void *dvmHeapSourceAllocAndGrow(size_t n);
+
+/*
+ * Frees the memory pointed to by <ptr>, which may be NULL.
+ */
+void dvmHeapSourceFree(void *ptr);
+
+/*
+ * Returns true iff <ptr> was allocated from the heap source.
+ */
+bool dvmHeapSourceContains(const void *ptr);
+
+/*
+ * Returns the value of the requested flag.
+ */
+enum HeapSourcePtrFlag {
+    HS_CONTAINS,    // identical to dvmHeapSourceContains()
+    HS_ALLOCATED_IN_ZYGOTE
+};
+bool dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag);
+
+/*
+ * Returns the number of usable bytes in an allocated chunk; the size
+ * may be larger than the size passed to dvmHeapSourceAlloc().
+ */
+size_t dvmHeapSourceChunkSize(const void *ptr);
+
+/*
+ * Returns the number of bytes that the heap source has allocated
+ * from the system using sbrk/mmap, etc.
+ */
+size_t dvmHeapSourceFootprint(void);
+
+/*
+ * Gets the maximum number of bytes that the heap source is allowed
+ * to allocate from the system.
+ */
+size_t dvmHeapSourceGetIdealFootprint(void);
+
+/*
+ * Given the current contents of the heap, increase the allowed
+ * heap footprint to match the target utilization ratio.  This
+ * should only be called immediately after a full mark/sweep.
+ */
+void dvmHeapSourceGrowForUtilization(void);
+
+/*
+ * Return unused memory to the system if possible.  If <bytesTrimmed>
+ * is non-NULL, the number of bytes returned to the system is written to it.
+ */
+void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen);
+
+/*
+ * Walks over the heap source and passes every allocated and
+ * free chunk to the callback.
+ */
+void dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
+                                      const void *userptr, size_t userlen,
+                                      void *arg),
+                       void *arg);
+/*
+ * Gets the number of heaps available in the heap source.
+ */
+size_t dvmHeapSourceGetNumHeaps(void);
+
+#endif  // _DALVIK_HEAP_SOURCE
diff --git a/vm/alloc/HeapTable.c b/vm/alloc/HeapTable.c
new file mode 100644
index 0000000..b56de64
--- /dev/null
+++ b/vm/alloc/HeapTable.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "alloc/HeapTable.h"
+#include "alloc/HeapInternal.h"
+
+#include <limits.h> // for INT_MAX
+
+/* Grow (or create) a heap-table allocation.
+ *
+ * Behaves like realloc(), except that a NULL oldPtr is routed to
+ * malloc() explicitly, in case the native system's realloc(NULL, n)
+ * doesn't behave like malloc(n).
+ */
+static void *heapTableRealloc(void *oldPtr, size_t newSize)
+{
+    if (oldPtr == NULL) {
+        return malloc(newSize);
+    }
+    return realloc(oldPtr, newSize);
+}
+
+/* Release memory previously obtained from heapTableRealloc().
+ * Safe to call with NULL.
+ */
+void dvmHeapHeapTableFree(void *ptr)
+{
+    free(ptr);
+}
+
+/* Returns true if the HeapRefTable has no free slots.
+ *
+ * The argument is expanded exactly once (into HRTIF_refs) so callers
+ * may safely pass expressions with side effects.  The original body
+ * passed the raw macro argument to dvmIsReferenceTableFull(), which
+ * defeated the single-evaluation temporary.
+ */
+#define heapRefTableIsFull(refs) \
+    ({ \
+        const HeapRefTable *HRTIF_refs = (refs); \
+        dvmIsReferenceTableFull(HRTIF_refs); \
+    })
+
+/* Initializes a HeapRefTable with room for <nelems> entries.
+ *
+ * The struct is zeroed first so that a failed init leaves it in a
+ * state that dvmHeapFreeHeapRefTable() can handle.  INT_MAX is passed
+ * as the third argument to dvmInitReferenceTable() — presumably the
+ * maximum entry count, i.e. effectively unbounded growth; confirm
+ * against ReferenceTable.h.
+ *
+ * Returns false if the underlying table can't be set up.
+ */
+bool dvmHeapInitHeapRefTable(HeapRefTable *refs, size_t nelems)
+{
+    memset(refs, 0, sizeof(*refs));
+    return dvmInitReferenceTable(refs, nelems, INT_MAX);
+}
+
+/* Frees the array inside the HeapRefTable, not the HeapRefTable itself.
+ *
+ * Counterpart to dvmHeapInitHeapRefTable(); the caller retains
+ * ownership of the HeapRefTable structure.
+ */
+void dvmHeapFreeHeapRefTable(HeapRefTable *refs)
+{
+    dvmClearReferenceTable(refs);
+}
+
+/*
+ * Large, non-contiguous reference tables
+ */
+
+#define kLargeHeapRefTableNElems 1024
+/* Adds <ref> to the table list, allocating and prepending a new
+ * table node if no existing node has a free slot.
+ *
+ * Invariant maintained: after a successful call, the node holding the
+ * new reference is at the head of the list, keeping future insertions
+ * cheap.  Returns false only if memory for a new node (or its backing
+ * array) can't be allocated.
+ */
+bool dvmHeapAddRefToLargeTable(LargeHeapRefTable **tableP, Object *ref)
+{
+    LargeHeapRefTable *table;
+
+    assert(tableP != NULL);
+    assert(ref != NULL);
+
+    /* Make sure that a table with a free slot is
+     * at the head of the list.
+     */
+    if (*tableP != NULL) {
+        table = *tableP;
+        LargeHeapRefTable *prevTable;
+
+        /* Find an empty slot for this reference.
+         */
+        prevTable = NULL;
+        while (table != NULL && heapRefTableIsFull(&table->refs)) {
+            prevTable = table;
+            table = table->next;
+        }
+        if (table != NULL) {
+            if (prevTable != NULL) {
+                /* Move the table to the head of the list.
+                 */
+                prevTable->next = table->next;
+                table->next = *tableP;
+                *tableP = table;
+            }
+            /* else it's already at the head. */
+
+            goto insert;
+        }
+        /* else all tables are already full;
+         * fall through to the alloc case.
+         */
+    }
+
+    /* Allocate a new table.
+     */
+    table = (LargeHeapRefTable *)heapTableRealloc(NULL,
+            sizeof(LargeHeapRefTable));
+    if (table == NULL) {
+        LOGE_HEAP("Can't allocate a new large ref table\n");
+        return false;
+    }
+    if (!dvmHeapInitHeapRefTable(&table->refs, kLargeHeapRefTableNElems)) {
+        LOGE_HEAP("Can't initialize a new large ref table\n");
+        dvmHeapHeapTableFree(table);
+        return false;
+    }
+
+    /* Stick it at the head.
+     */
+    table->next = *tableP;
+    *tableP = table;
+
+insert:
+    /* Insert the reference.  At this point the head node is known to
+     * have at least one free slot.
+     */
+    assert(table == *tableP);
+    assert(table != NULL);
+    assert(!heapRefTableIsFull(&table->refs));
+    *table->refs.nextEntry++ = ref;
+
+    return true;
+}
+
+/* Prepends a new node to the large table list, taking over the
+ * contents of <refs> by struct copy.
+ *
+ * Returns false if the node allocation fails.
+ */
+bool dvmHeapAddTableToLargeTable(LargeHeapRefTable **tableP, HeapRefTable *refs)
+{
+    LargeHeapRefTable *node;
+
+    /* Allocate the list node that will wrap the table contents. */
+    node = (LargeHeapRefTable *)heapTableRealloc(NULL, sizeof(*node));
+    if (node == NULL) {
+        LOGE_HEAP("Can't allocate a new large ref table\n");
+        return false;
+    }
+
+    /* Copy the table contents, then link the node in at the head. */
+    node->refs = *refs;
+    node->next = *tableP;
+    *tableP = node;
+
+    return true;
+}
+
+/* Frees everything associated with the LargeHeapRefTable: each
+ * node's backing reference array, then the node itself.
+ */
+void dvmHeapFreeLargeTable(LargeHeapRefTable *table)
+{
+    LargeHeapRefTable *node = table;
+
+    while (node != NULL) {
+        LargeHeapRefTable *doomed = node;
+        node = node->next;
+        dvmHeapFreeHeapRefTable(&doomed->refs);
+        dvmHeapHeapTableFree(doomed);
+    }
+}
+
+/* Pops and returns the most recently added Object from the large
+ * table, or NULL if the table is empty.
+ *
+ * A node is unlinked and freed as soon as its last entry is removed,
+ * preserving the invariant that the list never contains an empty node.
+ *
+ * (The original declared an unused local `GcHeap *gcHeap`; the dead
+ * read of gDvm.gcHeap has been removed.)
+ */
+Object *dvmHeapGetNextObjectFromLargeTable(LargeHeapRefTable **pTable)
+{
+    LargeHeapRefTable *table;
+    Object *obj;
+
+    assert(pTable != NULL);
+
+    obj = NULL;
+    table = *pTable;
+    if (table != NULL) {
+        HeapRefTable *refs = &table->refs;
+
+        /* We should never have an empty table node in the list.
+         */
+        assert(dvmReferenceTableEntries(refs) != 0);
+
+        /* Remove and return the last entry in the list.
+         */
+        obj = *--refs->nextEntry;
+
+        /* If this was the last entry in the table node,
+         * free it and patch up the list.
+         */
+        if (refs->nextEntry == refs->table) {
+            *pTable = table->next;
+            dvmClearReferenceTable(refs);
+            dvmHeapHeapTableFree(table);
+        }
+    }
+
+    return obj;
+}
+
+/* Marks every Object referenced by every node in the large table.
+ *
+ * If stripLowBits is true, the low two bits of each stored pointer
+ * are masked off before marking; this path is used for reference
+ * objects that are still waiting for the heap worker thread, whose
+ * pointers carry operation flags in the low bits.  Their referents
+ * are marked separately, when a SCHEDULED_REFERENCE_MAGIC is
+ * encountered during scanning.
+ */
+void dvmHeapMarkLargeTableRefs(LargeHeapRefTable *table, bool stripLowBits)
+{
+    LargeHeapRefTable *node;
+
+    for (node = table; node != NULL; node = node->next) {
+        Object **cur = node->refs.table;
+        Object **end = node->refs.nextEntry;
+
+        if (stripLowBits) {
+            for (; cur < end; cur++) {
+                dvmMarkObjectNonNull((Object *)((uintptr_t)*cur & ~3));
+            }
+        } else {
+            for (; cur < end; cur++) {
+                dvmMarkObjectNonNull(*cur);
+            }
+        }
+    }
+}
+
+
diff --git a/vm/alloc/HeapTable.h b/vm/alloc/HeapTable.h
new file mode 100644
index 0000000..784e8fd
--- /dev/null
+++ b/vm/alloc/HeapTable.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _DALVIK_ALLOC_HEAP_TABLE
+#define _DALVIK_ALLOC_HEAP_TABLE
+
+#include "ReferenceTable.h"
+
+typedef ReferenceTable HeapRefTable;
+typedef struct LargeHeapRefTable LargeHeapRefTable;
+typedef struct HeapSource HeapSource;
+
+struct LargeHeapRefTable {
+    LargeHeapRefTable *next;
+    HeapRefTable refs;
+};
+
+bool dvmHeapInitHeapRefTable(HeapRefTable *refs, size_t nelems);
+void dvmHeapFreeHeapRefTable(HeapRefTable *refs);
+void dvmHeapFreeLargeTable(LargeHeapRefTable *table);
+void dvmHeapHeapTableFree(void *ptr);
+bool dvmHeapAddRefToLargeTable(LargeHeapRefTable **tableP, Object *ref);
+void dvmHeapMarkLargeTableRefs(LargeHeapRefTable *table, bool stripLowBits);
+bool dvmHeapAddTableToLargeTable(LargeHeapRefTable **tableP,
+        HeapRefTable *refs);
+Object *dvmHeapGetNextObjectFromLargeTable(LargeHeapRefTable **pTable);
+
+/* Forwards to dvmAddToReferenceTable(). */
+#define dvmHeapAddToHeapRefTable(refs, ptr) \
+            dvmAddToReferenceTable((refs), (ptr))
+
+/* Returns the number of entries in the HeapRefTable.
+ *
+ * The argument is expanded exactly once (into NHRTE_refs) so callers
+ * may safely pass expressions with side effects.  The original body
+ * passed the raw macro argument to dvmReferenceTableEntries(), which
+ * defeated the single-evaluation temporary.
+ */
+#define dvmHeapNumHeapRefTableEntries(refs) \
+    ({ \
+        const HeapRefTable *NHRTE_refs = (refs); \
+        dvmReferenceTableEntries(NHRTE_refs); \
+    })
+
+/* Forwards to dvmRemoveFromReferenceTable(), searching the whole table. */
+#define dvmHeapRemoveFromHeapRefTable(refs, ptr) \
+            dvmRemoveFromReferenceTable((refs), (refs)->table, (ptr))
+
+#endif  // _DALVIK_ALLOC_HEAP_TABLE
diff --git a/vm/alloc/HeapWorker.c b/vm/alloc/HeapWorker.c
new file mode 100644
index 0000000..0244cca
--- /dev/null
+++ b/vm/alloc/HeapWorker.c
@@ -0,0 +1,499 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * An async worker thread to handle certain heap operations that
+ * need to be done in a separate thread to avoid synchronization
+ * problems.  HeapWorkers and reference clearing/enqueuing are
+ * handled by this thread.
+ */
+#include "Dalvik.h"
+#include "HeapInternal.h"
+
+#include <sys/time.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <signal.h>
+#include <errno.h>  // for ETIMEDOUT, etc.
+
+static void* heapWorkerThreadStart(void* arg);
+
+/*
+ * Initialize any HeapWorker state that Heap.c
+ * cares about.  This lets the GC start before the
+ * HeapWorker thread is initialized.
+ *
+ * Must be called exactly once, before dvmHeapWorkerStartup(); the
+ * assert guards against double-initialization.
+ */
+void dvmInitializeHeapWorkerState()
+{
+    assert(!gDvm.heapWorkerInitialized);
+
+    /* heapWorkerLock guards the heapWorker globals; heapWorkerCond
+     * signals "work available / wake up", heapWorkerIdleCond signals
+     * "worker has gone idle".
+     */
+    dvmInitMutex(&gDvm.heapWorkerLock);
+    pthread_cond_init(&gDvm.heapWorkerCond, NULL);
+    pthread_cond_init(&gDvm.heapWorkerIdleCond, NULL);
+
+    gDvm.heapWorkerInitialized = true;
+}
+
+/*
+ * Crank up the heap worker thread.
+ *
+ * Does not return until the thread is ready for business.
+ *
+ * Returns false if the thread could not be created; no cleanup of
+ * the mutex/condvars is performed in that case.
+ */
+bool dvmHeapWorkerStartup(void)
+{
+    assert(!gDvm.haltHeapWorker);
+    assert(!gDvm.heapWorkerReady);
+    assert(gDvm.heapWorkerHandle == 0);
+    assert(gDvm.heapWorkerInitialized);
+
+    /* use heapWorkerLock/heapWorkerCond to communicate readiness */
+    dvmLockMutex(&gDvm.heapWorkerLock);
+
+//BUG: If a GC happens in here or in the new thread while we hold the lock,
+//     the GC will deadlock when trying to acquire heapWorkerLock.
+    if (!dvmCreateInternalThread(&gDvm.heapWorkerHandle,
+                "HeapWorker", heapWorkerThreadStart, NULL))
+    {
+        dvmUnlockMutex(&gDvm.heapWorkerLock);
+        return false;
+    }
+
+    /*
+     * Wait for the heap worker to come up.  We know the thread was created,
+     * so this should not get stuck.
+     *
+     * The worker sets heapWorkerReady and signals heapWorkerCond in
+     * heapWorkerThreadStart(); looping on the predicate guards against
+     * spurious wakeups.
+     */
+    while (!gDvm.heapWorkerReady) {
+        int cc = pthread_cond_wait(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
+        assert(cc == 0);
+    }
+
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+    return true;
+}
+
+/*
+ * Shut down the heap worker thread if it was started.
+ *
+ * Sets the halt flag, wakes the worker, and joins it so any
+ * in-flight heap work finishes before the VM tears down.
+ */
+void dvmHeapWorkerShutdown(void)
+{
+    void* threadReturn;
+
+    /* note: assuming that (pthread_t)0 is not a valid thread handle */
+    if (gDvm.heapWorkerHandle != 0) {
+        gDvm.haltHeapWorker = true;
+        dvmSignalHeapWorker(true);
+
+        /*
+         * We may not want to wait for the heapWorkers to complete.  It's
+         * a good idea to do so, in case they're holding some sort of OS
+         * resource that doesn't get reclaimed when the process exits
+         * (e.g. an open temp file).
+         */
+        if (pthread_join(gDvm.heapWorkerHandle, &threadReturn) != 0)
+            LOGW("HeapWorker thread join failed\n");
+        else
+            LOGD("HeapWorker thread has shut down\n");
+
+        gDvm.heapWorkerReady = false;
+    }
+}
+
+/* Make sure that the HeapWorker thread hasn't spent an inordinate
+ * amount of time inside an interpreted finalizer.
+ *
+ * Aborts the VM if the thread appears to be wedged.
+ *
+ * The caller must hold the heapWorkerLock to guarantee an atomic
+ * read of the watchdog values (and to make the reset store below safe).
+ */
+void dvmAssertHeapWorkerThreadRunning()
+{
+    if (gDvm.gcHeap->heapWorkerCurrentObject != NULL) {
+        static const u8 HEAP_WORKER_WATCHDOG_TIMEOUT = 10*1000*1000LL; // 10sec
+
+        u8 heapWorkerInterpStartTime = gDvm.gcHeap->heapWorkerInterpStartTime;
+        u8 now = dvmGetRelativeTimeUsec();
+        u8 delta = now - heapWorkerInterpStartTime;
+
+        u8 heapWorkerInterpCpuStartTime =
+            gDvm.gcHeap->heapWorkerInterpCpuStartTime;
+        u8 nowCpu = dvmGetOtherThreadCpuTimeUsec(gDvm.heapWorkerHandle);
+        u8 deltaCpu = nowCpu - heapWorkerInterpCpuStartTime;
+        (void)deltaCpu;     /* gathered for diagnostics; not used yet */
+
+        if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT && gDvm.debuggerActive) {
+            /*
+             * Debugger suspension can block the thread indefinitely.  For
+             * best results we should reset this explicitly whenever the
+             * HeapWorker thread is resumed.  Ignoring the yelp isn't
+             * quite right but will do for a quick fix.
+             *
+             * Reset the shared start time (the original assigned only the
+             * local copy, a dead store) so we don't trip the watchdog
+             * again on the very next call.
+             */
+            LOGI("Debugger is attached -- suppressing HeapWorker watchdog\n");
+            gDvm.gcHeap->heapWorkerInterpStartTime = now;   /* reset timer */
+        } else if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT) {
+            char* desc = dexProtoCopyMethodDescriptor(
+                    &gDvm.gcHeap->heapWorkerCurrentMethod->prototype);
+            LOGE("HeapWorker is wedged: %lldms spent inside %s.%s%s\n",
+                    delta / 1000,
+                    gDvm.gcHeap->heapWorkerCurrentObject->clazz->descriptor,
+                    gDvm.gcHeap->heapWorkerCurrentMethod->name, desc);
+            free(desc);
+            dvmDumpAllThreads(true);
+
+            /* abort the VM */
+            dvmAbort();
+        } else if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT / 2) {
+            char* desc = dexProtoCopyMethodDescriptor(
+                    &gDvm.gcHeap->heapWorkerCurrentMethod->prototype);
+            LOGW("HeapWorker may be wedged: %lldms spent inside %s.%s%s\n",
+                    delta / 1000,
+                    gDvm.gcHeap->heapWorkerCurrentObject->clazz->descriptor,
+                    gDvm.gcHeap->heapWorkerCurrentMethod->name, desc);
+            free(desc);
+        }
+    }
+}
+
+/* Invokes <method> on <obj> in the context of <self>, maintaining the
+ * watchdog bookkeeping that dvmAssertHeapWorkerThreadRunning() reads.
+ *
+ * Called with gDvm.heapWorkerLock held.  The lock is dropped around
+ * the interpreted call (which may suspend, while the GC needs to grab
+ * heapWorkerLock) and re-acquired before returning.
+ *
+ * Any exception thrown by the method is logged (optionally) and
+ * cleared; it does not propagate to the caller.
+ */
+static void callMethod(Thread *self, Object *obj, Method *method)
+{
+    JValue unused;
+
+    /* Keep track of the method we're about to call and
+     * the current time so that other threads can detect
+     * when this thread wedges and provide useful information.
+     */
+    gDvm.gcHeap->heapWorkerInterpStartTime = dvmGetRelativeTimeUsec();
+    gDvm.gcHeap->heapWorkerInterpCpuStartTime = dvmGetThreadCpuTimeUsec();
+    gDvm.gcHeap->heapWorkerCurrentMethod = method;
+    gDvm.gcHeap->heapWorkerCurrentObject = obj;
+
+    /* Call the method.
+     *
+     * Don't hold the lock when executing interpreted
+     * code.  It may suspend, and the GC needs to grab
+     * heapWorkerLock.
+     */
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+    if (false) {
+        /* Log entry/exit; this will likely flood the log enough to
+         * cause "logcat" to drop entries.  (Dead branch, kept as a
+         * debugging aid -- flip the condition to enable.)
+         */
+        char tmpTag[16];
+        sprintf(tmpTag, "HW%d", self->systemTid);
+        LOG(LOG_DEBUG, tmpTag, "Call %s\n", method->clazz->descriptor);
+        dvmCallMethod(self, method, obj, &unused);
+        LOG(LOG_DEBUG, tmpTag, " done\n");
+    } else {
+        dvmCallMethod(self, method, obj, &unused);
+    }
+    dvmLockMutex(&gDvm.heapWorkerLock);
+
+    /* Clear the watchdog state now that the interpreted call is over. */
+    gDvm.gcHeap->heapWorkerCurrentObject = NULL;
+    gDvm.gcHeap->heapWorkerCurrentMethod = NULL;
+    gDvm.gcHeap->heapWorkerInterpStartTime = 0LL;
+
+    /* Exceptions thrown during these calls interrupt
+     * the method, but are otherwise ignored.
+     */
+    if (dvmCheckException(self)) {
+#if DVM_SHOW_EXCEPTION >= 1
+        LOGI("Uncaught exception thrown by finalizer (will be discarded):\n");
+        dvmLogExceptionStackTrace();
+#endif
+        dvmClearException(self);
+    }
+}
+
+/* Process all enqueued heap work, including finalizers and reference
+ * clearing/enqueueing.
+ *
+ * Objects are drained via dvmGetNextHeapWorkerObject(); <op> says
+ * whether to run finalize() or the reference clear/enqueue methods.
+ * Each object is released from the tracked-alloc set once handled.
+ *
+ * Caller must hold gDvm.heapWorkerLock.
+ */
+static void doHeapWork(Thread *self)
+{
+    Object *obj;
+    HeapWorkerOperation op;
+    int numFinalizersCalled, numReferencesEnqueued;
+#if FANCY_REFERENCE_SUBCLASS
+    int numReferencesCleared = 0;
+#endif
+
+    /* Sanity-check that the method/vtable offsets we dispatch through
+     * were resolved during VM startup.
+     */
+    assert(gDvm.voffJavaLangObject_finalize >= 0);
+#if FANCY_REFERENCE_SUBCLASS
+    assert(gDvm.voffJavaLangRefReference_clear >= 0);
+    assert(gDvm.voffJavaLangRefReference_enqueue >= 0);
+#else
+    assert(gDvm.methJavaLangRefReference_enqueueInternal != NULL);
+#endif
+
+    numFinalizersCalled = 0;
+    numReferencesEnqueued = 0;
+    while ((obj = dvmGetNextHeapWorkerObject(&op)) != NULL) {
+        Method *method = NULL;
+
+        /* Make sure the object hasn't been collected since
+         * being scheduled.
+         */
+        assert(dvmIsValidObject(obj));
+
+        /* Call the appropriate method(s).
+         */
+        if (op == WORKER_FINALIZE) {
+            numFinalizersCalled++;
+            method = obj->clazz->vtable[gDvm.voffJavaLangObject_finalize];
+            assert(dvmCompareNameDescriptorAndMethod("finalize", "()V",
+                            method) == 0);
+            assert(method->clazz != gDvm.classJavaLangObject);
+            callMethod(self, obj, method);
+        } else {
+#if FANCY_REFERENCE_SUBCLASS
+            /* clear() *must* happen before enqueue(), otherwise
+             * a non-clear reference could appear on a reference
+             * queue.
+             */
+            if (op & WORKER_CLEAR) {
+                numReferencesCleared++;
+                method = obj->clazz->vtable[
+                        gDvm.voffJavaLangRefReference_clear];
+                assert(dvmCompareNameDescriptorAndMethod("clear", "()V",
+                                method) == 0);
+                assert(method->clazz != gDvm.classJavaLangRefReference);
+                callMethod(self, obj, method);
+            }
+            if (op & WORKER_ENQUEUE) {
+                numReferencesEnqueued++;
+                method = obj->clazz->vtable[
+                        gDvm.voffJavaLangRefReference_enqueue];
+                assert(dvmCompareNameDescriptorAndMethod("enqueue", "()Z",
+                                method) == 0);
+                /* We call enqueue() even when it isn't overridden,
+                 * so don't assert(!classJavaLangRefReference) here.
+                 */
+                callMethod(self, obj, method);
+            }
+#else
+            /* Non-fancy build: clearing is handled elsewhere; only
+             * enqueueInternal() is ever dispatched here.
+             */
+            assert((op & WORKER_CLEAR) == 0);
+            if (op & WORKER_ENQUEUE) {
+                numReferencesEnqueued++;
+                callMethod(self, obj,
+                        gDvm.methJavaLangRefReference_enqueueInternal);
+            }
+#endif
+        }
+
+        /* Let the GC collect the object.
+         */
+        dvmReleaseTrackedAlloc(obj, self);
+    }
+    LOGV("Called %d finalizers\n", numFinalizersCalled);
+    LOGV("Enqueued %d references\n", numReferencesEnqueued);
+#if FANCY_REFERENCE_SUBCLASS
+    LOGV("Cleared %d overridden references\n", numReferencesCleared);
+#endif
+}
+
+/*
+ * The heap worker thread sits quietly until the GC tells it there's work
+ * to do.
+ */
+static void* heapWorkerThreadStart(void* arg)
+{
+    Thread *self = dvmThreadSelf();
+    int cc;
+
+    UNUSED_PARAMETER(arg);
+
+    LOGV("HeapWorker thread started (threadid=%d)\n", self->threadId);
+
+    /* tell the main thread that we're ready */
+    dvmLockMutex(&gDvm.heapWorkerLock);
+    gDvm.heapWorkerReady = true;
+    cc = pthread_cond_signal(&gDvm.heapWorkerCond);
+    assert(cc == 0);
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+
+    dvmLockMutex(&gDvm.heapWorkerLock);
+    while (!gDvm.haltHeapWorker) {
+        struct timespec trimtime;
+        bool timedwait = false;
+
+        /* We're done running interpreted code for now. */
+        dvmChangeStatus(NULL, THREAD_VMWAIT);
+
+        /* Signal anyone who wants to know when we're done. */
+        cc = pthread_cond_broadcast(&gDvm.heapWorkerIdleCond);
+        assert(cc == 0);
+
+        /* Trim the heap if we were asked to.  A pending trim is
+         * indicated by a nonzero timespec; dvmScheduleHeapSourceTrim()
+         * zeroes both fields to cancel.  (The original tested
+         * tv_sec != 0 && tv_nsec != 0, which silently dropped any
+         * scheduled trim whose tv_nsec happened to be exactly zero.)
+         */
+        trimtime = gDvm.gcHeap->heapWorkerNextTrim;
+        if (trimtime.tv_sec != 0 || trimtime.tv_nsec != 0) {
+            struct timeval now;
+
+            gettimeofday(&now, NULL);
+            if (trimtime.tv_sec < now.tv_sec ||
+                (trimtime.tv_sec == now.tv_sec &&
+                 trimtime.tv_nsec <= now.tv_usec * 1000))
+            {
+                size_t madvisedSizes[HEAP_SOURCE_MAX_HEAP_COUNT];
+
+                /* The heap must be locked before the HeapWorker;
+                 * unroll and re-order the locks.  dvmLockHeap()
+                 * will put us in VMWAIT if necessary.  Once it
+                 * returns, there shouldn't be any contention on
+                 * heapWorkerLock.
+                 */
+                dvmUnlockMutex(&gDvm.heapWorkerLock);
+                dvmLockHeap();
+                dvmLockMutex(&gDvm.heapWorkerLock);
+
+                memset(madvisedSizes, 0, sizeof(madvisedSizes));
+                dvmHeapSourceTrim(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);
+                dvmLogMadviseStats(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);
+
+                dvmUnlockHeap();
+
+                /* Mark the trim as done. */
+                trimtime.tv_sec = 0;
+                trimtime.tv_nsec = 0;
+                gDvm.gcHeap->heapWorkerNextTrim = trimtime;
+            } else {
+                /* Trim deadline is in the future; sleep until then. */
+                timedwait = true;
+            }
+        }
+
+        /* sleep until signaled */
+        if (timedwait) {
+            cc = pthread_cond_timedwait(&gDvm.heapWorkerCond,
+                    &gDvm.heapWorkerLock, &trimtime);
+            assert(cc == 0 || cc == ETIMEDOUT || cc == EINTR);
+        } else {
+            cc = pthread_cond_wait(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
+            assert(cc == 0);
+        }
+
+        /* dvmChangeStatus() may block;  don't hold heapWorkerLock.
+         */
+        dvmUnlockMutex(&gDvm.heapWorkerLock);
+        dvmChangeStatus(NULL, THREAD_RUNNING);
+        dvmLockMutex(&gDvm.heapWorkerLock);
+        LOGV("HeapWorker is awake\n");
+
+        /* Process any events in the queue.
+         */
+        doHeapWork(self);
+    }
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+
+    LOGD("HeapWorker thread shutting down\n");
+    return NULL;
+}
+
+/*
+ * Wake up the heap worker to let it know that there's work to be done.
+ *
+ * If shouldLock is false, the caller must already hold
+ * gDvm.heapWorkerLock.
+ */
+void dvmSignalHeapWorker(bool shouldLock)
+{
+    int res;
+
+    if (shouldLock) {
+        dvmLockMutex(&gDvm.heapWorkerLock);
+    }
+    res = pthread_cond_signal(&gDvm.heapWorkerCond);
+    assert(res == 0);
+    if (shouldLock) {
+        dvmUnlockMutex(&gDvm.heapWorkerLock);
+    }
+}
+
+/*
+ * Block until all pending heap worker work has finished.
+ *
+ * The calling thread is placed in VMWAIT while blocked, matching the
+ * lock/status choreography used elsewhere in this file.
+ *
+ * NOTE(review): a single pthread_cond_wait() without a predicate loop
+ * can return early on a spurious wakeup -- confirm whether a "worker
+ * went idle at least once" flag is needed here.
+ */
+void dvmWaitForHeapWorkerIdle()
+{
+    int cc;
+
+    assert(gDvm.heapWorkerReady);
+
+    dvmChangeStatus(NULL, THREAD_VMWAIT);
+
+    dvmLockMutex(&gDvm.heapWorkerLock);
+
+    /* Wake up the heap worker and wait for it to finish. */
+    //TODO(http://b/issue?id=699704): This will deadlock if
+    //     called from finalize(), enqueue(), or clear().  We
+    //     need to detect when this is called from the HeapWorker
+    //     context and just give up.
+    dvmSignalHeapWorker(false);
+    cc = pthread_cond_wait(&gDvm.heapWorkerIdleCond, &gDvm.heapWorkerLock);
+    assert(cc == 0);
+
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+
+    dvmChangeStatus(NULL, THREAD_RUNNING);
+}
+
+/*
+ * Do not return until any pending heap work has finished.  This may
+ * or may not happen in the context of the calling thread.
+ * No exceptions will escape.
+ */
+void dvmRunFinalizationSync()
+{
+    if (!gDvm.zygote) {
+        /* Outside of zygote mode, we can just ask the
+         * heap worker thread to do the work.
+         */
+        dvmWaitForHeapWorkerIdle();
+        return;
+    }
+
+    /* When in zygote mode, there is no heap worker.
+     * Do the work in the current thread.
+     */
+    assert(!gDvm.heapWorkerReady);
+    dvmLockMutex(&gDvm.heapWorkerLock);
+    doHeapWork(dvmThreadSelf());
+    dvmUnlockMutex(&gDvm.heapWorkerLock);
+}
+
+/*
+ * Requests that dvmHeapSourceTrim() be called no sooner
+ * than timeoutSec seconds from now.  If timeoutSec
+ * is zero, any pending trim is cancelled.
+ *
+ * Caller must hold heapWorkerLock.
+ */
+void dvmScheduleHeapSourceTrim(size_t timeoutSec)
+{
+    struct timespec timeout;
+
+    if (timeoutSec != 0) {
+        struct timeval now;
+
+        /* Convert "now + timeoutSec" to an absolute timespec and poke
+         * the worker so it switches to a timed wait.
+         */
+        gettimeofday(&now, NULL);
+        timeout.tv_sec = now.tv_sec + timeoutSec;
+        timeout.tv_nsec = now.tv_usec * 1000;
+        dvmSignalHeapWorker(false);
+    } else {
+        /* Cancel.  Don't wake up the thread just to tell it; if it
+         * wakes up naturally, we can avoid the extra context switch.
+         */
+        timeout.tv_sec = 0;
+        timeout.tv_nsec = 0;
+    }
+    gDvm.gcHeap->heapWorkerNextTrim = timeout;
+}
diff --git a/vm/alloc/HeapWorker.h b/vm/alloc/HeapWorker.h
new file mode 100644
index 0000000..c079570
--- /dev/null
+++ b/vm/alloc/HeapWorker.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Manage async heap tasks.
+ */
+#ifndef _DALVIK_ALLOC_HEAP_WORKER
+#define _DALVIK_ALLOC_HEAP_WORKER
+
+/*
+ * Initialize any HeapWorker state that Heap.c
+ * cares about.  This lets the GC start before the
+ * HeapWorker thread is initialized.
+ */
+void dvmInitializeHeapWorkerState(void);
+
+/*
+ * Initialization.  Starts/stops the worker thread.
+ */
+bool dvmHeapWorkerStartup(void);
+void dvmHeapWorkerShutdown(void);
+
+/*
+ * Tell the worker thread to wake up and do work.
+ * If shouldLock is false, the caller must have already
+ * acquired gDvm.heapWorkerLock.
+ */
+void dvmSignalHeapWorker(bool shouldLock);
+
+/*
+ * Block until all pending heap worker work has finished.
+ */
+void dvmWaitForHeapWorkerIdle(void);
+
+/*
+ * Does not return until any pending finalizers have been called.
+ * This may or may not happen in the context of the calling thread.
+ * No exceptions will escape.
+ *
+ * Used by zygote, which doesn't have a HeapWorker thread.
+ */
+void dvmRunFinalizationSync(void);
+
+/*
+ * Requests that dvmHeapSourceTrim() be called no sooner
+ * than timeoutSec seconds from now.  If timeoutSec
+ * is zero, any pending trim is cancelled.
+ *
+ * Caller must hold heapWorkerLock.
+ */
+void dvmScheduleHeapSourceTrim(size_t timeoutSec);
+
+/* Make sure that the HeapWorker thread hasn't spent an inordinate
+ * amount of time inside interpreted code.
+ *
+ * Aborts the VM if the thread appears to be wedged.
+ *
+ * The caller must hold the heapWorkerLock.
+ */
+void dvmAssertHeapWorkerThreadRunning();
+
+/*
+ * The type of operation for HeapWorker to perform on an object.
+ */
+typedef enum HeapWorkerOperation {
+    WORKER_FINALIZE = 0,
+
+    /* Required: (WORKER_CLEAR | WORKER_ENQUEUE) <= (4-1)
+     * These values will be stuffed in the low bits of a pointer.
+     */
+    WORKER_CLEAR = (1<<0),
+    WORKER_ENQUEUE = (1<<1),
+} HeapWorkerOperation;
+
+/*
+ * Called by the worker thread to get the next object
+ * to finalize/enqueue/clear.  Implemented in Heap.c.
+ *
+ * @param op The operation to perform on the returned object.
+ *           Must be non-NULL.
+ * @return The object to operate on, or NULL.
+ */
+Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op);
+
+#endif /*_DALVIK_ALLOC_HEAP_WORKER*/
diff --git a/vm/alloc/MarkSweep.c b/vm/alloc/MarkSweep.c
new file mode 100644
index 0000000..cdda9c6
--- /dev/null
+++ b/vm/alloc/MarkSweep.c
@@ -0,0 +1,1332 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "alloc/HeapBitmap.h"
+#include "alloc/HeapInternal.h"
+#include "alloc/HeapSource.h"
+#include "alloc/MarkSweep.h"
+#include <limits.h>     // for ULONG_MAX
+#include <sys/mman.h>   // for madvise(), mmap()
+#include <cutils/ashmem.h>
+
+/* GC debug levels, in increasing order of strictness. */
+#define GC_DEBUG_PARANOID   2
+#define GC_DEBUG_BASIC      1
+#define GC_DEBUG_OFF        0
+/* Evaluates true when checks at level (l) are enabled. */
+#define GC_DEBUG(l)         (GC_DEBUG_LEVEL >= (l))
+
+/* NOTE(review): the '#if 1' leaves the most expensive (paranoid)
+ * checks enabled unconditionally; flip it to 0 to turn them off.
+ */
+#if 1
+#define GC_DEBUG_LEVEL      GC_DEBUG_PARANOID
+#else
+#define GC_DEBUG_LEVEL      GC_DEBUG_OFF
+#endif
+
+#define VERBOSE_GC          0
+
+#define GC_LOG_TAG      LOG_TAG "-gc"
+
+#if LOG_NDEBUG
+#define LOGV_GC(...)    ((void)0)
+#define LOGD_GC(...)    ((void)0)
+#else
+#define LOGV_GC(...)    LOG(LOG_VERBOSE, GC_LOG_TAG, __VA_ARGS__)
+#define LOGD_GC(...)    LOG(LOG_DEBUG, GC_LOG_TAG, __VA_ARGS__)
+#endif
+
+#if VERBOSE_GC
+#define LOGVV_GC(...)   LOGV_GC(__VA_ARGS__)
+#else
+#define LOGVV_GC(...)   ((void)0)
+#endif
+
+#define LOGI_GC(...)    LOG(LOG_INFO, GC_LOG_TAG, __VA_ARGS__)
+#define LOGW_GC(...)    LOG(LOG_WARN, GC_LOG_TAG, __VA_ARGS__)
+#define LOGE_GC(...)    LOG(LOG_ERROR, GC_LOG_TAG, __VA_ARGS__)
+
+#define LOG_SCAN(...)   LOGV_GC("SCAN: " __VA_ARGS__)
+#define LOG_MARK(...)   LOGV_GC("MARK: " __VA_ARGS__)
+#define LOG_SWEEP(...)  LOGV_GC("SWEEP: " __VA_ARGS__)
+#define LOG_REF(...)    LOGV_GC("REF: " __VA_ARGS__)
+
+#define LOGV_SCAN(...)  LOGVV_GC("SCAN: " __VA_ARGS__)
+#define LOGV_MARK(...)  LOGVV_GC("MARK: " __VA_ARGS__)
+#define LOGV_SWEEP(...) LOGVV_GC("SWEEP: " __VA_ARGS__)
+#define LOGV_REF(...)   LOGVV_GC("REF: " __VA_ARGS__)
+
+#if WITH_OBJECT_HEADERS
+u2 gGeneration = 0;
+static const Object *gMarkParent = NULL;
+#endif
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+#define ALIGN_UP_TO_PAGE_SIZE(p) \
+    (((size_t)(p) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))
+
+/* Returns non-zero if hc's bit is set in any of the context's
+ * mark bitmaps.
+ *
+ * Do not cast the result of this to a boolean; the only set bit
+ * may be > 1<<8.
+ */
+static inline long isMarked(const DvmHeapChunk *hc, const GcMarkContext *ctx)
+        __attribute__((always_inline));
+static inline long isMarked(const DvmHeapChunk *hc, const GcMarkContext *ctx)
+{
+    return dvmHeapBitmapIsObjectBitSetInList(ctx->bitmaps, ctx->numBitmaps, hc);
+}
+
+/* Allocate the mark stack used while recursing through live objects.
+ * The stack grows downward from base toward limit and is backed by an
+ * ashmem region (named so it can be identified in /proc/<pid>/maps).
+ *
+ * NOTE(review): the byte size computed below equals the worst-case
+ * *object count*, not count * sizeof(Object *); confirm the stack
+ * cannot overflow on a pathological heap full of minimal objects.
+ *
+ * Returns true on success; on failure no resources are left mapped.
+ */
+static bool
+createMarkStack(GcMarkStack *stack)
+{
+    const Object **limit;
+    size_t size;
+    int fd;
+
+    /* Create a stack big enough for the worst possible case,
+     * where the heap is perfectly full of the smallest object.
+     * TODO: be better about memory usage; use a smaller stack with
+     *       overflow detection and recovery.
+     */
+    size = dvmHeapSourceGetIdealFootprint() /
+            (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
+    size = ALIGN_UP_TO_PAGE_SIZE(size);
+    fd = ashmem_create_region("dalvik-heap-markstack", size);
+    if (fd < 0) {
+        /* %zu matches the size_t argument (the old %d was mismatched). */
+        LOGE_GC("Could not create %zu-byte ashmem mark stack\n", size);
+        return false;
+    }
+    limit = (const Object **)mmap(NULL, size, PROT_READ | PROT_WRITE,
+            MAP_PRIVATE, fd, 0);
+    close(fd);       /* the mapping keeps the region alive */
+    if (limit == MAP_FAILED) {
+        LOGE_GC("Could not mmap %zu-byte ashmem mark stack\n", size);
+        return false;
+    }
+
+    memset(stack, 0, sizeof(*stack));
+    stack->limit = limit;
+    stack->base = (const Object **)((uintptr_t)limit + size);
+    stack->top = stack->base;
+
+    return true;
+}
+
+/* Release the memory backing a mark stack and reset its fields. */
+static void
+destroyMarkStack(GcMarkStack *stack)
+{
+    size_t length = (uintptr_t)stack->base - (uintptr_t)stack->limit;
+
+    munmap((char *)stack->limit, length);
+    memset(stack, 0, sizeof(*stack));
+}
+
+/* Push obj onto the mark stack.  The stack grows downward from base;
+ * no overflow check is performed (the stack is sized for the worst
+ * case by createMarkStack()).
+ */
+#define MARK_STACK_PUSH(stack, obj) \
+    do { \
+        *--(stack).top = (obj); \
+    } while (false)
+
+/* Prepare for a mark phase: allocate the mark stack and create mark
+ * bitmaps covering the same address ranges as the heap's current
+ * object bitmaps.
+ *
+ * Returns true on success.  On failure all partially-created state
+ * (in particular the just-mapped mark stack) is torn down again,
+ * fixing a leak in the original failure paths.
+ */
+bool
+dvmHeapBeginMarkStep()
+{
+    GcMarkContext *mc = &gDvm.gcHeap->markContext;
+    HeapBitmap objectBitmaps[HEAP_SOURCE_MAX_HEAP_COUNT];
+    size_t numBitmaps;
+
+    if (!createMarkStack(&mc->stack)) {
+        return false;
+    }
+
+    numBitmaps = dvmHeapSourceGetObjectBitmaps(objectBitmaps,
+            HEAP_SOURCE_MAX_HEAP_COUNT);
+    if (numBitmaps <= 0) {
+        /* Don't leak the mark stack we just created. */
+        destroyMarkStack(&mc->stack);
+        return false;
+    }
+
+    /* Create mark bitmaps that cover the same ranges as the
+     * current object bitmaps.
+     */
+    if (!dvmHeapBitmapInitListFromTemplates(mc->bitmaps, objectBitmaps,
+            numBitmaps, "mark"))
+    {
+        destroyMarkStack(&mc->stack);
+        return false;
+    }
+
+    mc->numBitmaps = numBitmaps;
+    mc->finger = NULL;
+
+#if WITH_OBJECT_HEADERS
+    gGeneration++;
+#endif
+
+    return true;
+}
+
+/* Set hc's mark bit in the context's bitmap list and return the bit's
+ * previous value (non-zero if the chunk was already marked; do not
+ * cast to bool -- see isMarked()).
+ */
+static long setAndReturnMarkBit(GcMarkContext *ctx, const DvmHeapChunk *hc)
+        __attribute__((always_inline));
+static long
+setAndReturnMarkBit(GcMarkContext *ctx, const DvmHeapChunk *hc)
+{
+    return dvmHeapBitmapSetAndReturnObjectBitInList(ctx->bitmaps,
+        ctx->numBitmaps, hc);
+}
+
+/* Core marking routine: set obj's mark bit and, if it was not already
+ * marked, optionally push it onto the mark stack:
+ *   - forceStack: always push newly-marked objects.
+ *   - checkFinger: push only when obj's chunk lies below ctx->finger,
+ *     i.e. the bitmap walk has already passed its address.
+ * obj must be non-NULL.
+ */
+static void _markObjectNonNullCommon(const Object *obj, GcMarkContext *ctx,
+        bool checkFinger, bool forceStack)
+        __attribute__((always_inline));
+static void
+_markObjectNonNullCommon(const Object *obj, GcMarkContext *ctx,
+        bool checkFinger, bool forceStack)
+{
+    DvmHeapChunk *hc;
+
+    assert(obj != NULL);
+
+#if GC_DEBUG(GC_DEBUG_PARANOID)
+//TODO: make sure we're locked
+    assert(obj != (Object *)gDvm.unlinkedJavaLangClass);
+    assert(dvmIsValidObject(obj));
+#endif
+
+    hc = ptr2chunk(obj);
+    if (!setAndReturnMarkBit(ctx, hc)) {
+        /* This object was not previously marked.
+         */
+        if (forceStack || (checkFinger && (void *)hc < ctx->finger)) {
+            /* This object will need to go on the mark stack.
+             */
+            MARK_STACK_PUSH(ctx->stack, obj);
+        }
+
+#if WITH_OBJECT_HEADERS
+        /* Debug bookkeeping: record mark generation/finger/parent in
+         * the chunk header and abort on inconsistent mark state.
+         */
+        if (hc->scanGeneration != hc->markGeneration) {
+            LOGE("markObject(0x%08x): wasn't scanned last time\n", (uint)obj);
+            dvmAbort();
+        }
+        if (hc->markGeneration == gGeneration) {
+            LOGE("markObject(0x%08x): already marked this generation\n",
+                    (uint)obj);
+            dvmAbort();
+        }
+        hc->oldMarkGeneration = hc->markGeneration;
+        hc->markGeneration = gGeneration;
+        hc->markFingerOld = hc->markFinger;
+        hc->markFinger = ctx->finger;
+        if (gMarkParent != NULL) {
+            hc->parentOld = hc->parent;
+            hc->parent = gMarkParent;
+        } else {
+            hc->parent = (const Object *)((uintptr_t)hc->parent | 1);
+        }
+        hc->markCount++;
+#endif
+#if WITH_HPROF
+        if (gDvm.gcHeap->hprofContext != NULL) {
+            hprofMarkRootObject(gDvm.gcHeap->hprofContext, obj, 0);
+        }
+#endif
+#if DVM_TRACK_HEAP_MARKING
+        gDvm.gcHeap->markCount++;
+        gDvm.gcHeap->markSize += dvmHeapSourceChunkSize((void *)hc) +
+                HEAP_SOURCE_CHUNK_OVERHEAD;
+#endif
+
+        /* obj->clazz can be NULL if we catch an object between
+         * dvmMalloc() and DVM_OBJECT_INIT().  This is ok.
+         */
+        LOGV_MARK("0x%08x %s\n", (uint)obj,
+                obj->clazz == NULL ? "<null class>" : obj->clazz->name);
+    }
+}
+
+/* Used to mark objects when recursing.  Recursion is done by moving
+ * the finger across the bitmaps in address order and marking child
+ * objects.  Any newly-marked objects whose addresses are lower than
+ * the finger won't be visited by the bitmap scan, so those objects
+ * need to be added to the mark stack.
+ *
+ * obj must be non-NULL; use markObject() when the reference may be NULL.
+ */
+static void
+markObjectNonNull(const Object *obj, GcMarkContext *ctx)
+{
+    _markObjectNonNullCommon(obj, ctx, true, false);
+}
+
+/* NULL-tolerant wrapper around markObjectNonNull().  Evaluates obj
+ * exactly once (cached in MO_obj_).
+ */
+#define markObject(obj, ctx) \
+    do { \
+        Object *MO_obj_ = (Object *)(obj); \
+        if (MO_obj_ != NULL) { \
+            markObjectNonNull(MO_obj_, (ctx)); \
+        } \
+    } while (false)
+
+/* If the object hasn't already been marked, mark it and
+ * schedule it to be scanned for references.
+ *
+ * obj may not be NULL.  The macro dvmMarkObject() should
+ * be used in situations where a reference may be NULL.
+ *
+ * This function may only be called when marking the root
+ * set.  When recursing, use the internal markObject[NonNull]().
+ *
+ * Uses gDvm.gcHeap's mark context and ignores the finger
+ * (checkFinger == false): root marking happens before the bitmap walk.
+ */
+void
+dvmMarkObjectNonNull(const Object *obj)
+{
+    _markObjectNonNullCommon(obj, &gDvm.gcHeap->markContext, false, false);
+}
+
+/* Mark the set of root objects.
+ *
+ * Things we need to scan:
+ * - System classes defined by root classloader
+ * - For each thread:
+ *   - Interpreted stack, from top to "curFrame"
+ *     - Dalvik registers (args + local vars)
+ *   - JNI local references
+ *   - Automatic VM local references (TrackedAlloc)
+ *   - Associated Thread/VMThread object
+ *   - ThreadGroups (could track & start with these instead of working
+ *     upward from Threads)
+ *   - Exception currently being thrown, if present
+ * - JNI global references
+ * - Interned string table
+ * - Primitive classes
+ * - Special objects
+ *   - gDvm.outOfMemoryObj
+ * - Objects allocated with ALLOC_NO_GC
+ * - Objects pending finalization (but not yet finalized)
+ * - Objects in debugger object registry
+ *
+ * Don't need:
+ * - Native stack (for in-progress stuff in the VM)
+ *   - The TrackedAlloc stuff watches all native VM references.
+ *
+ * Each scan below is bracketed by HPROF_SET_GC_SCAN_STATE /
+ * HPROF_CLEAR_GC_SCAN_STATE so heap-profile dumps can attribute
+ * each root to its source.
+ */
+void dvmHeapMarkRootSet()
+{
+    HeapRefTable *refs;
+    GcHeap *gcHeap;
+    Object **op;
+
+    gcHeap = gDvm.gcHeap;
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_STICKY_CLASS, 0);
+
+    LOG_SCAN("root class loader\n");
+    dvmGcScanRootClassLoader();
+    LOG_SCAN("primitive classes\n");
+    dvmGcScanPrimitiveClasses();
+
+    /* dvmGcScanRootThreadGroups() sets a bunch of
+     * different scan states internally.
+     */
+    HPROF_CLEAR_GC_SCAN_STATE();
+
+    LOG_SCAN("root thread groups\n");
+    dvmGcScanRootThreadGroups();
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_INTERNED_STRING, 0);
+
+    LOG_SCAN("interned strings\n");
+    dvmGcScanInternedStrings();
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_JNI_GLOBAL, 0);
+
+    LOG_SCAN("JNI global refs\n");
+    dvmGcMarkJniGlobalRefs();
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_REFERENCE_CLEANUP, 0);
+
+    LOG_SCAN("pending reference operations\n");
+    dvmHeapMarkLargeTableRefs(gcHeap->referenceOperations, true);
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
+
+    LOG_SCAN("pending finalizations\n");
+    dvmHeapMarkLargeTableRefs(gcHeap->pendingFinalizationRefs, false);
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_DEBUGGER, 0);
+
+    LOG_SCAN("debugger refs\n");
+    dvmGcMarkDebuggerRefs();
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_VM_INTERNAL, 0);
+
+    /* Mark all ALLOC_NO_GC objects.
+     */
+    LOG_SCAN("ALLOC_NO_GC objects\n");
+    refs = &gcHeap->nonCollectableRefs;
+    /* Entries are packed between refs->table and refs->nextEntry. */
+    op = refs->table;
+    while ((uintptr_t)op < (uintptr_t)refs->nextEntry) {
+        dvmMarkObjectNonNull(*(op++));
+    }
+
+    /* Mark any special objects we have sitting around.
+     */
+    LOG_SCAN("special objects\n");
+    dvmMarkObjectNonNull(gDvm.outOfMemoryObj);
+    dvmMarkObjectNonNull(gDvm.internalErrorObj);
+//TODO: scan object references sitting in gDvm;  use pointer begin & end
+
+    HPROF_CLEAR_GC_SCAN_STATE();
+}
+
+/*
+ * Nothing past this point is allowed to use dvmMarkObject*().
+ * Scanning/recursion must use markObject*(), which takes the
+ * finger into account.
+ */
+#define dvmMarkObjectNonNull __dont_use_dvmMarkObjectNonNull__
+
+
+/* Mark all of a ClassObject's interfaces.
+ */
+static void markInterfaces(const ClassObject *clazz, GcMarkContext *ctx)
+{
+    int i;
+
+    /* Interface references are expected to be non-NULL. */
+    for (i = 0; i < clazz->interfaceCount; i++) {
+        markObjectNonNull((Object *)clazz->interfaces[i], ctx);
+    }
+}
+
+/* Mark all objects referred to by a ClassObject's static fields.
+ */
+static void scanStaticFields(const ClassObject *clazz, GcMarkContext *ctx)
+{
+    int i;
+
+    //TODO: Optimize this with a bit vector or something
+    for (i = 0; i < clazz->sfieldCount; i++) {
+        const StaticField *sf = &clazz->sfields[i];
+        char sig = sf->field.signature[0];
+
+        if (sig == '[' || sig == 'L') {
+            /* Array ('[') or class ('L') reference; may be NULL. */
+            markObject((Object *)sf->value.l, ctx);
+        }
+    }
+}
+
+/* Mark all objects referred to by a DataObject's instance fields,
+ * including fields inherited from every superclass.
+ */
+static void scanInstanceFields(const DataObject *obj, ClassObject *clazz,
+        GcMarkContext *ctx)
+{
+//TODO: Optimize this by avoiding walking the superclass chain
+    /* clazz->super is NULL once we pass java.lang.Object. */
+    for (; clazz != NULL; clazz = clazz->super) {
+        int i;
+
+        /* All of the fields that contain object references
+         * are guaranteed to be at the beginning of the ifields list.
+         */
+        for (i = 0; i < clazz->ifieldRefCount; i++) {
+            /* Mark the (possibly NULL) array or object reference.
+             *
+             * Note that, per the comment on struct InstField,
+             * byteOffset is the offset from the beginning of
+             * obj, not the offset into obj->instanceData.
+             */
+            markObject(dvmGetFieldObject((Object *)obj,
+                    clazz->ifields[i].byteOffset), ctx);
+        }
+    }
+}
+
+/* Mark all objects referred to by the array's contents.
+ */
+static void scanObjectArray(const ArrayObject *array, GcMarkContext *ctx)
+{
+    Object **elements = (Object **)array->contents;
+    u4 count = array->length;
+    u4 i;
+
+    for (i = 0; i < count; i++) {
+        /* Elements may be NULL; markObject() tolerates that. */
+        markObject(elements[i], ctx);
+    }
+}
+
+/* Mark all objects referred to by the ClassObject.
+ *
+ * Called only for instances of java.lang.Class (see scanObject()).
+ */
+static void scanClassObject(const ClassObject *clazz, GcMarkContext *ctx)
+{
+    LOGV_SCAN("---------> %s\n", clazz->name);
+
+    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
+        /* We're an array; mark the class object of the contents
+         * of the array.
+         *
+         * Note that we won't necessarily reach the array's element
+         * class by scanning the array contents;  the array may be
+         * zero-length, or may only contain null objects.
+         */
+        markObjectNonNull((Object *)clazz->elementClass, ctx);
+    }
+
+    /* We scan these explicitly in case the only remaining
+     * reference to a particular class object is via a data
+     * object;  we may not be guaranteed to reach all
+     * live class objects via a classloader.
+     */
+    markObject((Object *)clazz->super, ctx);  // may be NULL (java.lang.Object)
+    markObject(clazz->classLoader, ctx);      // may be NULL
+
+    scanStaticFields(clazz, ctx);
+    markInterfaces(clazz, ctx);
+}
+
+/* Mark all objects that obj refers to.
+ *
+ * Called on every object in markList.
+ *
+ * Also detects java/lang/ref/Reference instances whose referents are
+ * not yet marked and records them on the soft/weak/phantom lists
+ * (threaded through the vmData field) for dvmHeapHandleReferences().
+ */
+static void scanObject(const Object *obj, GcMarkContext *ctx)
+{
+    ClassObject *clazz;
+
+    assert(dvmIsValidObject(obj));
+    LOGV_SCAN("0x%08x %s\n", (uint)obj, obj->clazz->name);
+
+#if WITH_HPROF
+    if (gDvm.gcHeap->hprofContext != NULL) {
+        hprofDumpHeapObject(gDvm.gcHeap->hprofContext, obj);
+    }
+#endif
+
+    /* Get and mark the class object for this particular instance.
+     */
+    clazz = obj->clazz;
+    if (clazz == NULL) {
+        /* This can happen if we catch an object between
+         * dvmMalloc() and DVM_OBJECT_INIT().  The object
+         * won't contain any references yet, so we can
+         * just skip it.
+         */
+        return;
+    } else if (clazz == gDvm.unlinkedJavaLangClass) {
+        /* This class hasn't been linked yet.  We're guaranteed
+         * that the object doesn't contain any references that
+         * aren't already tracked, so we can skip scanning it.
+         *
+         * NOTE: unlinkedJavaLangClass is not on the heap, so
+         * it's very important that we don't try marking it.
+         */
+        return;
+    }
+#if WITH_OBJECT_HEADERS
+    gMarkParent = obj;
+    if (ptr2chunk(obj)->scanGeneration == gGeneration) {
+        LOGE("object 0x%08x was already scanned this generation\n",
+                (uintptr_t)obj);
+        dvmAbort();
+    }
+    ptr2chunk(obj)->oldScanGeneration = ptr2chunk(obj)->scanGeneration;
+    ptr2chunk(obj)->scanGeneration = gGeneration;
+    ptr2chunk(obj)->scanCount++;
+#endif
+
+    assert(dvmIsValidObject((Object *)clazz));
+    markObjectNonNull((Object *)clazz, ctx);
+
+    /* Mark any references in this object.
+     */
+    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
+        /* It's an array object.
+         */
+        if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
+            /* It's an array of object references.
+             */
+            scanObjectArray((ArrayObject *)obj, ctx);
+        }
+        // else there's nothing else to scan
+    } else {
+        /* It's a DataObject-compatible object.
+         */
+        scanInstanceFields((DataObject *)obj, clazz, ctx);
+
+        if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
+            GcHeap *gcHeap = gDvm.gcHeap;
+            Object *referent;
+
+            /* It's a subclass of java/lang/ref/Reference.
+             * The fields in this class have been arranged
+             * such that scanInstanceFields() did not actually
+             * mark the "referent" field;  we need to handle
+             * it specially.
+             *
+             * If the referent already has a strong mark (isMarked(referent)),
+             * we don't care about its reference status.
+             */
+            referent = dvmGetFieldObject(obj,
+                    gDvm.offJavaLangRefReference_referent);
+            if (referent != NULL &&
+                    !isMarked(ptr2chunk(referent), &gcHeap->markContext))
+            {
+                u4 refFlags;
+
+                if (gcHeap->markAllReferents) {
+                    LOG_REF("Hard-marking a reference\n");
+
+                    /* Don't bother with normal reference-following
+                     * behavior, just mark the referent.  This should
+                     * only be used when following objects that just
+                     * became scheduled for finalization.
+                     */
+                    markObjectNonNull(referent, ctx);
+                    goto skip_reference;
+                }
+
+                /* See if this reference was handled by a previous GC.
+                 */
+                if (dvmGetFieldObject(obj,
+                            gDvm.offJavaLangRefReference_vmData) ==
+                        SCHEDULED_REFERENCE_MAGIC)
+                {
+                    LOG_REF("Skipping scheduled reference\n");
+
+                    /* Don't reschedule it, but make sure that its
+                     * referent doesn't get collected (in case it's
+                     * a PhantomReference and wasn't cleared automatically).
+                     */
+                    //TODO: Mark these after handling all new refs of
+                    //      this strength, in case the new refs refer
+                    //      to the same referent.  Not a very common
+                    //      case, though.
+                    markObjectNonNull(referent, ctx);
+                    goto skip_reference;
+                }
+
+                /* Find out what kind of reference is pointing
+                 * to referent.
+                 */
+                refFlags = GET_CLASS_FLAG_GROUP(clazz,
+                    CLASS_ISREFERENCE |
+                    CLASS_ISWEAKREFERENCE |
+                    CLASS_ISPHANTOMREFERENCE);
+
+            /* We use the vmData field of Reference objects
+             * as a next pointer in a singly-linked list.
+             * That way, we don't need to allocate any memory
+             * while we're doing a GC.
+             */
+#define ADD_REF_TO_LIST(list, ref) \
+            do { \
+                Object *ARTL_ref_ = (/*de-const*/Object *)(ref); \
+                dvmSetFieldObject(ARTL_ref_, \
+                        gDvm.offJavaLangRefReference_vmData, list); \
+                list = ARTL_ref_; \
+            } while (false)
+
+                /* At this stage, we just keep track of all of
+                 * the live references that we've seen.  Later,
+                 * we'll walk through each of these lists and
+                 * deal with the referents.
+                 */
+                if (refFlags == CLASS_ISREFERENCE) {
+                    /* It's a soft reference.  Depending on the state,
+                     * we'll attempt to collect all of them, some of
+                     * them, or none of them.
+                     */
+                    if (gcHeap->softReferenceCollectionState ==
+                            SR_COLLECT_NONE)
+                    {
+                sr_collect_none:
+                        markObjectNonNull(referent, ctx);
+                    } else if (gcHeap->softReferenceCollectionState ==
+                            SR_COLLECT_ALL)
+                    {
+                sr_collect_all:
+                        ADD_REF_TO_LIST(gcHeap->softReferences, obj);
+                    } else {
+                        /* We'll only try to collect half of the
+                         * referents.
+                         */
+                        if (gcHeap->softReferenceColor++ & 1) {
+                            goto sr_collect_none;
+                        }
+                        goto sr_collect_all;
+                    }
+                } else {
+                    /* It's a weak or phantom reference.
+                     * Clearing CLASS_ISREFERENCE will reveal which.
+                     */
+                    refFlags &= ~CLASS_ISREFERENCE;
+                    if (refFlags == CLASS_ISWEAKREFERENCE) {
+                        ADD_REF_TO_LIST(gcHeap->weakReferences, obj);
+                    } else if (refFlags == CLASS_ISPHANTOMREFERENCE) {
+                        ADD_REF_TO_LIST(gcHeap->phantomReferences, obj);
+                    } else {
+                        assert(!"Unknown reference type");
+                    }
+                }
+#undef ADD_REF_TO_LIST
+            }
+        }
+
+    skip_reference:
+        /* If this is a class object, mark various other things that
+         * its internals point to.
+         *
+         * All class objects are instances of java.lang.Class,
+         * including the java.lang.Class class object.
+         */
+        if (clazz == gDvm.classJavaLangClass) {
+            scanClassObject((ClassObject *)obj, ctx);
+        }
+    }
+
+#if WITH_OBJECT_HEADERS
+    gMarkParent = NULL;
+#endif
+}
+
+/* Drain the mark stack, scanning each popped object.  Objects pushed
+ * during scanning are drained in the same loop.
+ */
+static void
+processMarkStack(GcMarkContext *ctx)
+{
+    /* Scan anything that's on the mark stack.
+     * We can't use the bitmaps anymore, so use
+     * a finger that points past the end of them.
+     */
+    ctx->finger = (void *)ULONG_MAX;
+    for (;;) {
+        const Object *obj;
+
+        if (ctx->stack.top == ctx->stack.base) {
+            break;
+        }
+        obj = *ctx->stack.top++;
+        scanObject(obj, ctx);
+    }
+}
+
+#ifndef NDEBUG
+/* Debug-only: last finger value seen by scanBitmapCallback(), used to
+ * assert that the bitmap walk proceeds in increasing address order.
+ */
+static uintptr_t gLastFinger = 0;
+#endif
+
+/* dvmHeapBitmapWalkList() callback: advance the mark finger and scan
+ * each chunk handed to us.  Always returns true to keep walking.
+ */
+static bool
+scanBitmapCallback(size_t numPtrs, void **ptrs, const void *finger, void *arg)
+{
+    GcMarkContext *ctx = (GcMarkContext *)arg;
+    size_t i;
+
+#ifndef NDEBUG
+    assert((uintptr_t)finger >= gLastFinger);
+    gLastFinger = (uintptr_t)finger;
+#endif
+
+    ctx->finger = finger;
+    for (i = 0; i < numPtrs; i++) {
+        /* The pointers we're getting back are DvmHeapChunks,
+         * not Objects.
+         */
+        scanObject(chunk2ptr(ptrs[i]), ctx);
+    }
+
+    return true;
+}
+
+/* Given bitmaps with the root set marked, find and mark all
+ * reachable objects.  When this returns, the entire set of
+ * live objects will be marked and the mark stack will be empty.
+ */
+void dvmHeapScanMarkedObjects()
+{
+    GcMarkContext *ctx = &gDvm.gcHeap->markContext;
+
+    /* The finger is reset to NULL by dvmHeapBeginMarkStep(). */
+    assert(ctx->finger == NULL);
+
+    /* The bitmaps currently have bits set for the root set.
+     * Walk across the bitmaps and scan each object.
+     */
+#ifndef NDEBUG
+    gLastFinger = 0;
+#endif
+    dvmHeapBitmapWalkList(ctx->bitmaps, ctx->numBitmaps,
+            scanBitmapCallback, ctx);
+
+    /* We've walked the mark bitmaps.  Scan anything that's
+     * left on the mark stack.
+     */
+    processMarkStack(ctx);
+
+    LOG_SCAN("done with marked objects\n");
+}
+
+/** Clear a Reference object's referent field directly.
+ *
+ * @param reference a java/lang/ref/Reference instance (non-NULL).
+ * @return true if we need to schedule a call to clear().
+ */
+static bool clearReference(Object *reference)
+{
+    /* This is what the default implementation of Reference.clear()
+     * does.  We're required to clear all references to a given
+     * referent atomically, so we can't pop in and out of interp
+     * code each time.
+     *
+     * Also, someone may have subclassed one of the basic Reference
+     * types, overriding clear().  We can't trust the clear()
+     * implementation to call super.clear();  we cannot let clear()
+     * resurrect the referent.  If we clear it here, we can safely
+     * call any overriding implementations.
+     */
+    dvmSetFieldObject(reference,
+            gDvm.offJavaLangRefReference_referent, NULL);
+
+#if FANCY_REFERENCE_SUBCLASS
+    /* See if clear() has actually been overridden.  If so,
+     * we need to schedule a call to it before calling enqueue().
+     */
+    if (reference->clazz->vtable[gDvm.voffJavaLangRefReference_clear]->clazz !=
+            gDvm.classJavaLangRefReference)
+    {
+        /* clear() has been overridden;  return true to indicate
+         * that we need to schedule a call to the real clear()
+         * implementation.
+         */
+        return true;
+    }
+#endif
+
+    return false;
+}
+
+/** Decide whether Reference.enqueue() needs to be scheduled for
+ * reference.  The call itself cannot happen here (it would run
+ * interpreted code from inside the GC).
+ *
+ * @return true if we need to schedule a call to enqueue().
+ */
+static bool enqueueReference(Object *reference)
+{
+#if FANCY_REFERENCE_SUBCLASS
+    /* See if this reference class has overridden enqueue();
+     * if not, we can take a shortcut.
+     */
+    if (reference->clazz->vtable[gDvm.voffJavaLangRefReference_enqueue]->clazz
+            == gDvm.classJavaLangRefReference)
+#endif
+    {
+        Object *queue = dvmGetFieldObject(reference,
+                gDvm.offJavaLangRefReference_queue);
+        /* A non-NULL queueNext means the reference is already enqueued. */
+        Object *queueNext = dvmGetFieldObject(reference,
+                gDvm.offJavaLangRefReference_queueNext);
+        if (queue == NULL || queueNext != NULL) {
+            /* There is no queue, or the reference has already
+             * been enqueued.  The Reference.enqueue() method
+             * will do nothing even if we call it.
+             */
+            return false;
+        }
+    }
+
+    /* We need to call enqueue(), but if we called it from
+     * here we'd probably deadlock.  Schedule a call.
+     */
+    return true;
+}
+
+/* All objects for stronger reference levels have been
+ * marked before this is called.
+ */
+void dvmHeapHandleReferences(Object *refListHead, enum RefType refType)
+{
+    Object *reference;
+    GcMarkContext *markContext = &gDvm.gcHeap->markContext;
+    const int offVmData = gDvm.offJavaLangRefReference_vmData;
+    const int offReferent = gDvm.offJavaLangRefReference_referent;
+    bool workRequired = false;
+
+size_t numCleared = 0;
+size_t numEnqueued = 0;
+    reference = refListHead;
+    while (reference != NULL) {
+        Object *next;
+        Object *referent;
+
+        /* Pull the interesting fields out of the Reference object.
+         */
+        next = dvmGetFieldObject(reference, offVmData);
+        referent = dvmGetFieldObject(reference, offReferent);
+
+        //TODO: when handling REF_PHANTOM, unlink any references
+        //      that fail this initial if().  We need to re-walk
+        //      the list, and it would be nice to avoid the extra
+        //      work.
+        if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
+            bool schedClear, schedEnqueue;
+
+            /* This is the strongest reference that refers to referent.
+             * Do the right thing.
+             */
+            switch (refType) {
+            case REF_SOFT:
+            case REF_WEAK:
+                schedClear = clearReference(reference);
+                schedEnqueue = enqueueReference(reference);
+                break;
+            case REF_PHANTOM:
+                /* PhantomReferences are not cleared automatically.
+                 * Until someone clears it (or the reference itself
+                 * is collected), the referent must remain alive.
+                 *
+                 * It's necessary to fully mark the referent because
+                 * it will still be present during the next GC, and
+                 * all objects that it points to must be valid.
+                 * (The referent will be marked outside of this loop,
+                 * after handing all references of this strength, in
+                 * case multiple references point to the same object.)
+                 */
+                schedClear = false;
+
+                /* A PhantomReference is only useful with a
+                 * queue, but since it's possible to create one
+                 * without a queue, we need to check.
+                 */
+                schedEnqueue = enqueueReference(reference);
+                break;
+            default:
+                assert(!"Bad reference type");
+                schedClear = false;
+                schedEnqueue = false;
+                break;
+            }
+numCleared += schedClear ? 1 : 0;
+numEnqueued += schedEnqueue ? 1 : 0;
+
+            if (schedClear || schedEnqueue) {
+                uintptr_t workBits;
+
+                /* Stuff the clear/enqueue bits in the bottom of
+                 * the pointer.  Assumes that objects are 8-byte
+                 * aligned.
+                 *
+                 * Note that we are adding the *Reference* (which
+                 * is by definition already marked at this point) to
+                 * this list; we're not adding the referent (which
+                 * has already been cleared).
+                 */
+                assert(((intptr_t)reference & 3) == 0);
+                assert(((WORKER_CLEAR | WORKER_ENQUEUE) & ~3) == 0);
+                workBits = (schedClear ? WORKER_CLEAR : 0) |
+                           (schedEnqueue ? WORKER_ENQUEUE : 0);
+                if (!dvmHeapAddRefToLargeTable(
+                        &gDvm.gcHeap->referenceOperations,
+                        (Object *)((uintptr_t)reference | workBits)))
+                {
+                    LOGE_HEAP("dvmMalloc(): no room for any more "
+                            "reference operations\n");
+                    dvmAbort();
+                }
+                workRequired = true;
+            }
+
+            if (refType != REF_PHANTOM) {
+                /* Let later GCs know not to reschedule this reference.
+                 */
+                dvmSetFieldObject(reference, offVmData,
+                        SCHEDULED_REFERENCE_MAGIC);
+            } // else this is handled later for REF_PHANTOM
+
+        } // else there was a stronger reference to the referent.
+
+        reference = next;
+    }
+#define refType2str(r) \
+    ((r) == REF_SOFT ? "soft" : ( \
+     (r) == REF_WEAK ? "weak" : ( \
+     (r) == REF_PHANTOM ? "phantom" : "UNKNOWN" )))
+LOGD_HEAP("dvmHeapHandleReferences(): cleared %zd, enqueued %zd %s references\n", numCleared, numEnqueued, refType2str(refType));
+
+    /* Walk through the reference list again, and mark any non-clear/marked
+     * referents.  Only PhantomReferences can have non-clear referents
+     * at this point.
+     */
+    if (refType == REF_PHANTOM) {
+        bool scanRequired = false;
+
+        HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_REFERENCE_CLEANUP, 0);
+        reference = refListHead;
+        while (reference != NULL) {
+            Object *next;
+            Object *referent;
+
+            /* Pull the interesting fields out of the Reference object.
+             */
+            next = dvmGetFieldObject(reference, offVmData);
+            referent = dvmGetFieldObject(reference, offReferent);
+
+            if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
+                markObjectNonNull(referent, markContext);
+                scanRequired = true;
+
+                /* Let later GCs know not to reschedule this reference.
+                 */
+                dvmSetFieldObject(reference, offVmData,
+                        SCHEDULED_REFERENCE_MAGIC);
+            }
+
+            reference = next;
+        }
+        HPROF_CLEAR_GC_SCAN_STATE();
+
+        if (scanRequired) {
+            processMarkStack(markContext);
+        }
+    }
+
+    if (workRequired) {
+        dvmSignalHeapWorker(false);
+    }
+}
+
+
+/* Find unreachable objects that need to be finalized,
+ * and schedule them for finalization.
+ */
+void dvmHeapScheduleFinalizations()
+{
+    HeapRefTable newPendingRefs;
+    LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
+    Object **ref;
+    Object **lastRef;
+    size_t totalPendCount;
+    GcMarkContext *markContext = &gDvm.gcHeap->markContext;
+
+    /*
+     * All reachable objects have been marked.
+     * Any unmarked finalizable objects need to be finalized.
+     */
+
+    /* Create a table that the new pending refs will
+     * be added to.
+     */
+    if (!dvmHeapInitHeapRefTable(&newPendingRefs, 128)) {
+        //TODO: mark all finalizable refs and hope that
+        //      we can schedule them next time.  Watch out,
+        //      because we may be expecting to free up space
+        //      by calling finalizers.
+        LOGE_GC("dvmHeapScheduleFinalizations(): no room for "
+                "pending finalizations\n");
+        dvmAbort();
+    }
+
+    /* Walk through finalizableRefs and move any unmarked references
+     * to the list of new pending refs.
+     */
+    totalPendCount = 0;
+    while (finRefs != NULL) {
+        Object **gapRef;
+        size_t newPendCount = 0;
+
+        gapRef = ref = finRefs->refs.table;
+        lastRef = finRefs->refs.nextEntry;
+        while (ref < lastRef) {
+            DvmHeapChunk *hc;
+
+            hc = ptr2chunk(*ref);
+            if (!isMarked(hc, markContext)) {
+                if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
+                    //TODO: add the current table and allocate
+                    //      a new, smaller one.
+                    LOGE_GC("dvmHeapScheduleFinalizations(): "
+                            "no room for any more pending finalizations: %zd\n",
+                            dvmHeapNumHeapRefTableEntries(&newPendingRefs));
+                    dvmAbort();
+                }
+                newPendCount++;
+            } else {
+                /* This ref is marked, so will remain on finalizableRefs.
+                 */
+                if (newPendCount > 0) {
+                    /* Copy it up to fill the holes.
+                     */
+                    *gapRef++ = *ref;
+                } else {
+                    /* No holes yet; don't bother copying.
+                     */
+                    gapRef++;
+                }
+            }
+            ref++;
+        }
+        finRefs->refs.nextEntry = gapRef;
+        //TODO: if the table is empty when we're done, free it.
+        totalPendCount += newPendCount;
+        finRefs = finRefs->next;
+    }
+    LOGD_GC("dvmHeapScheduleFinalizations(): %zd finalizers triggered.\n",
+            totalPendCount);
+    if (totalPendCount == 0) {
+        /* No objects required finalization.
+         * Free the empty temporary table.
+         */
+        dvmClearReferenceTable(&newPendingRefs);
+        return;
+    }
+
+    /* Add the new pending refs to the main list.
+     */
+    if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
+                &newPendingRefs))
+    {
+        LOGE_GC("dvmHeapScheduleFinalizations(): can't insert new "
+                "pending finalizations\n");
+        dvmAbort();
+    }
+
+    //TODO: try compacting the main list with a memcpy loop
+
+    /* Mark the refs we just moved;  we don't want them or their
+     * children to get swept yet.
+     */
+    ref = newPendingRefs.table;
+    lastRef = newPendingRefs.nextEntry;
+    assert(ref < lastRef);
+    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
+    while (ref < lastRef) {
+        markObjectNonNull(*ref, markContext);
+        ref++;
+    }
+    HPROF_CLEAR_GC_SCAN_STATE();
+
+    /* Set markAllReferents so that we don't collect referents whose
+     * only references are in final-reachable objects.
+     * TODO: eventually provide normal reference behavior by properly
+     *       marking these references.
+     */
+    gDvm.gcHeap->markAllReferents = true;
+    processMarkStack(markContext);
+    gDvm.gcHeap->markAllReferents = false;
+
+    dvmSignalHeapWorker(false);
+}
+
+void dvmHeapFinishMarkStep()
+{
+    HeapBitmap *markBitmap;
+    HeapBitmap objectBitmap;
+    GcMarkContext *markContext;
+
+    markContext = &gDvm.gcHeap->markContext;
+
+    /* The sweep step freed every object that appeared in the
+     * HeapSource bitmaps that didn't appear in the mark bitmaps.
+     * The new state of the HeapSource is exactly the final
+     * mark bitmaps, so swap them in.
+     *
+     * The old bitmaps will be swapped into the context so that
+     * we can clean them up.
+     */
+    dvmHeapSourceReplaceObjectBitmaps(markContext->bitmaps,
+            markContext->numBitmaps);
+
+    /* Clean up the old HeapSource bitmaps and anything else associated
+     * with the marking process.
+     */
+    dvmHeapBitmapDeleteList(markContext->bitmaps, markContext->numBitmaps);
+    destroyMarkStack(&markContext->stack);
+
+    memset(markContext, 0, sizeof(*markContext));
+}
+
+#if WITH_HPROF && WITH_HPROF_UNREACHABLE
+static bool
+hprofUnreachableBitmapCallback(size_t numPtrs, void **ptrs,
+        const void *finger, void *arg)
+{
+    hprof_context_t *hctx = (hprof_context_t *)arg;
+    size_t i;
+
+    for (i = 0; i < numPtrs; i++) {
+        Object *obj;
+
+        /* The pointers we're getting back are DvmHeapChunks, not
+         * Objects.
+         */
+        obj = (Object *)chunk2ptr(*ptrs++);
+
+        hprofMarkRootObject(hctx, obj, 0);
+        hprofDumpHeapObject(hctx, obj);
+    }
+
+    return true;
+}
+
+static void
+hprofDumpUnmarkedObjects(const HeapBitmap markBitmaps[],
+        const HeapBitmap objectBitmaps[], size_t numBitmaps)
+{
+    hprof_context_t *hctx = gDvm.gcHeap->hprofContext;
+    if (hctx == NULL) {
+        return;
+    }
+
+    LOGI("hprof: dumping unreachable objects\n");
+
+    HPROF_SET_GC_SCAN_STATE(HPROF_UNREACHABLE, 0);
+
+    dvmHeapBitmapXorWalkLists(markBitmaps, objectBitmaps, numBitmaps,
+            hprofUnreachableBitmapCallback, hctx);
+
+    HPROF_CLEAR_GC_SCAN_STATE();
+}
+#endif
+
+static bool
+sweepBitmapCallback(size_t numPtrs, void **ptrs, const void *finger, void *arg)
+{
+    const ClassObject *const classJavaLangClass = gDvm.classJavaLangClass;
+    size_t i;
+
+    for (i = 0; i < numPtrs; i++) {
+        DvmHeapChunk *hc;
+        Object *obj;
+
+        /* The pointers we're getting back are DvmHeapChunks, not
+         * Objects.
+         */
+        hc = (DvmHeapChunk *)*ptrs++;
+        obj = (Object *)chunk2ptr(hc);
+
+#if WITH_OBJECT_HEADERS
+        if (hc->markGeneration == gGeneration) {
+            LOGE("sweeping marked object: 0x%08x\n", (uint)obj);
+            dvmAbort();
+        }
+#endif
+
+        /* Free the monitor associated with the object.
+         */
+        dvmFreeObjectMonitor(obj);
+
+        /* NOTE: Dereferencing clazz is dangerous.  If obj was the last
+         * one to reference its class object, the class object could be
+         * on the sweep list, and could already have been swept, leaving
+         * us with a stale pointer.
+         */
+        LOGV_SWEEP("FREE: 0x%08x %s\n", (uint)obj, obj->clazz->name);
+
+        /* This assumes that java.lang.Class will never go away.
+         * If it can, and we were the last reference to it, it
+         * could have already been swept.  However, even in that case,
+         * gDvm.classJavaLangClass should still have a useful
+         * value.
+         */
+        if (obj->clazz == classJavaLangClass) {
+            LOGV_SWEEP("---------------> %s\n", ((ClassObject *)obj)->name);
+            /* dvmFreeClassInnards() may have already been called,
+             * but it's safe to call on the same ClassObject twice.
+             */
+            dvmFreeClassInnards((ClassObject *)obj);
+        }
+
+#if 0
+        /* Overwrite the to-be-freed object to make stale references
+         * more obvious.
+         */
+        {
+            int chunklen;
+            ClassObject *clazz = obj->clazz;
+#if WITH_OBJECT_HEADERS
+            DvmHeapChunk chunk = *hc;
+            chunk.header = ~OBJECT_HEADER | 1;
+#endif
+            chunklen = dvmHeapSourceChunkSize(hc);
+            memset(hc, 0xa5, chunklen);
+            obj->clazz = (ClassObject *)((uintptr_t)clazz ^ 0xffffffff);
+#if WITH_OBJECT_HEADERS
+            *hc = chunk;
+#endif
+        }
+#endif
+
+//TODO: provide a heapsource function that takes a list of pointers to free
+//      and call it outside of this loop.
+        dvmHeapSourceFree(hc);
+    }
+
+    return true;
+}
+
+/* A function suitable for passing to dvmHashForeachRemove()
+ * to clear out any unmarked objects.  Clears the low bits
+ * of the pointer because the intern table may set them.
+ */
+static int isUnmarkedObject(void *object)
+{
+    return !isMarked(ptr2chunk((uintptr_t)object & ~(HB_OBJECT_ALIGNMENT-1)),
+            &gDvm.gcHeap->markContext);
+}
+
+/* Walk through the list of objects that haven't been
+ * marked and free them.
+ */
+void
+dvmHeapSweepUnmarkedObjects(int *numFreed, size_t *sizeFreed)
+{
+    const HeapBitmap *markBitmaps;
+    const GcMarkContext *markContext;
+    HeapBitmap objectBitmaps[HEAP_SOURCE_MAX_HEAP_COUNT];
+    size_t origObjectsAllocated;
+    size_t origBytesAllocated;
+    size_t numBitmaps;
+
+    /* All reachable objects have been marked.
+     * Detach any unreachable interned strings before
+     * we sweep.
+     */
+    dvmGcDetachDeadInternedStrings(isUnmarkedObject);
+
+    /* Free any known objects that are not marked.
+     */
+    origObjectsAllocated = dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0);
+    origBytesAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
+
+    markContext = &gDvm.gcHeap->markContext;
+    markBitmaps = markContext->bitmaps;
+    numBitmaps = dvmHeapSourceGetObjectBitmaps(objectBitmaps,
+            HEAP_SOURCE_MAX_HEAP_COUNT);
+#ifndef NDEBUG
+    if (numBitmaps != markContext->numBitmaps) {
+        LOGE("heap bitmap count mismatch: %zd != %zd\n",
+                numBitmaps, markContext->numBitmaps);
+        dvmAbort();
+    }
+#endif
+
+#if WITH_HPROF && WITH_HPROF_UNREACHABLE
+    hprofDumpUnmarkedObjects(markBitmaps, objectBitmaps, numBitmaps);
+#endif
+
+    dvmHeapBitmapXorWalkLists(markBitmaps, objectBitmaps, numBitmaps,
+            sweepBitmapCallback, NULL);
+
+    *numFreed = origObjectsAllocated -
+            dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0);
+    *sizeFreed = origBytesAllocated -
+            dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
+
+#ifdef WITH_PROFILER
+    if (gDvm.allocProf.enabled) {
+        gDvm.allocProf.freeCount += *numFreed;
+        gDvm.allocProf.freeSize += *sizeFreed;
+    }
+#endif
+}
diff --git a/vm/alloc/MarkSweep.h b/vm/alloc/MarkSweep.h
new file mode 100644
index 0000000..b087b40
--- /dev/null
+++ b/vm/alloc/MarkSweep.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _DALVIK_ALLOC_MARK_SWEEP
+#define _DALVIK_ALLOC_MARK_SWEEP
+
+#include "alloc/HeapBitmap.h"
+#include "alloc/HeapSource.h"
+
+/* Downward-growing stack for better cache read behavior.
+ */
+typedef struct {
+    /* Lowest address (inclusive)
+     */
+    const Object **limit;
+
+    /* Current top of the stack (inclusive)
+     */
+    const Object **top;
+
+    /* Highest address (exclusive)
+     */
+    const Object **base;
+} GcMarkStack;
+
+/* This is declared publicly so that it can be included in gDvm.gcHeap.
+ */
+typedef struct {
+    HeapBitmap bitmaps[HEAP_SOURCE_MAX_HEAP_COUNT];
+    size_t numBitmaps;
+    GcMarkStack stack;
+    const void *finger;   // only used while scanning/recursing.
+} GcMarkContext;
+
+enum RefType {
+    REF_SOFT,
+    REF_WEAK,
+    REF_PHANTOM,
+    REF_WEAKGLOBAL
+};
+
+bool dvmHeapBeginMarkStep(void);
+void dvmHeapMarkRootSet(void);
+void dvmHeapScanMarkedObjects(void);
+void dvmHeapHandleReferences(Object *refListHead, enum RefType refType);
+void dvmHeapScheduleFinalizations(void);
+void dvmHeapFinishMarkStep(void);
+
+void dvmHeapSweepUnmarkedObjects(int *numFreed, size_t *sizeFreed);
+
+#endif  // _DALVIK_ALLOC_MARK_SWEEP
diff --git a/vm/alloc/TEST/HeapBitmapTest/Makefile b/vm/alloc/TEST/HeapBitmapTest/Makefile
new file mode 100644
index 0000000..fe31b24
--- /dev/null
+++ b/vm/alloc/TEST/HeapBitmapTest/Makefile
@@ -0,0 +1,28 @@
+.PHONY: all
+all: runtest
+
+$(shell mkdir -p out)
+
+CC := gcc
+CFLAGS := -g -Wall -Werror
+#CFLAGS += -O2
+
+out/main.o: main.c ../../HeapBitmap.h
+	$(CC) $(CFLAGS) -c $< -o $@ -I ../..
+
+out/HeapBitmap.o: ../../HeapBitmap.c ../../HeapBitmap.h ../../clz.h include/cutils/ashmem.h include/Dalvik.h
+	$(CC) $(CFLAGS) -c $< -o $@ -I ../.. -I include
+
+out/clz.o: ../../clz.c ../../clz.h
+	$(CC) $(CFLAGS) -c $< -o $@ -I ../..
+
+out/hbtest: out/main.o out/HeapBitmap.o out/clz.o
+	$(CC) $^ -o $@
+
+.PHONY: runtest
+runtest: out/hbtest
+	out/hbtest
+
+.PHONY: clean
+clean:
+	rm -rf out
diff --git a/vm/alloc/TEST/HeapBitmapTest/include/Dalvik.h b/vm/alloc/TEST/HeapBitmapTest/include/Dalvik.h
new file mode 100644
index 0000000..4c9f608
--- /dev/null
+++ b/vm/alloc/TEST/HeapBitmapTest/include/Dalvik.h
@@ -0,0 +1,18 @@
+#ifndef DALVIK_H_
+#define DALVIK_H_
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <limits.h>
+
+#define LOGW(...) printf("W/" __VA_ARGS__)
+#define LOGE(...) printf("E/" __VA_ARGS__)
+
+inline void dvmAbort(void) {
+    exit(1);
+}
+
+#endif  // DALVIK_H_
diff --git a/vm/alloc/TEST/HeapBitmapTest/include/cutils/ashmem.h b/vm/alloc/TEST/HeapBitmapTest/include/cutils/ashmem.h
new file mode 100644
index 0000000..8680c77
--- /dev/null
+++ b/vm/alloc/TEST/HeapBitmapTest/include/cutils/ashmem.h
@@ -0,0 +1,14 @@
+#ifndef ASHMEM_H_
+#define ASHMEM_H_
+
+#include <fcntl.h>
+
+#define ASHMEM_NAME_LEN 128
+
+inline int
+ashmem_create_region(const char *name, size_t len)
+{
+    return open("/dev/zero", O_RDWR);
+}
+
+#endif
diff --git a/vm/alloc/TEST/HeapBitmapTest/main.c b/vm/alloc/TEST/HeapBitmapTest/main.c
new file mode 100644
index 0000000..10fa7f8
--- /dev/null
+++ b/vm/alloc/TEST/HeapBitmapTest/main.c
@@ -0,0 +1,496 @@
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <assert.h>
+#include <string.h>
+#include <unistd.h>
+#define __attribute(x) /* disable inlining */
+#include "HeapBitmap.h"
+#undef __attribute
+
+#define PAGE_SIZE 4096
+#define HEAP_BASE ((void *)0x10000)
+#define HEAP_SIZE (5 * PAGE_SIZE + 888)
+
+#define VERBOSE 1
+#if VERBOSE
+#define TRACE(...) printf(__VA_ARGS__)
+#else
+#define TRACE(...) /**/
+#endif
+
+void
+test_init()
+{
+    HeapBitmap hb;
+    bool ok;
+
+    memset(&hb, 0x55, sizeof(hb));
+
+    ok = dvmHeapBitmapInit(&hb, HEAP_BASE, HEAP_SIZE, "test");
+    assert(ok);
+
+    assert(hb.bits != NULL);
+    assert(hb.bitsLen >= HB_OFFSET_TO_INDEX(HEAP_SIZE));
+    assert(hb.base == (uintptr_t)HEAP_BASE);
+    assert(hb.max < hb.base);
+
+    /* Make sure hb.bits is mapped.
+     */
+    *hb.bits = 0x55;
+    assert(*hb.bits == 0x55);
+    *hb.bits = 0;
+
+#define TEST_UNMAP 0
+#if TEST_UNMAP
+    /* Hold onto this to make sure it's unmapped later.
+     */
+    unsigned long int *bits = hb.bits;
+#endif
+
+    dvmHeapBitmapDelete(&hb);
+
+    assert(hb.bits == NULL);
+    assert(hb.bitsLen == 0);
+    assert(hb.base == 0);
+    assert(hb.max == 0);
+
+#if TEST_UNMAP
+    /* This pointer shouldn't be mapped anymore.
+     */
+    *bits = 0x55;
+    assert(!"Should have segfaulted");
+#endif
+}
+
+bool is_zeroed(const HeapBitmap *hb)
+{
+    int i;
+
+    for (i = 0; i < hb->bitsLen / sizeof (*hb->bits); i++) {
+        if (hb->bits[i] != 0L) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void assert_empty(const HeapBitmap *hb)
+{
+    assert(hb->bits != NULL);
+    assert(hb->bitsLen >= HB_OFFSET_TO_INDEX(HEAP_SIZE));
+    assert(hb->base == (uintptr_t)HEAP_BASE);
+    assert(hb->max < hb->base);
+
+    assert(is_zeroed(hb));
+
+    assert(!dvmHeapBitmapMayContainObject(hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapMayContainObject(hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(!dvmHeapBitmapIsObjectBitSet(hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapIsObjectBitSet(hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+}
+
+void
+test_bits()
+{
+    HeapBitmap hb;
+    bool ok;
+
+    ok = dvmHeapBitmapInit(&hb, HEAP_BASE, HEAP_SIZE, "test");
+    assert(ok);
+
+    assert_empty(&hb);
+
+    /* Set the lowest address.
+     */
+    dvmHeapBitmapSetObjectBit(&hb, HEAP_BASE);
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+
+    /* Set the highest address.
+     */
+    dvmHeapBitmapSetObjectBit(&hb, HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT);
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE));
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+
+    /* Clear the lowest address.
+     */
+    dvmHeapBitmapClearObjectBit(&hb, HEAP_BASE);
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!is_zeroed(&hb));
+
+    /* Clear the highest address.
+     */
+    dvmHeapBitmapClearObjectBit(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT);
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(is_zeroed(&hb));
+
+    /* Clean up.
+     */
+    dvmHeapBitmapDelete(&hb);
+}
+
+void
+test_clear()
+{
+    HeapBitmap hb;
+    bool ok;
+
+    ok = dvmHeapBitmapInit(&hb, HEAP_BASE, HEAP_SIZE, "test");
+    assert(ok);
+    assert_empty(&hb);
+
+    /* Set the highest address.
+     */
+    dvmHeapBitmapSetObjectBit(&hb, HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT);
+    assert(!is_zeroed(&hb));
+
+    /* Clear the bitmap.
+     */
+    dvmHeapBitmapZero(&hb);
+    assert_empty(&hb);
+
+    /* Clean up.
+     */
+    dvmHeapBitmapDelete(&hb);
+}
+
+void
+test_modify()
+{
+    HeapBitmap hb;
+    bool ok;
+    unsigned long bit;
+
+    ok = dvmHeapBitmapInit(&hb, HEAP_BASE, HEAP_SIZE, "test");
+    assert(ok);
+    assert_empty(&hb);
+
+    /* Set the lowest address.
+     */
+    bit = dvmHeapBitmapSetAndReturnObjectBit(&hb, HEAP_BASE);
+    assert(bit == 0);
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+
+    /* Set the lowest address again.
+     */
+    bit = dvmHeapBitmapSetAndReturnObjectBit(&hb, HEAP_BASE);
+    assert(bit != 0);
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+
+    /* Set the highest address.
+     */
+    bit = dvmHeapBitmapSetAndReturnObjectBit(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT);
+    assert(bit == 0);
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE));
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+
+    /* Set the highest address again.
+     */
+    bit = dvmHeapBitmapSetAndReturnObjectBit(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT);
+    assert(bit != 0);
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE));
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+    assert(!dvmHeapBitmapMayContainObject(&hb,
+            HEAP_BASE + HEAP_SIZE));
+
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE));
+    assert(!dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HB_OBJECT_ALIGNMENT));
+    assert(dvmHeapBitmapIsObjectBitSet(&hb,
+            HEAP_BASE + HEAP_SIZE - HB_OBJECT_ALIGNMENT));
+
+    /* Clean up.
+     */
+    dvmHeapBitmapDelete(&hb);
+}
+
+/*
+ * xor test support functions
+ */
+
+static void *gCallbackArg = NULL;
+
+#define NUM_XOR_PTRS  128
+static size_t gNumPtrs;
+static void *gXorPtrs[NUM_XOR_PTRS];
+static bool gClearedPtrs[NUM_XOR_PTRS];
+static bool gSeenPtrs[NUM_XOR_PTRS];
+
+bool
+xorCallback(size_t numPtrs, void **ptrs, const void *finger, void *arg)
+{
+    assert(numPtrs > 0);
+    assert(ptrs != NULL);
+    assert(arg == gCallbackArg);
+
+size_t i;
+    for (i = 0; i < numPtrs; i++) {
+        assert(ptrs[i] < finger);
+        printf("callback: 0x%08x ( < 0x%08x )\n",
+                (uintptr_t)ptrs[i], (uintptr_t)finger);
+    }
+
+    return true;
+}
+
+bool
+seenAndClearedMatch()
+{
+    size_t i;
+    for (i = 0; i < gNumPtrs; i++) {
+        if (gClearedPtrs[i] != gSeenPtrs[i]) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void
+run_xor(ssize_t offset, size_t step)
+{
+    assert(step != 0);
+    assert(step < HEAP_SIZE);
+
+    /* Figure out the range.
+     */
+uintptr_t base;
+uintptr_t top;
+    if (offset >= 0) {
+        base = (uintptr_t)HEAP_BASE + offset;
+    } else {
+        base = (uintptr_t)HEAP_BASE + (uintptr_t)HEAP_SIZE + offset;
+    }
+    if (base < (uintptr_t)HEAP_BASE) {
+        base = (uintptr_t)HEAP_BASE;
+    } else if (base > (uintptr_t)(HEAP_BASE + HEAP_SIZE)) {
+        base = (uintptr_t)(HEAP_BASE + HEAP_SIZE);
+    } else {
+        base = (base + HB_OBJECT_ALIGNMENT - 1) & ~(HB_OBJECT_ALIGNMENT - 1);
+    }
+    step *= HB_OBJECT_ALIGNMENT;
+    top = base + step * NUM_XOR_PTRS;
+    if (top > (uintptr_t)(HEAP_BASE + HEAP_SIZE)) {
+        top = (uintptr_t)(HEAP_BASE + HEAP_SIZE);
+    }
+
+    /* Create the pointers.
+     */
+    gNumPtrs = 0;
+    memset(gXorPtrs, 0, sizeof(gXorPtrs));
+    memset(gClearedPtrs, 0, sizeof(gClearedPtrs));
+    memset(gSeenPtrs, 0, sizeof(gSeenPtrs));
+
+uintptr_t addr;
+void **p = gXorPtrs;
+    for (addr = base; addr < top; addr += step) {
+        *p++ = (void *)addr;
+        gNumPtrs++;
+    }
+    assert(seenAndClearedMatch());
+
+    /* Set up the bitmaps.
+     */
+HeapBitmap hb1, hb2;
+bool ok;
+
+    ok = dvmHeapBitmapInit(&hb1, HEAP_BASE, HEAP_SIZE, "test1");
+    assert(ok);
+    ok = dvmHeapBitmapInitFromTemplate(&hb2, &hb1, "test2");
+    assert(ok);
+
+    /* Walk two empty bitmaps.
+     */
+TRACE("walk 0\n");
+    ok = dvmHeapBitmapXorWalk(&hb1, &hb2, xorCallback, gCallbackArg);
+    assert(ok);
+    assert(seenAndClearedMatch());
+
+    /* Walk one empty bitmap.
+     */
+TRACE("walk 1\n");
+    dvmHeapBitmapSetObjectBit(&hb1, (void *)base);
+    ok = dvmHeapBitmapXorWalk(&hb1, &hb2, xorCallback, gCallbackArg);
+    assert(ok);
+
+    /* Make the bitmaps match.
+     */
+TRACE("walk 2\n");
+    dvmHeapBitmapSetObjectBit(&hb2, (void *)base);
+    ok = dvmHeapBitmapXorWalk(&hb1, &hb2, xorCallback, gCallbackArg);
+    assert(ok);
+
+    /* Clear the bitmaps.
+     */
+    dvmHeapBitmapZero(&hb1);
+    assert_empty(&hb1);
+    dvmHeapBitmapZero(&hb2);
+    assert_empty(&hb2);
+
+    /* Set the pointers we created in one of the bitmaps,
+     * then visit them.
+     */
+size_t i;
+    for (i = 0; i < gNumPtrs; i++) {
+        dvmHeapBitmapSetObjectBit(&hb1, gXorPtrs[i]);
+    }
+TRACE("walk 3\n");
+    ok = dvmHeapBitmapXorWalk(&hb1, &hb2, xorCallback, gCallbackArg);
+    assert(ok);
+
+    /* Set every third pointer in the other bitmap, and visit again.
+     */
+    for (i = 0; i < gNumPtrs; i += 3) {
+        dvmHeapBitmapSetObjectBit(&hb2, gXorPtrs[i]);
+    }
+TRACE("walk 4\n");
+    ok = dvmHeapBitmapXorWalk(&hb1, &hb2, xorCallback, gCallbackArg);
+    assert(ok);
+
+    /* Set every other pointer in the other bitmap, and visit again.
+     */
+    for (i = 0; i < gNumPtrs; i += 2) {
+        dvmHeapBitmapSetObjectBit(&hb2, gXorPtrs[i]);
+    }
+TRACE("walk 5\n");
+    ok = dvmHeapBitmapXorWalk(&hb1, &hb2, xorCallback, gCallbackArg);
+    assert(ok);
+
+    /* Walk just one bitmap.
+     */
+TRACE("walk 6\n");
+    ok = dvmHeapBitmapWalk(&hb2, xorCallback, gCallbackArg);
+    assert(ok);
+
+//xxx build an expect list for the callback
+//xxx test where max points to beginning, middle, and end of a word
+
+    /* Clean up.
+     */
+    dvmHeapBitmapDelete(&hb1);
+    dvmHeapBitmapDelete(&hb2);
+}
+
+void
+test_xor()
+{
+    run_xor(0, 1);
+    run_xor(100, 34);
+}
+
+int main(int argc, char *argv[])
+{
+    printf("test_init...\n");
+    test_init();
+
+    printf("test_bits...\n");
+    test_bits();
+
+    printf("test_clear...\n");
+    test_clear();
+
+    printf("test_modify...\n");
+    test_modify();
+
+    printf("test_xor...\n");
+    test_xor();
+
+    printf("done.\n");
+    return 0;
+}
diff --git a/vm/alloc/clz.c b/vm/alloc/clz.c
new file mode 100644
index 0000000..77376a3
--- /dev/null
+++ b/vm/alloc/clz.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "clz.h"
+
+int clz_impl(unsigned long int x)
+{
+#if defined(__arm__) && !defined(__thumb__)
+    return __builtin_clz(x);
+#else
+    if (!x) return 32;
+    int e = 31;
+    if (x&0xFFFF0000)   { e -=16; x >>=16; }
+    if (x&0x0000FF00)   { e -= 8; x >>= 8; }
+    if (x&0x000000F0)   { e -= 4; x >>= 4; }
+    if (x&0x0000000C)   { e -= 2; x >>= 2; }
+    if (x&0x00000002)   { e -= 1; }
+    return e;
+#endif
+}
diff --git a/vm/alloc/clz.h b/vm/alloc/clz.h
new file mode 100644
index 0000000..58096c9
--- /dev/null
+++ b/vm/alloc/clz.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _DALVIK_CLZ
+
+#include <stdint.h>
+
+#if defined(__arm__) && !defined(__thumb__)
+
+#define CLZ(x) __builtin_clz(x)
+
+#else
+
+int clz_impl(unsigned long int x);
+#define CLZ(x) clz_impl(x)
+
+#endif
+
+#endif // _DALVIK_CLZ
diff --git a/vm/analysis/CodeVerify.c b/vm/analysis/CodeVerify.c
new file mode 100644
index 0000000..fd4c75b
--- /dev/null
+++ b/vm/analysis/CodeVerify.c
@@ -0,0 +1,5174 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik bytecode structural verifier.  The only public entry point
+ * (except for a few shared utility functions) is dvmVerifyCodeFlow().
+ *
+ * TODO: might benefit from a signature-->class lookup cache.  Could avoid
+ * some string-peeling and wouldn't need to compute hashes.
+ *
+ * TODO: we do too much stuff in here that could be done in the static
+ * verification pass.  It's convenient, because we have all of the
+ * necessary information, but it's more efficient to do it over in
+ * DexVerify.c because in here we may have to process instructions
+ * multiple times.
+ */
+#include "Dalvik.h"
+#include "analysis/CodeVerify.h"
+#include "libdex/DexCatch.h"
+#include "libdex/InstrUtils.h"
+
+#include <stddef.h>
+
+
/*
 * If defined, store registers for all instructions, not just branch
 * targets.  Increases memory usage and adds to CPU load.  Only necessary
 * when generating data for exact GC.
 *
 * NOTE(review): expands to the C expression "false", so it must be tested
 * with a regular "if", not "#if" -- confirm all uses do this.
 */
#define USE_FULL_TABLE  false

/*
 * Set this to enable dead code scanning.  This is not required, but it's
 * very useful when testing changes to the verifier (to make sure we're not
 * skipping over stuff) and for checking the optimized output from "dx".
 */
#define DEAD_CODE_SCAN  true

/* file-wide verbose-logging toggle, set from doVerboseLogging() */
static bool gDebugVerbose = false;      // TODO: remove this
+
+/*
+ * Selectively enable verbose debug logging -- use this to activate
+ * dumpRegTypes() calls for all instructions in the specified method.
+ */
+static inline bool doVerboseLogging(const Method* meth) {
+    return false;       /* COMMENT OUT to enable verbose debugging */
+
+    const char* cd = "Lop_lshr;";
+    const char* mn = "test";
+    const char* sg = "(II)J";
+    return (strcmp(meth->clazz->descriptor, cd) == 0 &&
+            dvmCompareNameDescriptorAndMethod(mn, sg, meth) == 0);
+}
+
/* which extra detail bits dumpRegTypes() shows by default (none) */
#define SHOW_REG_DETAILS    (0 /*| DRT_SHOW_REF_TYPES | DRT_SHOW_LOCALS*/)

/* verification failure reporting */
#define LOG_VFY(...)                dvmLogVerifyFailure(NULL, __VA_ARGS__)
#define LOG_VFY_METH(_meth, ...)    dvmLogVerifyFailure(_meth, __VA_ARGS__)

/*
 * We need an extra "pseudo register" to hold the return type briefly.  It
 * can be category 1 or 2, so we need two slots.
 */
#define kExtraRegs  2
/* index of the result pseudo-register: one past the method's own registers */
#define RESULT_REGISTER(_insnRegCount)  (_insnRegCount)

/*
 * RegType holds information about the type of data held in a register.
 * For most types it's a simple enum.  For reference types it holds a
 * pointer to the ClassObject, and for uninitialized references it holds
 * an index into the UninitInstanceMap.
 */
typedef u4 RegType;
+
/*
 * Enumeration for RegType values.  The "hi" piece of a 64-bit value MUST
 * immediately follow the "lo" piece in the enumeration, so we can check
 * that hi==lo+1.
 *
 * Assignment of constants:
 *   [-MAXINT,-32768)   : integer
 *   [-32768,-128)      : short
 *   [-128,0)           : byte
 *   0                  : zero
 *   1                  : one
 *   [2,128)            : posbyte
 *   [128,32768)        : posshort
 *   [32768,65536)      : char
 *   [65536,MAXINT]     : integer
 *
 * Allowed "implicit" widening conversions:
 *   zero -> boolean, posbyte, byte, posshort, short, char, integer, ref (null)
 *   one -> boolean, posbyte, byte, posshort, short, char, integer
 *   boolean -> posbyte, byte, posshort, short, char, integer
 *   posbyte -> posshort, short, integer, char
 *   byte -> short, integer
 *   posshort -> integer, char
 *   short -> integer
 *   char -> integer
 *
 * In addition, all of the above can convert to "float".
 *
 * We're more careful with integer values than the spec requires.  The
 * motivation is to restrict byte/char/short to the correct range of values.
 * For example, if a method takes a byte argument, we don't want to allow
 * the code to load the constant "1024" and pass it in.
 */
enum {
    kRegTypeUnknown = 0,    /* initial state; use value=0 so calloc works */
    kRegTypeUninit = 1,     /* MUST be odd to distinguish from pointer */
    kRegTypeConflict,       /* merge clash makes this reg's type unknowable */

    /*
     * Category-1nr types.  The order of these is chiseled into a couple
     * of tables (gMergeTab, canConvertTo1nr's convTab), so don't add,
     * remove, or reorder if you can avoid it.
     */
#define kRegType1nrSTART    kRegTypeFloat
    kRegTypeFloat,
    kRegTypeZero,           /* 32-bit 0, could be Boolean, Int, Float, or Ref */
    kRegTypeOne,            /* 32-bit 1, could be Boolean, Int, Float */
    kRegTypeBoolean,        /* must be 0 or 1 */
    kRegTypePosByte,        /* byte, known positive (can become char) */
    kRegTypeByte,
    kRegTypePosShort,       /* short, known positive (can become char) */
    kRegTypeShort,
    kRegTypeChar,
    kRegTypeInteger,
#define kRegType1nrEND      kRegTypeInteger

    kRegTypeLongLo,         /* lower-numbered register; endian-independent */
    kRegTypeLongHi,
    kRegTypeDoubleLo,
    kRegTypeDoubleHi,

    /*
     * Anything larger than this is a ClassObject or uninit ref.  Mask off
     * all but the low 8 bits; if you're left with kRegTypeUninit, pull
     * the uninit index out of the high 24.  Because kRegTypeUninit has an
     * odd value, there is no risk of a particular ClassObject pointer bit
     * pattern being confused for it (assuming our class object allocator
     * uses word alignment).
     */
    kRegTypeMAX
};
#define kRegTypeUninitMask  0xff
/* uninit refs keep their UninitInstanceMap slot in the bits above the tag */
#define kRegTypeUninitShift 8
+
/*
 * Big fat collection of registers: per-address register-type snapshots
 * for one method, used while its code is being verified.
 */
typedef struct RegisterTable {
    /*
     * Array of RegType pointers, one per address in the method.  We only
     * set the pointers for addresses that are branch targets (all
     * addresses when full-table mode is in effect).
     */
    RegType**   addrRegs;

    /*
     * Number of registers we track for each instruction.  This is equal
     * to the method's declared "registersSize" plus kExtraRegs.
     */
    int         insnRegCount;

    /*
     * A single large alloc, with all of the storage needed for insnRegs.
     * addrRegs entries point into this block.
     */
    RegType*    regAlloc;
} RegisterTable;
+
+
/* fwd */
/*
 * NOTE(review): the trailing backslashes on several prototypes below are
 * redundant line splices (the declarations continue onto the next line
 * regardless); they are harmless.
 */
static void checkMergeTab(void);
static bool isInitMethod(const Method* meth);
static RegType getInvocationThis(const RegType* insnRegs,\
    const int insnRegCount, const DecodedInstruction* pDecInsn, bool* pOkay);
static void verifyRegisterType(const RegType* insnRegs, const int insnRegCount,\
    u4 vsrc, RegType checkType, bool* pOkay);
static bool doCodeVerification(const Method* meth, InsnFlags* insnFlags,\
    RegisterTable* regTable, UninitInstanceMap* uninitMap);
static bool verifyInstruction(const Method* meth, InsnFlags* insnFlags,\
    RegisterTable* regTable, RegType* workRegs, int insnIdx,
    UninitInstanceMap* uninitMap, int* pStartGuess);
static ClassObject* findCommonSuperclass(ClassObject* c1, ClassObject* c2);
static void dumpRegTypes(const Method* meth, const InsnFlags* insnFlags,\
    const RegType* addrRegs, int addr, const char* addrName,
    const UninitInstanceMap* uninitMap, int displayFlags);

/* bit values for dumpRegTypes() "displayFlags" */
enum {
    DRT_SIMPLE          = 0,
    DRT_SHOW_REF_TYPES  = 0x01,
    DRT_SHOW_LOCALS     = 0x02,
};
+
+
/*
 * ===========================================================================
 *      RegType and UninitInstanceMap utility functions
 * ===========================================================================
 */

/* one- and two-character aliases, used only to keep gMergeTab readable;
 * all are #undef'd immediately after the table */
#define __  kRegTypeUnknown
#define _U  kRegTypeUninit
#define _X  kRegTypeConflict
#define _F  kRegTypeFloat
#define _0  kRegTypeZero
#define _1  kRegTypeOne
#define _Z  kRegTypeBoolean
#define _b  kRegTypePosByte
#define _B  kRegTypeByte
#define _s  kRegTypePosShort
#define _S  kRegTypeShort
#define _C  kRegTypeChar
#define _I  kRegTypeInteger
#define _J  kRegTypeLongLo
#define _j  kRegTypeLongHi
#define _D  kRegTypeDoubleLo
#define _d  kRegTypeDoubleHi

/*
 * Merge result table.  The table is symmetric along the diagonal
 * (verified at runtime by checkMergeTab() in debug builds).
 *
 * Note that 32-bit int/float do not merge into 64-bit long/double.  This
 * is a register merge, not a widening conversion.  Only the "implicit"
 * widening within a category, e.g. byte to short, is allowed.
 *
 * Because Dalvik does not draw a distinction between int and float, we
 * have to allow free exchange between 32-bit int/float and 64-bit
 * long/double.
 *
 * Note that Uninit+Uninit=Uninit.  This holds true because we only
 * use this when the RegType value is exactly equal to kRegTypeUninit, which
 * can only happen for the zeroth entry in the table.
 *
 * "Unknown" never merges with anything known.  The only time a register
 * transitions from "unknown" to "known" is when we're executing code
 * for the first time, and we handle that with a simple copy.
 */
static const /*RegType*/ char gMergeTab[kRegTypeMAX][kRegTypeMAX] =
{
    /* chk:  _  U  X  F  0  1  Z  b  B  s  S  C  I  J  j  D  d */
    { /*_*/ __,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X },
    { /*U*/ _X,_U,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X },
    { /*X*/ _X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X },
    { /*F*/ _X,_X,_X,_F,_F,_F,_F,_F,_F,_F,_F,_F,_F,_X,_X,_X,_X },
    { /*0*/ _X,_X,_X,_F,_0,_Z,_Z,_b,_B,_s,_S,_C,_I,_X,_X,_X,_X },
    { /*1*/ _X,_X,_X,_F,_Z,_1,_Z,_b,_B,_s,_S,_C,_I,_X,_X,_X,_X },
    { /*Z*/ _X,_X,_X,_F,_Z,_Z,_Z,_b,_B,_s,_S,_C,_I,_X,_X,_X,_X },
    { /*b*/ _X,_X,_X,_F,_b,_b,_b,_b,_B,_s,_S,_C,_I,_X,_X,_X,_X },
    { /*B*/ _X,_X,_X,_F,_B,_B,_B,_B,_B,_S,_S,_I,_I,_X,_X,_X,_X },
    { /*s*/ _X,_X,_X,_F,_s,_s,_s,_s,_S,_s,_S,_C,_I,_X,_X,_X,_X },
    { /*S*/ _X,_X,_X,_F,_S,_S,_S,_S,_S,_S,_S,_I,_I,_X,_X,_X,_X },
    { /*C*/ _X,_X,_X,_F,_C,_C,_C,_C,_I,_C,_I,_C,_I,_X,_X,_X,_X },
    { /*I*/ _X,_X,_X,_F,_I,_I,_I,_I,_I,_I,_I,_I,_I,_X,_X,_X,_X },
    { /*J*/ _X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_J,_X,_J,_X },
    { /*j*/ _X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_j,_X,_j },
    { /*D*/ _X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_J,_X,_D,_X },
    { /*d*/ _X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_X,_j,_X,_d },
};

#undef __
#undef _U
#undef _X
#undef _F
#undef _0
#undef _1
#undef _Z
#undef _b
#undef _B
#undef _s
#undef _S
#undef _C
#undef _I
#undef _J
#undef _j
#undef _D
#undef _d
+
+#ifndef NDEBUG
+/*
+ * Verify symmetry in the conversion table.
+ */
+static void checkMergeTab(void)
+{
+    int i, j;
+
+    for (i = 0; i < kRegTypeMAX; i++) {
+        for (j = i; j < kRegTypeMAX; j++) {
+            if (gMergeTab[i][j] != gMergeTab[j][i]) {
+                LOGE("Symmetry violation: %d,%d vs %d,%d\n", i, j, j, i);
+                dvmAbort();
+            }
+        }
+    }
+}
+#endif
+
+/*
+ * Determine whether we can convert "srcType" to "checkType", where
+ * "checkType" is one of the category-1 non-reference types.
+ *
+ * 32-bit int and float are interchangeable.
+ */
+static bool canConvertTo1nr(RegType srcType, RegType checkType)
+{
+    static const char convTab
+        [kRegType1nrEND-kRegType1nrSTART+1][kRegType1nrEND-kRegType1nrSTART+1] =
+    {
+        /* chk: F  0  1  Z  b  B  s  S  C  I */
+        { /*F*/ 1, 0, 0, 0, 0, 0, 0, 0, 0, 1 },
+        { /*0*/ 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 },
+        { /*1*/ 1, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
+        { /*Z*/ 1, 0, 0, 1, 1, 1, 1, 1, 1, 1 },
+        { /*b*/ 1, 0, 0, 0, 1, 1, 1, 1, 1, 1 },
+        { /*B*/ 1, 0, 0, 0, 0, 1, 0, 1, 0, 1 },
+        { /*s*/ 1, 0, 0, 0, 0, 0, 1, 1, 1, 1 },
+        { /*S*/ 1, 0, 0, 0, 0, 0, 0, 1, 0, 1 },
+        { /*C*/ 1, 0, 0, 0, 0, 0, 0, 0, 1, 1 },
+        { /*I*/ 1, 0, 0, 0, 0, 0, 0, 0, 0, 1 },
+    };
+
+    assert(checkType >= kRegType1nrSTART && checkType <= kRegType1nrEND);
+#if 0
+    if (checkType < kRegType1nrSTART || checkType > kRegType1nrEND) {
+        LOG_VFY("Unexpected checkType %d (srcType=%d)\n", checkType, srcType);
+        assert(false);
+        return false;
+    }
+#endif
+
+    //printf("convTab[%d][%d] = %d\n", srcType, checkType,
+    //    convTab[srcType-kRegType1nrSTART][checkType-kRegType1nrSTART]);
+    if (srcType >= kRegType1nrSTART && srcType <= kRegType1nrEND)
+        return (bool) convTab[srcType-kRegType1nrSTART][checkType-kRegType1nrSTART];
+
+    return false;
+}
+
+/*
+ * Determine whether the types are compatible.  In Dalvik, 64-bit doubles
+ * and longs are interchangeable.
+ */
+static bool canConvertTo2(RegType srcType, RegType checkType)
+{
+    return ((srcType == kRegTypeLongLo || srcType == kRegTypeDoubleLo) &&
+            (checkType == kRegTypeLongLo || checkType == kRegTypeDoubleLo));
+}
+
+/*
+ * Given a 32-bit constant, return the most-restricted RegType that can hold
+ * the value.
+ */
+static RegType determineCat1Const(s4 value)
+{
+    if (value < -32768)
+        return kRegTypeInteger;
+    else if (value < -128)
+        return kRegTypeShort;
+    else if (value < 0)
+        return kRegTypeByte;
+    else if (value == 0)
+        return kRegTypeZero;
+    else if (value == 1)
+        return kRegTypeOne;
+    else if (value < 128)
+        return kRegTypePosByte;
+    else if (value < 32768)
+        return kRegTypePosShort;
+    else if (value < 65536)
+        return kRegTypeChar;
+    else
+        return kRegTypeInteger;
+}
+
+/*
+ * Convert a VM PrimitiveType enum value to the equivalent RegType value.
+ */
+static RegType primitiveTypeToRegType(PrimitiveType primType)
+{
+    struct {
+        RegType         regType;        /* type equivalent */
+        PrimitiveType   primType;       /* verification */
+    } convTab[] = {
+        /* must match order of enum in Object.h */
+        { kRegTypeBoolean,      PRIM_BOOLEAN },
+        { kRegTypeChar,         PRIM_CHAR },
+        { kRegTypeFloat,        PRIM_FLOAT },
+        { kRegTypeDoubleLo,     PRIM_DOUBLE },
+        { kRegTypeByte,         PRIM_BYTE },
+        { kRegTypeShort,        PRIM_SHORT },
+        { kRegTypeInteger,      PRIM_INT },
+        { kRegTypeLongLo,       PRIM_LONG },
+        // PRIM_VOID
+    };
+
+    if (primType < 0 || primType > (int) (sizeof(convTab) / sizeof(convTab[0])))
+    {
+        assert(false);
+        return kRegTypeUnknown;
+    }
+
+    assert(convTab[primType].primType == primType);
+    return convTab[primType].regType;
+}
+
/*
 * Create a new uninitialized instance map.
 *
 * The map is allocated and populated with address entries.  The addresses
 * appear in ascending order to allow binary searching.
 *
 * Very few methods have 10 or more new-instance instructions; the
 * majority have 0 or 1.  Occasionally a static initializer will have 200+.
 *
 * "newInstanceCount" is the number of new-instance instructions already
 * counted in the method.  Returns NULL on allocation failure.
 */
UninitInstanceMap* dvmCreateUninitInstanceMap(const Method* meth,
    const InsnFlags* insnFlags, int newInstanceCount)
{
    const int insnsSize = dvmGetMethodInsnsSize(meth);
    const u2* insns = meth->insns;
    UninitInstanceMap* uninitMap;
    bool isInit = false;
    int idx, addr;

    if (isInitMethod(meth)) {
        /* constructors get an extra slot for the uninitialized "this" */
        newInstanceCount++;
        isInit = true;
    }

    /*
     * Allocate the header and map as a single unit.
     *
     * TODO: consider having a static instance so we can avoid allocations.
     * I don't think the verifier is guaranteed to be single-threaded when
     * running in the VM (rather than dexopt), so that must be taken into
     * account.
     */
    int size = offsetof(UninitInstanceMap, map) +
                newInstanceCount * sizeof(uninitMap->map[0]);
    uninitMap = calloc(1, size);    /* zero-fill => all clazz slots NULL */
    if (uninitMap == NULL)
        return NULL;
    uninitMap->numEntries = newInstanceCount;

    idx = 0;
    if (isInit) {
        /* reserved slot for "this"; kUninitThisArgAddr is its pseudo-address */
        uninitMap->map[idx++].addr = kUninitThisArgAddr;
    }

    /*
     * Run through and find the new-instance instructions.
     */
    for (addr = 0; addr < insnsSize; /**/) {
        int width = dvmInsnGetWidth(insnFlags, addr);

        /* low byte of the first code unit is the opcode */
        if ((*insns & 0xff) == OP_NEW_INSTANCE)
            uninitMap->map[idx++].addr = addr;

        addr += width;
        insns += width;
    }

    assert(idx == newInstanceCount);
    return uninitMap;
}
+
/*
 * Free the map.  Accepts NULL (free(NULL) is a no-op).
 */
void dvmFreeUninitInstanceMap(UninitInstanceMap* uninitMap)
{
    free(uninitMap);
}
+
+/*
+ * Set the class object associated with the instruction at "addr".
+ *
+ * Returns the map slot index, or -1 if the address isn't listed in the map
+ * (shouldn't happen) or if a class is already associated with the address
+ * (bad bytecode).
+ *
+ * Entries, once set, do not change -- a given address can only allocate
+ * one type of object.
+ */
+int dvmSetUninitInstance(UninitInstanceMap* uninitMap, int addr,
+    ClassObject* clazz)
+{
+    int idx;
+
+    assert(clazz != NULL);
+
+    /* TODO: binary search when numEntries > 8 */
+    for (idx = uninitMap->numEntries - 1; idx >= 0; idx--) {
+        if (uninitMap->map[idx].addr == addr) {
+            if (uninitMap->map[idx].clazz != NULL &&
+                uninitMap->map[idx].clazz != clazz)
+            {
+                LOG_VFY("VFY: addr %d already set to %p, not setting to %p\n",
+                    addr, uninitMap->map[idx].clazz, clazz);
+                return -1;          // already set to something else??
+            }
+            uninitMap->map[idx].clazz = clazz;
+            return idx;
+        }
+    }
+
+    LOG_VFY("VFY: addr %d not found in uninit map\n", addr);
+    assert(false);      // shouldn't happen
+    return -1;
+}
+
/*
 * Get the class object at the specified index.  May be NULL if no class
 * has been associated with the slot yet (the map is calloc'd).
 */
ClassObject* dvmGetUninitInstance(const UninitInstanceMap* uninitMap, int idx)
{
    assert(idx >= 0 && idx < uninitMap->numEntries);
    return uninitMap->map[idx].clazz;
}
+
+/* determine if "type" is actually an object reference (init/uninit/zero) */
+static inline bool regTypeIsReference(RegType type) {
+    return (type > kRegTypeMAX || type == kRegTypeUninit ||
+            type == kRegTypeZero);
+}
+
/* determine if "type" is an uninitialized object reference */
/* (low byte holds the odd kRegTypeUninit tag; aligned class pointers
 * can never match it) */
static inline bool regTypeIsUninitReference(RegType type) {
    return ((type & kRegTypeUninitMask) == kRegTypeUninit);
}
+
+/* convert the initialized reference "type" to a ClassObject pointer */
+/* (does not expect uninit ref types or "zero") */
+static ClassObject* regTypeInitializedReferenceToClass(RegType type)
+{
+    assert(regTypeIsReference(type) && type != kRegTypeZero);
+    if ((type & 0x01) == 0) {
+        return (ClassObject*) type;
+    } else {
+        //LOG_VFY("VFY: attempted to use uninitialized reference\n");
+        return NULL;
+    }
+}
+
/* extract the index into the uninitialized instance map table */
/* (the slot number lives in the bits above the low-byte uninit tag) */
static inline int regTypeToUninitIndex(RegType type) {
    assert(regTypeIsUninitReference(type));
    return (type & ~kRegTypeUninitMask) >> kRegTypeUninitShift;
}
+
+/* convert the reference "type" to a ClassObject pointer */
+static ClassObject* regTypeReferenceToClass(RegType type,
+    const UninitInstanceMap* uninitMap)
+{
+    assert(regTypeIsReference(type) && type != kRegTypeZero);
+    if (regTypeIsUninitReference(type)) {
+        assert(uninitMap != NULL);
+        return dvmGetUninitInstance(uninitMap, regTypeToUninitIndex(type));
+    } else {
+        return (ClassObject*) type;
+    }
+}
+
/* convert the ClassObject pointer to an (initialized) register type */
/* (word-aligned pointers are even, so they can't collide with the odd
 * kRegTypeUninit tag) */
static inline RegType regTypeFromClass(ClassObject* clazz) {
    return (u4) clazz;
}
+
/* return the RegType for the uninitialized reference in slot "uidx" */
/* (tags the low byte with kRegTypeUninit, parks the slot in the high bits) */
static RegType regTypeFromUninitIndex(int uidx) {
    return (u4) (kRegTypeUninit | (uidx << kRegTypeUninitShift));
}
+
+
+/*
+ * ===========================================================================
+ *      Signature operations
+ * ===========================================================================
+ */
+
+/*
+ * Is this method a constructor?
+ */
+static bool isInitMethod(const Method* meth)
+{
+    return (*meth->name == '<' && strcmp(meth->name+1, "init>") == 0);
+}
+
+/*
+ * Look up a class reference given as a simple string descriptor.
+ */
+static ClassObject* lookupClassByDescriptor(const Method* meth,
+    const char* pDescriptor, bool* pOkay)
+{
+    /*
+     * The javac compiler occasionally puts references to nonexistent
+     * classes in signatures.  For example, if you have a non-static
+     * inner class with no constructor, the compiler provides
+     * a private <init> for you.  Constructing the class
+     * requires <init>(parent), but the outer class can't call
+     * that because the method is private.  So the compiler
+     * generates a package-scope <init>(parent,bogus) method that
+     * just calls the regular <init> (the "bogus" part being necessary
+     * to distinguish the signature of the synthetic method).
+     * Treating the bogus class as an instance of java.lang.Object
+     * allows the verifier to process the class successfully.
+     */
+
+    //LOGI("Looking up '%s'\n", typeStr);
+    ClassObject* clazz;
+    clazz = dvmFindClassNoInit(pDescriptor, meth->clazz->classLoader);
+    if (clazz == NULL) {
+        dvmClearOptException(dvmThreadSelf());
+        if (strchr(pDescriptor, '$') != NULL) {
+            LOGV("VFY: unable to find class referenced in "
+                "signature (%s)\n", pDescriptor);
+        } else {
+            LOG_VFY("VFY: unable to find class referenced in "
+                "signature (%s)\n", pDescriptor);
+        }
+
+        if (pDescriptor[0] == '[') {
+            /* We are looking at an array descriptor. */
+
+            /*
+             * There should never be a problem loading primitive arrays.  
+             */
+            if (pDescriptor[1] != 'L' && pDescriptor[1] != '[') {
+                LOG_VFY("VFY: invalid char in signature in '%s'\n",
+                    pDescriptor);
+                *pOkay = false;
+            }
+
+            /*
+             * Try to continue with base array type.  This will let
+             * us pass basic stuff (e.g. get array len) that wouldn't
+             * fly with an Object.  This is NOT correct if the
+             * missing type is a primitive array, but we should never
+             * have a problem loading those.  (I'm not convinced this
+             * is correct or even useful.  Just use Object here?)
+             */
+            clazz = dvmFindClassNoInit("[Ljava/lang/Object;",
+                meth->clazz->classLoader);
+        } else if (pDescriptor[0] == 'L') {
+            /*
+             * We are looking at a non-array reference descriptor;
+             * try to continue with base reference type.
+             */
+            clazz = gDvm.classJavaLangObject;
+        } else {
+            /* We are looking at a primitive type. */
+            LOG_VFY("VFY: invalid char in signature in '%s'\n", pDescriptor);
+            *pOkay = false;
+        }
+
+        if (clazz == NULL) {
+            *pOkay = false;
+        }
+    }
+
+    if (dvmIsPrimitiveClass(clazz)) {
+        LOG_VFY("VFY: invalid use of primitive type '%s'\n", pDescriptor);
+        *pOkay = false;
+        clazz = NULL;
+    }
+
+    return clazz;
+}
+
+/*
+ * Look up a class reference in a signature.  Could be an arg or the
+ * return value.
+ *
+ * Advances "*pSig" to the last character in the signature (that is, to
+ * the ';').
+ *
+ * NOTE: this is also expected to verify the signature.
+ */
+static ClassObject* lookupSignatureClass(const Method* meth, const char** pSig,
+    bool* pOkay)
+{
+    const char* sig = *pSig;
+    const char* endp = sig;
+
+    assert(sig != NULL && *sig == 'L');
+
+    while (*++endp != ';' && *endp != '\0')
+        ;
+    if (*endp != ';') {
+        LOG_VFY("VFY: bad signature component '%s' (missing ';')\n", sig);
+        *pOkay = false;
+        return NULL;
+    }
+
+    endp++;    /* Advance past the ';'. */
+    int typeLen = endp - sig;
+    char typeStr[typeLen+1]; /* +1 for the '\0' */
+    memcpy(typeStr, sig, typeLen);
+    typeStr[typeLen] = '\0';
+
+    *pSig = endp - 1; /* - 1 so that *pSig points at, not past, the ';' */
+
+    return lookupClassByDescriptor(meth, typeStr, pOkay);
+}
+
+/*
+ * Look up an array class reference in a signature.  Could be an arg or the
+ * return value.
+ *
+ * Advances "*pSig" to the last character in the signature.
+ *
+ * NOTE: this is also expected to verify the signature.
+ */
+static ClassObject* lookupSignatureArrayClass(const Method* meth,
+    const char** pSig, bool* pOkay)
+{
+    const char* sig = *pSig;
+    const char* endp = sig;
+
+    assert(sig != NULL && *sig == '[');
+
+    /* find the end */
+    while (*++endp == '[' && *endp != '\0')
+        ;
+
+    if (*endp == 'L') {
+        while (*++endp != ';' && *endp != '\0')
+            ;
+        if (*endp != ';') {
+            LOG_VFY("VFY: bad signature component '%s' (missing ';')\n", sig);
+            *pOkay = false;
+            return NULL;
+        }
+    }
+
+    int typeLen = endp - sig +1;
+    char typeStr[typeLen+1];
+    memcpy(typeStr, sig, typeLen);
+    typeStr[typeLen] = '\0';
+
+    *pSig = endp;
+
+    return lookupClassByDescriptor(meth, typeStr, pOkay);
+}
+
/*
 * Set the register types for the first instruction in the method based on
 * the method signature.  Arguments occupy the highest-numbered registers
 * (registersSize - insSize .. registersSize - 1).
 *
 * This has the side-effect of validating the signature.
 *
 * Returns "true" on success.
 */
static bool setTypesFromSignature(const Method* meth, RegType* regTypes,
    UninitInstanceMap* uninitMap)
{
    DexParameterIterator iterator;
    int actualArgs, expectedArgs, argStart;
    bool okay = true;

    dexParameterIteratorInit(&iterator, &meth->prototype);
    argStart = meth->registersSize - meth->insSize;
    expectedArgs = meth->insSize;     /* long/double count as two */
    actualArgs = 0;

    /*
     * Include the "this" pointer.
     */
    if (!dvmIsStaticMethod(meth)) {
        /*
         * If this is a constructor for a class other than java.lang.Object,
         * mark the first ("this") argument as uninitialized.  This restricts
         * field access until the superclass constructor is called.
         */
        if (isInitMethod(meth) && meth->clazz != gDvm.classJavaLangObject) {
            /* "this" always lands in slot 0 of the uninit map */
            int uidx = dvmSetUninitInstance(uninitMap, kUninitThisArgAddr,
                            meth->clazz);
            assert(uidx == 0);
            regTypes[argStart + actualArgs] = regTypeFromUninitIndex(uidx);
        } else {
            regTypes[argStart + actualArgs] = regTypeFromClass(meth->clazz);
        }
        actualArgs++;
    }

    /* walk the declared parameter list, filling in one register per arg
     * (two for long/double) */
    for (;;) {
        const char* descriptor = dexParameterIteratorNextDescriptor(&iterator);

        if (descriptor == NULL) {
            break;
        }

        if (actualArgs >= expectedArgs) {
            LOG_VFY("VFY: expected %d args, found more (%s)\n",
                expectedArgs, descriptor);
            goto bad_sig;
        }

        switch (*descriptor) {
        case 'L':
        case '[':
            /*
             * We assume that reference arguments are initialized.  The
             * only way it could be otherwise (assuming the caller was
             * verified) is if the current method is <init>, but in that
             * case it's effectively considered initialized the instant
             * we reach here (in the sense that we can return without
             * doing anything or call virtual methods).
             */
            {
                ClassObject* clazz =
                    lookupClassByDescriptor(meth, descriptor, &okay);
                if (!okay)
                    goto bad_sig;
                regTypes[argStart + actualArgs] = regTypeFromClass(clazz);
            }
            actualArgs++;
            break;
        case 'Z':
            regTypes[argStart + actualArgs] = kRegTypeBoolean;
            actualArgs++;
            break;
        case 'C':
            regTypes[argStart + actualArgs] = kRegTypeChar;
            actualArgs++;
            break;
        case 'B':
            regTypes[argStart + actualArgs] = kRegTypeByte;
            actualArgs++;
            break;
        case 'I':
            regTypes[argStart + actualArgs] = kRegTypeInteger;
            actualArgs++;
            break;
        case 'S':
            regTypes[argStart + actualArgs] = kRegTypeShort;
            actualArgs++;
            break;
        case 'F':
            regTypes[argStart + actualArgs] = kRegTypeFloat;
            actualArgs++;
            break;
        case 'D':
            regTypes[argStart + actualArgs] = kRegTypeDoubleLo;
            regTypes[argStart + actualArgs +1] = kRegTypeDoubleHi;
            actualArgs += 2;
            break;
        case 'J':
            regTypes[argStart + actualArgs] = kRegTypeLongLo;
            regTypes[argStart + actualArgs +1] = kRegTypeLongHi;
            actualArgs += 2;
            break;
        default:
            LOG_VFY("VFY: unexpected signature type char '%c'\n", *descriptor);
            goto bad_sig;
        }
    }

    if (actualArgs != expectedArgs) {
        LOG_VFY("VFY: expected %d args, found %d\n", expectedArgs, actualArgs);
        goto bad_sig;
    }

    const char* descriptor = dexProtoGetReturnType(&meth->prototype);

    /*
     * Validate return type.  We don't do the type lookup; just want to make
     * sure that it has the right format.  Only major difference from the
     * method argument format is that 'V' is supported.
     */
    switch (*descriptor) {
    case 'I':
    case 'C':
    case 'S':
    case 'B':
    case 'Z':
    case 'V':
    case 'F':
    case 'D':
    case 'J':
        /* primitive or void: must be a single character */
        if (*(descriptor+1) != '\0')
            goto bad_sig;
        break;
    case '[':
        /* single/multi, object/primitive */
        while (*++descriptor == '[')
            ;
        if (*descriptor == 'L') {
            while (*++descriptor != ';' && *descriptor != '\0')
                ;
            if (*descriptor != ';')
                goto bad_sig;
        } else {
            if (*(descriptor+1) != '\0')
                goto bad_sig;
        }
        break;
    case 'L':
        /* could be more thorough here, but shouldn't be required */
        while (*++descriptor != ';' && *descriptor != '\0')
            ;
        if (*descriptor != ';')
            goto bad_sig;
        break;
    default:
        goto bad_sig;
    }

    return true;

//fail:
//    LOG_VFY_METH(meth, "VFY:  bad sig\n");
//    return false;

bad_sig:
    {
        char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
        LOG_VFY("VFY: bad signature '%s' for %s.%s\n",
            desc, meth->clazz->descriptor, meth->name);
        free(desc);
    }
    return false;
}
+
+/*
+ * Return the register type for the method.  We can't just use the
+ * already-computed DalvikJniReturnType, because if it's a reference type
+ * we need to do the class lookup.
+ *
+ * Returned references are assumed to be initialized.
+ *
+ * Returns kRegTypeUnknown for "void".
+ */
+static RegType getMethodReturnType(const Method* meth)
+{
+    RegType type;
+    bool okay = true;
+    const char* descriptor = dexProtoGetReturnType(&meth->prototype);
+    
+    switch (*descriptor) {
+    case 'I':
+        type = kRegTypeInteger;
+        break;
+    case 'C':
+        type = kRegTypeChar;
+        break;
+    case 'S':
+        type = kRegTypeShort;
+        break;
+    case 'B':
+        type = kRegTypeByte;
+        break;
+    case 'Z':
+        type = kRegTypeBoolean;
+        break;
+    case 'V':
+        type = kRegTypeUnknown;
+        break;
+    case 'F':
+        type = kRegTypeFloat;
+        break;
+    case 'D':
+        type = kRegTypeDoubleLo;
+        break;
+    case 'J':
+        type = kRegTypeLongLo;
+        break;
+    case 'L':
+    case '[':
+        {
+            ClassObject* clazz =
+                lookupClassByDescriptor(meth, descriptor, &okay);
+            assert(okay);
+            type = regTypeFromClass(clazz);
+        }
+        break;
+    default:
+        /* we verified signature return type earlier, so this is impossible */
+        assert(false);
+        type = kRegTypeConflict;
+        break;
+    }
+
+    return type;
+}
+
+/*
+ * Convert a single-character signature value (i.e. a primitive type) to
+ * the corresponding RegType.  This is intended for access to object fields
+ * holding primitive types.
+ *
+ * Returns kRegTypeUnknown for objects, arrays, and void.
+ */
+static RegType primSigCharToRegType(char sigChar)
+{
+    RegType type;
+
+    switch (sigChar) {
+    case 'I':
+        type = kRegTypeInteger;
+        break;
+    case 'C':
+        type = kRegTypeChar;
+        break;
+    case 'S':
+        type = kRegTypeShort;
+        break;
+    case 'B':
+        type = kRegTypeByte;
+        break;
+    case 'Z':
+        type = kRegTypeBoolean;
+        break;
+    case 'F':
+        type = kRegTypeFloat;
+        break;
+    case 'D':
+        type = kRegTypeDoubleLo;
+        break;
+    case 'J':
+        type = kRegTypeLongLo;
+        break;
+    case 'V':
+    case 'L':
+    case '[':
+        type = kRegTypeUnknown;
+        break;
+    default:
+        assert(false);
+        type = kRegTypeUnknown;
+        break;
+    }
+
+    return type;
+}
+
+/*
+ * Verify the arguments to a method.  We're executing in "method", making
+ * a call to the method reference in vB.
+ *
+ * If this is a "direct" invoke, we allow calls to <init>.  For calls to
+ * <init>, the first argument may be an uninitialized reference.  Otherwise,
+ * calls to anything starting with '<' will be rejected, as will any
+ * uninitialized reference arguments.
+ *
+ * For non-static method calls, this will verify that the method call is
+ * appropriate for the "this" argument.
+ *
+ * The method reference is in vBBBB.  The "isRange" parameter determines
+ * whether we use 0-4 "args" values or a range of registers defined by
+ * vAA and vCCCC.
+ *
+ * Widening conversions on integers and references are allowed, but
+ * narrowing conversions are not.
+ *
+ * Returns the resolved method on success, NULL (and sets "*pOkay" to "false")
+ * on failure.
+ */
+static Method* verifyInvocationArgs(const Method* meth, const RegType* insnRegs,
+    const int insnRegCount, const DecodedInstruction* pDecInsn,
+    UninitInstanceMap* uninitMap, MethodType methodType, bool isRange,
+    bool isSuper, bool* pOkay)
+{
+    Method* resMethod;
+    char* sigOriginal = NULL;
+
+    /*
+     * Resolve the method.  This could be an abstract or concrete method
+     * depending on what sort of call we're making.
+     */
+    if (methodType == METHOD_INTERFACE) {
+        resMethod = dvmOptResolveInterfaceMethod(meth->clazz, pDecInsn->vB);
+    } else {
+        resMethod = dvmOptResolveMethod(meth->clazz, pDecInsn->vB, methodType);
+    }
+    if (resMethod == NULL) {
+        /* failed; print a meaningful failure message */
+        DexFile* pDexFile = meth->clazz->pDvmDex->pDexFile;
+        const DexMethodId* pMethodId;
+        const char* methodName;
+        char* methodDesc;
+        const char* classDescriptor;
+
+        pMethodId = dexGetMethodId(pDexFile, pDecInsn->vB);
+        methodName = dexStringById(pDexFile, pMethodId->nameIdx);
+        methodDesc = dexCopyDescriptorFromMethodId(pDexFile, pMethodId);
+        classDescriptor = dexStringByTypeIdx(pDexFile, pMethodId->classIdx);
+
+        LOG_VFY("VFY: unable to resolve %s method %u: %s.%s %s\n",
+            dvmMethodTypeStr(methodType), pDecInsn->vB,
+            classDescriptor, methodName, methodDesc);
+        free(methodDesc);
+        goto fail;
+    }
+
+    /*
+     * Only time you can explicitly call a method starting with '<' is when
+     * making a "direct" invocation on "<init>".  There are additional
+     * restrictions but we don't enforce them here.
+     */
+    if (resMethod->name[0] == '<') {
+        if (methodType != METHOD_DIRECT || !isInitMethod(resMethod)) {
+            LOG_VFY("VFY: invalid call to %s.%s\n",
+                    resMethod->clazz->descriptor, resMethod->name);
+            goto bad_sig;
+        }
+    }
+
+    /*
+     * If we're using invoke-super(method), make sure that the executing
+     * method's class' superclass has a vtable entry for the target method.
+     */
+    if (isSuper) {
+        assert(methodType == METHOD_VIRTUAL);
+        ClassObject* super = meth->clazz->super;
+        if (super == NULL || resMethod->methodIndex > super->vtableCount) {
+            char* desc = dexProtoCopyMethodDescriptor(&resMethod->prototype);
+            LOG_VFY("VFY: invalid invoke-super from %s.%s to super %s.%s %s\n",
+                    meth->clazz->descriptor, meth->name,
+                    (super == NULL) ? "-" : super->descriptor,
+                    resMethod->name, desc);
+            free(desc);
+            goto fail;
+        }
+    }
+
+    /*
+     * We use vAA as our expected arg count, rather than resMethod->insSize,
+     * because we need to match the call to the signature.  Also, we might
+     * might be calling through an abstract method definition (which doesn't
+     * have register count values).
+     */
+    sigOriginal = dexProtoCopyMethodDescriptor(&resMethod->prototype);
+    const char* sig = sigOriginal;
+    int expectedArgs = pDecInsn->vA;
+    int actualArgs = 0;
+
+    if (!isRange && expectedArgs > 5) {
+        LOG_VFY("VFY: invalid arg count in non-range invoke (%d)\n",
+            pDecInsn->vA);
+        goto fail;
+    }
+    if (expectedArgs > meth->outsSize) {
+        LOG_VFY("VFY: invalid arg count (%d) exceeds outsSize (%d)\n",
+            expectedArgs, meth->outsSize);
+        goto fail;
+    }
+
+    if (*sig++ != '(')
+        goto bad_sig;
+
+    /*
+     * Check the "this" argument, which must be an instance of the class
+     * that declared the method.  For an interface class, we don't do the
+     * full interface merge, so we can't do a rigorous check here (which
+     * is okay since we have to do it at runtime).
+     */
+    if (!dvmIsStaticMethod(resMethod)) {
+        ClassObject* actualThisRef;
+        RegType actualArgType;
+
+        actualArgType = getInvocationThis(insnRegs, insnRegCount, pDecInsn,
+                            pOkay);
+        if (!*pOkay)
+            goto fail;
+
+        if (regTypeIsUninitReference(actualArgType) && resMethod->name[0] != '<')
+        {
+            LOG_VFY("VFY: 'this' arg must be initialized\n");
+            goto fail;
+        }
+        if (methodType != METHOD_INTERFACE && actualArgType != kRegTypeZero) {
+            actualThisRef = regTypeReferenceToClass(actualArgType, uninitMap);
+            if (!dvmInstanceof(actualThisRef, resMethod->clazz)) {
+                LOG_VFY("VFY: 'this' arg '%s' not instance of '%s'\n",
+                        actualThisRef->descriptor,
+                        resMethod->clazz->descriptor);
+                goto fail;
+            }
+        }
+        actualArgs++;
+    }
+
+    /*
+     * Process the target method's signature.  This signature may or may not
+     * have been verified, so we can't assume it's properly formed.
+     */
+    while (*sig != '\0' && *sig != ')') {
+        if (actualArgs >= expectedArgs) {
+            LOG_VFY("VFY: expected %d args, found more (%c)\n",
+                expectedArgs, *sig);
+            goto bad_sig;
+        }
+
+        u4 getReg;
+        if (isRange)
+            getReg = pDecInsn->vC + actualArgs;
+        else
+            getReg = pDecInsn->arg[actualArgs];
+
+        switch (*sig) {
+        case 'L':
+            {
+                ClassObject* clazz = lookupSignatureClass(meth, &sig, pOkay);
+                if (!*pOkay)
+                    goto bad_sig;
+                verifyRegisterType(insnRegs, insnRegCount, getReg,
+                    regTypeFromClass(clazz), pOkay);
+                if (!*pOkay) {
+                    LOG_VFY("VFY: bad arg %d (into %s)\n",
+                            actualArgs, clazz->descriptor);
+                    goto bad_sig;
+                }
+            }
+            actualArgs++;
+            break;
+        case '[':
+            {
+                ClassObject* clazz =
+                    lookupSignatureArrayClass(meth, &sig, pOkay);
+                if (!*pOkay)
+                    goto bad_sig;
+                verifyRegisterType(insnRegs, insnRegCount, getReg,
+                    regTypeFromClass(clazz), pOkay);
+                if (!*pOkay) {
+                    LOG_VFY("VFY: bad arg %d (into %s)\n",
+                            actualArgs, clazz->descriptor);
+                    goto bad_sig;
+                }
+            }
+            actualArgs++;
+            break;
+        case 'Z':
+            verifyRegisterType(insnRegs, insnRegCount, getReg,
+                kRegTypeBoolean, pOkay);
+            actualArgs++;
+            break;
+        case 'C':
+            verifyRegisterType(insnRegs, insnRegCount, getReg,
+                kRegTypeChar, pOkay);
+            actualArgs++;
+            break;
+        case 'B':
+            verifyRegisterType(insnRegs, insnRegCount, getReg,
+                kRegTypeByte, pOkay);
+            actualArgs++;
+            break;
+        case 'I':
+            verifyRegisterType(insnRegs, insnRegCount, getReg,
+                kRegTypeInteger, pOkay);
+            actualArgs++;
+            break;
+        case 'S':
+            verifyRegisterType(insnRegs, insnRegCount, getReg,
+                kRegTypeShort, pOkay);
+            actualArgs++;
+            break;
+        case 'F':
+            verifyRegisterType(insnRegs, insnRegCount, getReg,
+                kRegTypeFloat, pOkay);
+            actualArgs++;
+            break;
+        case 'D':
+            verifyRegisterType(insnRegs, insnRegCount, getReg,
+                kRegTypeDoubleLo, pOkay);
+            actualArgs += 2;
+            break;
+        case 'J':
+            verifyRegisterType(insnRegs, insnRegCount, getReg,
+                kRegTypeLongLo, pOkay);
+            actualArgs += 2;
+            break;
+        default:
+            LOG_VFY("VFY: invocation target: bad signature type char '%c'\n",
+                *sig);
+            goto bad_sig;
+        }
+
+        sig++;
+    }
+    if (*sig != ')') {
+        char* desc = dexProtoCopyMethodDescriptor(&resMethod->prototype);
+        LOG_VFY("VFY: invocation target: bad signature '%s'\n", desc);
+        free(desc);
+        goto bad_sig;
+    }
+
+    if (actualArgs != expectedArgs) {
+        LOG_VFY("VFY: expected %d args, found %d\n", expectedArgs, actualArgs);
+        goto bad_sig;
+    }
+
+    free(sigOriginal);
+    return resMethod;
+
+bad_sig:
+    if (resMethod != NULL) {
+        char* desc = dexProtoCopyMethodDescriptor(&resMethod->prototype);
+        LOG_VFY("VFY:  rejecting call to %s.%s %s\n",
+                resMethod->clazz->descriptor, resMethod->name, desc);
+        free(desc);
+    }
+
+fail:
+    free(sigOriginal);
+    *pOkay = false;
+    return NULL;
+}
+
+/*
+ * Get the class object for the type of data stored in a field.  This isn't
+ * stored in the Field struct, so we have to recover it from the signature.
+ *
+ * This only works for reference types.  Don't call this for primitive types.
+ *
+ * If we can't find the class, we return java.lang.Object, so that
+ * verification can continue if a field is only accessed in trivial ways.
+ */
+static ClassObject* getFieldClass(const Method* meth, const Field* field)
+{
+    ClassObject* fieldClass;
+    const char* signature = field->signature;
+
+    if ((*signature == 'L') || (*signature == '[')) {
+        fieldClass = dvmFindClassNoInit(signature,
+                meth->clazz->classLoader);
+    } else {
+        return NULL;
+    }
+
+    if (fieldClass == NULL) {
+        dvmClearOptException(dvmThreadSelf());
+        LOGV("VFY: unable to find class '%s' for field %s.%s, trying Object\n",
+            field->signature, meth->clazz->descriptor, field->name);
+        fieldClass = gDvm.classJavaLangObject;
+    } else {
+        assert(!dvmIsPrimitiveClass(fieldClass));
+    }
+    return fieldClass;
+}
+
+
+/*
+ * ===========================================================================
+ *      Register operations
+ * ===========================================================================
+ */
+
+/*
+ * Get the type of register N, verifying that the register is valid.
+ *
+ * Sets "*pOkay" to false if the register number is out of range.
+ */
+static inline RegType getRegisterType(const RegType* insnRegs,
+    const int insnRegCount, u4 vsrc, bool* pOkay)
+{
+    RegType type;
+
+    if (vsrc >= (u4) insnRegCount) {
+        *pOkay = false;
+        return kRegTypeUnknown;
+    } else {
+        return insnRegs[vsrc];
+    }
+}
+
+/*
+ * Get the value from a register, and cast it to a ClassObject.  Sets
+ * "pOkay" to false if something fails.
+ *
+ * This fails if the register holds an uninitialized class.
+ *
+ * If the register holds kRegTypeZero, this returns a NULL pointer.
+ */
+static ClassObject* getClassFromRegister(const RegType* insnRegs,
+    const int insnRegCount, u4 vsrc, bool* pOkay)
+{
+    ClassObject* clazz = NULL;
+    RegType type;
+
+    /* get the element type of the array held in vsrc */
+    type = getRegisterType(insnRegs, insnRegCount, vsrc, pOkay);
+    if (!*pOkay)
+        goto bail;
+
+    /* if "always zero", we allow it to fail at runtime */
+    if (type == kRegTypeZero)
+        goto bail;
+
+    if (!regTypeIsReference(type)) {
+        LOG_VFY("VFY: tried to get class from non-ref register v%d (type=%d)\n",
+            vsrc, type);
+        *pOkay = false;
+        goto bail;
+    }
+    if (regTypeIsUninitReference(type)) {
+        LOG_VFY("VFY: register %u holds uninitialized reference\n", vsrc);
+        *pOkay = false;
+        goto bail;
+    }
+
+    clazz = regTypeInitializedReferenceToClass(type);
+
+bail:
+    return clazz;
+}
+
+/*
+ * Get the "this" pointer from a non-static method invocation.  This
+ * returns the RegType so the caller can decide whether it needs the
+ * reference to be initialized or not.  (Can also return kRegTypeZero
+ * if the reference can only be zero at this point.)
+ *
+ * The argument count is in vA, and the first argument is in vC, for both
+ * "simple" and "range" versions.  We just need to make sure vA is >= 1
+ * and then return vC.
+ */
+static RegType getInvocationThis(const RegType* insnRegs,
+    const int insnRegCount, const DecodedInstruction* pDecInsn, bool* pOkay)
+{
+    RegType thisType = kRegTypeUnknown;
+
+    if (pDecInsn->vA < 1) {
+        LOG_VFY("VFY: invoke lacks 'this'\n");
+        *pOkay = false;
+        goto bail;
+    }
+
+    /* get the element type of the array held in vsrc */
+    thisType = getRegisterType(insnRegs, insnRegCount, pDecInsn->vC, pOkay);
+    if (!*pOkay) {
+        LOG_VFY("VFY: failed to get this from register %u\n", pDecInsn->vC);
+        goto bail;
+    }
+
+    if (!regTypeIsReference(thisType)) {
+        LOG_VFY("VFY: tried to get class from non-ref register v%d (type=%d)\n",
+            pDecInsn->vC, thisType);
+        *pOkay = false;
+        goto bail;
+    }
+
+bail:
+    return thisType;
+}
+
/*
 * Set the type of register N, verifying that the register is valid.  If
 * "newType" is the "Lo" part of a 64-bit value, register N+1 will be
 * set to "newType+1".
 *
 * Sets "*pOkay" to false if the register number is out of range.
 */
static void setRegisterType(RegType* insnRegs, const int insnRegCount,
    u4 vdst, RegType newType, bool* pOkay)
{
    //LOGD("set-reg v%u = %d\n", vdst, newType);
    switch (newType) {
    /* category-1 values occupy a single register */
    case kRegTypeUnknown:
    case kRegTypeBoolean:
    case kRegTypeOne:
    case kRegTypeByte:
    case kRegTypePosByte:
    case kRegTypeShort:
    case kRegTypePosShort:
    case kRegTypeChar:
    case kRegTypeInteger:
    case kRegTypeFloat:
    case kRegTypeZero:
        if (vdst >= (u4) insnRegCount) {
            *pOkay = false;
        } else {
            insnRegs[vdst] = newType;
        }
        break;
    /* category-2 values occupy a register pair: "Lo" in vdst, "Hi" in vdst+1 */
    case kRegTypeLongLo:
    case kRegTypeDoubleLo:
        if (vdst+1 >= (u4) insnRegCount) {
            *pOkay = false;
        } else {
            insnRegs[vdst] = newType;
            insnRegs[vdst+1] = newType+1;   /* "Hi" type is defined as Lo+1 */
        }
        break;
    case kRegTypeLongHi:
    case kRegTypeDoubleHi:
        /* should never set these explicitly */
        *pOkay = false;
        break;

    case kRegTypeUninit:
    default:
        /* all remaining values are expected to encode reference types */
        if (regTypeIsReference(newType)) {
            if (vdst >= (u4) insnRegCount) {
                *pOkay = false;
                break;
            }
            insnRegs[vdst] = newType;

            /* if it's an initialized ref, make sure it's not a prim class */
            assert(regTypeIsUninitReference(newType) ||
                !dvmIsPrimitiveClass(regTypeInitializedReferenceToClass(newType)));
            break;
        }
        /* bad - fall through */

    case kRegTypeConflict:      // should only be set during a merge
        LOG_VFY("Unexpected set type %d\n", newType);
        assert(false);
        *pOkay = false;
        break;
    }
}
+
/*
 * Verify that the contents of the specified register have the specified
 * type (or can be converted to it through an implicit widening conversion).
 *
 * In theory we could use this to modify the type of the source register,
 * e.g. a generic 32-bit constant, once used as a float, would thereafter
 * remain a float.  There is no compelling reason to require this though.
 *
 * If "vsrc" is a reference, both it and the "vsrc" register must be
 * initialized ("vsrc" may be Zero).  This will verify that the value in
 * the register is an instance of checkType, or if checkType is an
 * interface, verify that the register implements checkType.
 */
static void verifyRegisterType(const RegType* insnRegs, const int insnRegCount,
    u4 vsrc, RegType checkType, bool* pOkay)
{
    /* register number must be in range for the current method */
    if (vsrc >= (u4) insnRegCount) {
        *pOkay = false;
        return;
    }

    RegType srcType = insnRegs[vsrc];

    //LOGD("check-reg v%u = %d\n", vsrc, checkType);
    switch (checkType) {
    /* category-1 numeric types: implicit widening is allowed */
    case kRegTypeFloat:
    case kRegTypeBoolean:
    case kRegTypePosByte:
    case kRegTypeByte:
    case kRegTypePosShort:
    case kRegTypeShort:
    case kRegTypeChar:
    case kRegTypeInteger:
        if (!canConvertTo1nr(srcType, checkType)) {
            LOG_VFY("VFY: register1 v%u type %d, wanted %d\n",
                vsrc, srcType, checkType);
            *pOkay = false;
        }
        break;
    /* category-2 types: both halves of the register pair are checked */
    case kRegTypeLongLo:
    case kRegTypeDoubleLo:
        if (vsrc+1 >= (u4) insnRegCount) {
            LOG_VFY("VFY: register2 v%u out of range (%d)\n",
                vsrc, insnRegCount);
            *pOkay = false;
        } else if (insnRegs[vsrc+1] != srcType+1) {
            /* the "Hi" half must pair with the "Lo" half actually present */
            LOG_VFY("VFY: register2 v%u-%u values %d,%d\n",
                vsrc, vsrc+1, insnRegs[vsrc], insnRegs[vsrc+1]);
            *pOkay = false;
        } else if (!canConvertTo2(srcType, checkType)) {
            LOG_VFY("VFY: register2 v%u type %d, wanted %d\n",
                vsrc, srcType, checkType);
            *pOkay = false;
        }
        break;

    case kRegTypeLongHi:
    case kRegTypeDoubleHi:
    case kRegTypeZero:
    case kRegTypeOne:
    case kRegTypeUnknown:
    case kRegTypeConflict:
        /* should never be checking for these explicitly */
        assert(false);
        *pOkay = false;
        return;
    case kRegTypeUninit:
    default:
        /* everything else should encode a reference type */
        /* make sure checkType is initialized reference */
        if (!regTypeIsReference(checkType)) {
            LOG_VFY("VFY: unexpected check type %d\n", checkType);
            assert(false);
            *pOkay = false;
            break;
        }
        if (regTypeIsUninitReference(checkType)) {
            LOG_VFY("VFY: uninitialized ref not expected as reg check\n");
            *pOkay = false;
            break;
        }
        /* make sure srcType is initialized reference or always-NULL */
        if (!regTypeIsReference(srcType)) {
            LOG_VFY("VFY: register1 v%u type %d, wanted ref\n", vsrc, srcType);
            *pOkay = false;
            break;
        }
        if (regTypeIsUninitReference(srcType)) {
            LOG_VFY("VFY: register1 v%u holds uninitialized ref\n", vsrc);
            *pOkay = false;
            break;
        }
        /* if the register isn't Zero, make sure it's an instance of check */
        if (srcType != kRegTypeZero) {
            ClassObject* srcClass = regTypeInitializedReferenceToClass(srcType);
            ClassObject* checkClass = regTypeInitializedReferenceToClass(checkType);
            assert(srcClass != NULL);
            assert(checkClass != NULL);

            if (dvmIsInterfaceClass(checkClass)) {
                /*
                 * All objects implement all interfaces as far as the
                 * verifier is concerned.  The runtime has to sort it out.
                 * See comments above findCommonSuperclass.
                 */
                /*
                if (srcClass != checkClass && 
                    !dvmImplements(srcClass, checkClass))
                {
                    LOG_VFY("VFY: %s does not implement %s\n",
                            srcClass->descriptor, checkClass->descriptor);
                    *pOkay = false;
                }
                */
            } else {
                if (!dvmInstanceof(srcClass, checkClass)) {
                    LOG_VFY("VFY: %s is not instance of %s\n",
                            srcClass->descriptor, checkClass->descriptor);
                    *pOkay = false;
                }
            }
        }
        break;
    }
}
+
+/*
+ * Set the type of the "result" register.  Mostly this exists to expand
+ * "insnRegCount" to encompass the result register.
+ */
+static void setResultRegisterType(RegType* insnRegs, const int insnRegCount,
+    RegType newType, bool* pOkay)
+{
+    setRegisterType(insnRegs, insnRegCount + kExtraRegs,
+        RESULT_REGISTER(insnRegCount), newType, pOkay);
+}
+
+
+/*
+ * Update all registers holding "uninitType" to instead hold the
+ * corresponding initialized reference type.  This is called when an
+ * appropriate <init> method is invoked -- all copies of the reference
+ * must be marked as initialized.
+ */
+static void markRefsAsInitialized(RegType* insnRegs, int insnRegCount,
+    UninitInstanceMap* uninitMap, RegType uninitType, bool* pOkay)
+{
+    ClassObject* clazz;
+    RegType initType;
+    int i, changed;
+
+    clazz = dvmGetUninitInstance(uninitMap, regTypeToUninitIndex(uninitType));
+    if (clazz == NULL) {
+        LOGE("VFY: unable to find type=0x%x (idx=%d)\n",
+            uninitType, regTypeToUninitIndex(uninitType));
+        *pOkay = false;
+        return;
+    }
+    initType = regTypeFromClass(clazz);
+
+    changed = 0;
+    for (i = 0; i < insnRegCount; i++) {
+        if (insnRegs[i] == uninitType) {
+            insnRegs[i] = initType;
+            changed++;
+        }
+    }
+    //LOGD("VFY: marked %d registers as initialized\n", changed);
+    assert(changed > 0);
+
+    return;
+}
+
+/*
+ * We're creating a new instance of class C at address A.  Any registers
+ * holding instances previously created at address A must be initialized
+ * by now.  If not, we mark them as "conflict" to prevent them from being
+ * used (otherwise, markRefsAsInitialized would mark the old ones and the
+ * new ones at the same time).
+ */
+static void markUninitRefsAsInvalid(RegType* insnRegs, int insnRegCount,
+    UninitInstanceMap* uninitMap, RegType uninitType)
+{
+    int i, changed;
+
+    changed = 0;
+    for (i = 0; i < insnRegCount; i++) {
+        if (insnRegs[i] == uninitType) {
+            insnRegs[i] = kRegTypeConflict;
+            changed++;
+        }
+    }
+
+    //if (changed)
+    //    LOGD("VFY: marked %d uninitialized registers as invalid\n", changed);
+}
+
+/*
+ * Find the start of the register set for the specified instruction in
+ * the current method.
+ */
+static RegType* getRegisterLine(const RegisterTable* regTable, int insnIdx)
+{
+    return regTable->addrRegs[insnIdx];
+}
+
+/*
+ * Copy a bunch of registers.
+ */
+static inline void copyRegisters(RegType* dst, const RegType* src,
+    int numRegs)
+{
+    memcpy(dst, src, numRegs * sizeof(RegType));
+}
+
+/*
+ * Compare a bunch of registers.
+ */
+static inline int compareRegisters(const RegType* src1, const RegType* src2,
+    int numRegs)
+{
+    return memcmp(src1, src2, numRegs * sizeof(RegType));
+}
+
/*
 * Register type categories, for type checking.
 *
 * The spec says category 1 includes boolean, byte, char, short, int, float,
 * reference, and returnAddress.  Category 2 includes long and double.
 *
 * We treat object references separately, so we have "category1nr".  We
 * don't support jsr/ret, so there is no "returnAddress" type.
 */
typedef enum TypeCategory {
    kTypeCategoryUnknown = 0,
    kTypeCategory1nr,           // boolean, byte, char, short, int, float
    kTypeCategory2,             // long, double
    kTypeCategoryRef,           // object reference
} TypeCategory;
+
+/*
+ * See if "type" matches "cat".  All we're really looking for here is that
+ * we're not mixing and matching 32-bit and 64-bit quantities, and we're
+ * not mixing references with numerics.  (For example, the arguments to
+ * "a < b" could be integers of different sizes, but they must both be
+ * integers.  Dalvik is less specific about int vs. float, so we treat them
+ * as equivalent here.)
+ *
+ * For category 2 values, "type" must be the "low" half of the value.
+ *
+ * Sets "*pOkay" to false if not.
+ */
+static void checkTypeCategory(RegType type, TypeCategory cat, bool* pOkay)
+{
+    switch (cat) {
+    case kTypeCategory1nr:
+        switch (type) {
+        case kRegTypeFloat:
+        case kRegTypeZero:
+        case kRegTypeOne:
+        case kRegTypeBoolean:
+        case kRegTypePosByte:
+        case kRegTypeByte:
+        case kRegTypePosShort:
+        case kRegTypeShort:
+        case kRegTypeChar:
+        case kRegTypeInteger:
+            break;
+        default:
+            *pOkay = false;
+            break;
+        }
+        break;
+
+    case kTypeCategory2:
+        switch (type) {
+        case kRegTypeLongLo:
+        case kRegTypeDoubleLo:
+            break;
+        default:
+            *pOkay = false;
+            break;
+        }
+        break;
+
+    case kTypeCategoryRef:
+        if (type != kRegTypeZero && !regTypeIsReference(type))
+            *pOkay = false;
+        break;
+
+    default:
+        assert(false);
+        *pOkay = false;
+        break;
+    }
+}
+
+/*
+ * For a category 2 register pair, verify that "typeh" is the appropriate
+ * high part for "typel".
+ *
+ * Does not verify that "typel" is in fact the low part of a 64-bit
+ * register pair.
+ */
+static void checkWidePair(RegType typel, RegType typeh, bool* pOkay)
+{
+    if ((typeh != typel+1))
+        *pOkay = false;
+}
+
+/*
+ * Implement category-1 "move" instructions.  Copy a 32-bit value from
+ * "vsrc" to "vdst".
+ *
+ * "insnRegCount" is the number of registers available.  The "vdst" and
+ * "vsrc" values are checked against this.
+ */
+static void copyRegister1(RegType* insnRegs, int insnRegCount, u4 vdst,
+    u4 vsrc, TypeCategory cat, bool* pOkay)
+{
+    RegType type = getRegisterType(insnRegs, insnRegCount, vsrc, pOkay);
+    if (*pOkay)
+        checkTypeCategory(type, cat, pOkay);
+    if (*pOkay)
+        setRegisterType(insnRegs, insnRegCount, vdst, type, pOkay);
+
+    if (!*pOkay) {
+        LOG_VFY("VFY: copy1 v%u<-v%u type=%d cat=%d\n", vdst, vsrc, type, cat);
+    }
+}
+
+/*
+ * Implement category-2 "move" instructions.  Copy a 64-bit value from
+ * "vsrc" to "vdst".  This copies both halves of the register.
+ */
+static void copyRegister2(RegType* insnRegs, int insnRegCount, u4 vdst,
+    u4 vsrc, bool* pOkay)
+{
+    RegType typel = getRegisterType(insnRegs, insnRegCount, vsrc, pOkay);
+    RegType typeh = getRegisterType(insnRegs, insnRegCount, vsrc+1, pOkay);
+    if (*pOkay) {
+        checkTypeCategory(typel, kTypeCategory2, pOkay);
+        checkWidePair(typel, typeh, pOkay);
+    }
+    if (*pOkay)
+        setRegisterType(insnRegs, insnRegCount, vdst, typel, pOkay);
+
+    if (!*pOkay) {
+        LOG_VFY("VFY: copy2 v%u<-v%u type=%d/%d\n", vdst, vsrc, typel, typeh);
+    }
+}
+
+/*
+ * Implement "move-result".  Copy the category-1 value from the result
+ * register to another register, and reset the result register.
+ *
+ * We can't just call copyRegister1 with an altered insnRegCount,
+ * because that would affect the test on "vdst" as well.
+ */
+static void copyResultRegister1(RegType* insnRegs, const int insnRegCount,
+    u4 vdst, TypeCategory cat, bool* pOkay)
+{
+    RegType type;
+    u4 vsrc;
+    
+    vsrc = RESULT_REGISTER(insnRegCount);
+    type = getRegisterType(insnRegs, insnRegCount + kExtraRegs, vsrc, pOkay);
+    if (*pOkay)
+        checkTypeCategory(type, cat, pOkay);
+    if (*pOkay) {
+        setRegisterType(insnRegs, insnRegCount, vdst, type, pOkay);
+        insnRegs[vsrc] = kRegTypeUnknown;
+    }
+
+    if (!*pOkay) {
+        LOG_VFY("VFY: copyRes1 v%u<-v%u cat=%d type=%d\n",
+            vdst, vsrc, cat, type);
+    }
+}
+
+/*
+ * Implement "move-result-wide".  Copy the category-2 value from the result
+ * register to another register, and reset the result register.
+ *
+ * We can't just call copyRegister2 with an altered insnRegCount,
+ * because that would affect the test on "vdst" as well.
+ */
+static void copyResultRegister2(RegType* insnRegs, const int insnRegCount,
+    u4 vdst, bool* pOkay)
+{
+    RegType typel, typeh;
+    u4 vsrc;
+    
+    vsrc = RESULT_REGISTER(insnRegCount);
+    typel = getRegisterType(insnRegs, insnRegCount + kExtraRegs, vsrc, pOkay);
+    typeh = getRegisterType(insnRegs, insnRegCount + kExtraRegs, vsrc+1, pOkay);
+    if (*pOkay) {
+        checkTypeCategory(typel, kTypeCategory2, pOkay);
+        checkWidePair(typel, typeh, pOkay);
+    }
+    if (*pOkay) {
+        setRegisterType(insnRegs, insnRegCount, vdst, typel, pOkay);
+        insnRegs[vsrc] = kRegTypeUnknown;
+        insnRegs[vsrc+1] = kRegTypeUnknown;
+    }
+
+    if (!*pOkay) {
+        LOG_VFY("VFY: copyRes2 v%u<-v%u type=%d/%d\n",
+            vdst, vsrc, typel, typeh);
+    }
+}
+
/*
 * Verify types for a simple two-register instruction (e.g. "neg-int").
 * "dstType" is stored into vA, and "srcType" is verified against vB.
 */
static void checkUnop(RegType* insnRegs, const int insnRegCount,
    DecodedInstruction* pDecInsn, RegType dstType, RegType srcType,
    bool* pOkay)
{
    /* operand in vB must match srcType; result type replaces vA */
    verifyRegisterType(insnRegs, insnRegCount, pDecInsn->vB, srcType, pOkay);
    setRegisterType(insnRegs, insnRegCount, pDecInsn->vA, dstType, pOkay);
}
+
+/*
+ * We're performing an operation like "and-int/2addr" that can be
+ * performed on booleans as well as integers.  We get no indication of
+ * boolean-ness, but we can infer it from the types of the arguments.
+ *
+ * Assumes we've already validated reg1/reg2.
+ *
+ * Returns true if both args are Boolean, Zero, or One.
+ */
+static bool upcastBooleanOp(RegType* insnRegs, const int insnRegCount,
+    u4 reg1, u4 reg2)
+{
+    RegType type1, type2;
+
+    type1 = insnRegs[reg1];
+    type2 = insnRegs[reg2];
+
+    if ((type1 == kRegTypeBoolean || type1 == kRegTypeZero ||
+            type1 == kRegTypeOne) &&
+        (type2 == kRegTypeBoolean || type2 == kRegTypeZero ||
+            type2 == kRegTypeOne))
+    {
+        return true;
+    }
+    return false;
+}
+
+/*
+ * Verify types for A two-register instruction with a literal constant
+ * (e.g. "add-int/lit8").  "dstType" is stored into vA, and "srcType" is
+ * verified against vB.
+ *
+ * If "checkBooleanOp" is set, we use the constant value in vC.
+ */
+static void checkLitop(RegType* insnRegs, const int insnRegCount,
+    DecodedInstruction* pDecInsn, RegType dstType, RegType srcType,
+    bool checkBooleanOp, bool* pOkay)
+{
+    verifyRegisterType(insnRegs, insnRegCount, pDecInsn->vB, srcType, pOkay);
+    if (*pOkay && checkBooleanOp) {
+        assert(dstType == kRegTypeInteger);
+        /* check vB with the call, then check the constant manually */
+        if (upcastBooleanOp(insnRegs, insnRegCount, pDecInsn->vB, pDecInsn->vB)
+            && (pDecInsn->vC == 0 || pDecInsn->vC == 1))
+        {
+            dstType = kRegTypeBoolean;
+        }
+    }
+    setRegisterType(insnRegs, insnRegCount, pDecInsn->vA, dstType, pOkay);
+}
+
+/*
+ * Verify types for a simple three-register instruction (e.g. "add-int").
+ * "dstType" is stored into vA, and "srcType1"/"srcType2" are verified
+ * against vB/vC.
+ */
+static void checkBinop(RegType* insnRegs, const int insnRegCount,
+    DecodedInstruction* pDecInsn, RegType dstType, RegType srcType1,
+    RegType srcType2, bool checkBooleanOp, bool* pOkay)
+{
+    verifyRegisterType(insnRegs, insnRegCount, pDecInsn->vB, srcType1, pOkay);
+    verifyRegisterType(insnRegs, insnRegCount, pDecInsn->vC, srcType2, pOkay);
+    if (*pOkay && checkBooleanOp) {
+        assert(dstType == kRegTypeInteger);
+        if (upcastBooleanOp(insnRegs, insnRegCount, pDecInsn->vB, pDecInsn->vC))
+            dstType = kRegTypeBoolean;
+    }
+    setRegisterType(insnRegs, insnRegCount, pDecInsn->vA, dstType, pOkay);
+}
+
+/*
+ * Verify types for a binary "2addr" operation.  "srcType1"/"srcType2"
+ * are verified against vA/vB, then "dstType" is stored into vA.
+ */
+static void checkBinop2addr(RegType* insnRegs, const int insnRegCount,
+    DecodedInstruction* pDecInsn, RegType dstType, RegType srcType1,
+    RegType srcType2, bool checkBooleanOp, bool* pOkay)
+{
+    verifyRegisterType(insnRegs, insnRegCount, pDecInsn->vA, srcType1, pOkay);
+    verifyRegisterType(insnRegs, insnRegCount, pDecInsn->vB, srcType2, pOkay);
+    if (*pOkay && checkBooleanOp) {
+        assert(dstType == kRegTypeInteger);
+        if (upcastBooleanOp(insnRegs, insnRegCount, pDecInsn->vA, pDecInsn->vB))
+            dstType = kRegTypeBoolean;
+    }
+    setRegisterType(insnRegs, insnRegCount, pDecInsn->vA, dstType, pOkay);
+}
+
+
+/*
+ * ===========================================================================
+ *      Register merge
+ * ===========================================================================
+ */
+
+/*
+ * Compute the "class depth" of a class.  This is the distance from the
+ * class to the top of the tree, chasing superclass links.  java.lang.Object
+ * has a class depth of 0.
+ */
+static int getClassDepth(ClassObject* clazz)
+{
+    int depth = 0;
+
+    while (clazz->super != NULL) {
+        clazz = clazz->super;
+        depth++;
+    }
+    return depth;
+}
+
+/*
+ * Given two classes, walk up the superclass tree to find a common
+ * ancestor.  (Called from findCommonSuperclass().)
+ *
+ * TODO: consider caching the class depth in the class object so we don't
+ * have to search for it here.
+ */
+static ClassObject* digForSuperclass(ClassObject* c1, ClassObject* c2)
+{
+    int depth1, depth2;
+
+    depth1 = getClassDepth(c1);
+    depth2 = getClassDepth(c2);
+
+    if (gDebugVerbose) {
+        LOGVV("COMMON: %s(%d) + %s(%d)\n",
+            c1->descriptor, depth1, c2->descriptor, depth2);
+    }
+
+    /* pull the deepest one up */
+    if (depth1 > depth2) {
+        while (depth1 > depth2) {
+            c1 = c1->super;
+            depth1--;
+        }
+    } else {
+        while (depth2 > depth1) {
+            c2 = c2->super;
+            depth2--;
+        }
+    }
+
+    /* walk up in lock-step */
+    while (c1 != c2) {
+        c1 = c1->super;
+        c2 = c2->super;
+
+        assert(c1 != NULL && c2 != NULL);
+    }
+
+    if (gDebugVerbose) {
+        LOGVV("      : --> %s\n", c1->descriptor);
+    }
+    return c1;
+}
+
+/*
+ * Merge two array classes.  We can't use the general "walk up to the
+ * superclass" merge because the superclass of an array is always Object.
+ * We want String[] + Integer[] = Object[].  This works for higher dimensions
+ * as well, e.g. String[][] + Integer[][] = Object[][].
+ *
+ * If Foo1 and Foo2 are subclasses of Foo, Foo1[] + Foo2[] = Foo[].
+ *
+ * If Class implements Type, Class[] + Type[] = Type[].
+ *
+ * If the dimensions don't match, we want to convert to an array of Object
+ * with the least dimension, e.g. String[][] + String[][][][] = Object[][].
+ *
+ * This gets a little awkward because we may have to ask the VM to create
+ * a new array type with the appropriate element and dimensions.  However, we
+ * shouldn't be doing this often.
+ */
+static ClassObject* findCommonArraySuperclass(ClassObject* c1, ClassObject* c2)
+{
+    ClassObject* arrayClass = NULL;
+    ClassObject* commonElem;
+    int i, numDims;
+
+    assert(c1->arrayDim > 0);
+    assert(c2->arrayDim > 0);
+
+    if (c1->arrayDim == c2->arrayDim) {
+        //commonElem = digForSuperclass(c1->elementClass, c2->elementClass);
+        commonElem = findCommonSuperclass(c1->elementClass, c2->elementClass);
+        numDims = c1->arrayDim;
+    } else {
+        if (c1->arrayDim < c2->arrayDim)
+            numDims = c1->arrayDim;
+        else
+            numDims = c2->arrayDim;
+        commonElem = c1->super;     // == java.lang.Object
+    }
+
+    /* walk from the element to the (multi-)dimensioned array type */
+    for (i = 0; i < numDims; i++) {
+        arrayClass = dvmFindArrayClassForElement(commonElem);
+        commonElem = arrayClass;
+    }
+
+    LOGVV("ArrayMerge '%s' + '%s' --> '%s'\n",
+        c1->descriptor, c2->descriptor, arrayClass->descriptor);
+    return arrayClass;
+}
+
+/*
+ * Find the first common superclass of the two classes.  We're not
+ * interested in common interfaces.
+ *
+ * The easiest way to do this for concrete classes is to compute the "class
+ * depth" of each, move up toward the root of the deepest one until they're
+ * at the same depth, then walk both up to the root until they match.
+ *
+ * If both classes are arrays of non-primitive types, we need to merge
+ * based on array depth and element type.
+ *
+ * If one class is an interface, we check to see if the other class/interface
+ * (or one of its predecessors) implements the interface.  If so, we return
+ * the interface; otherwise, we return Object.
+ *
+ * NOTE: we continue the tradition of "lazy interface handling".  To wit,
+ * suppose we have three classes:
+ *   One implements Fancy, Free
+ *   Two implements Fancy, Free
+ *   Three implements Free
+ * where Fancy and Free are unrelated interfaces.  The code requires us
+ * to merge One into Two.  Ideally we'd use a common interface, which
+ * gives us a choice between Fancy and Free, and no guidance on which to
+ * use.  If we use Free, we'll be okay when Three gets merged in, but if
+ * we choose Fancy, we're hosed.  The "ideal" solution is to create a
+ * set of common interfaces and carry that around, merging further references
+ * into it.  This is a pain.  The easy solution is to simply boil them
+ * down to Objects and let the runtime invokeinterface call fail, which
+ * is what we do.
+ */
+static ClassObject* findCommonSuperclass(ClassObject* c1, ClassObject* c2)
+{
+    assert(!dvmIsPrimitiveClass(c1) && !dvmIsPrimitiveClass(c2));
+
+    if (c1 == c2)
+        return c1;
+
+    if (dvmIsInterfaceClass(c1) && dvmImplements(c2, c1)) {
+        if (gDebugVerbose)
+            LOGVV("COMMON/I1: %s + %s --> %s\n",
+                c1->descriptor, c2->descriptor, c1->descriptor);
+        return c1;
+    }
+    if (dvmIsInterfaceClass(c2) && dvmImplements(c1, c2)) {
+        if (gDebugVerbose)
+            LOGVV("COMMON/I2: %s + %s --> %s\n",
+                c1->descriptor, c2->descriptor, c2->descriptor);
+        return c2;
+    }
+
+    if (dvmIsArrayClass(c1) && dvmIsArrayClass(c2) &&
+        !dvmIsPrimitiveClass(c1->elementClass) &&
+        !dvmIsPrimitiveClass(c2->elementClass))
+    {
+        return findCommonArraySuperclass(c1, c2);
+    }
+
+    return digForSuperclass(c1, c2);
+}
+
+/*
+ * Merge two RegType values.
+ *
+ * Sets "pChanged" to "true" if the result doesn't match "type1".
+ */
+static RegType mergeTypes(RegType type1, RegType type2, bool* pChanged)
+{
+    RegType result;
+
+    /*
+     * Check for trivial case so we don't have to hit memory.
+     */
+    if (type1 == type2)
+        return type1;
+
+    /*
+     * Use the table if we can, and reject any attempts to merge something
+     * from the table with a reference type.
+     *
+     * The uninitialized table entry at index zero *will* show up as a
+     * simple kRegTypeUninit value.  Since this cannot be merged with
+     * anything but itself, the rules do the right thing.
+     */
+    if (type1 < kRegTypeMAX) {
+        if (type2 < kRegTypeMAX) {
+            result = gMergeTab[type1][type2];
+        } else {
+            /* simple + reference == conflict, usually */
+            if (type1 == kRegTypeZero)
+                result = type2;
+            else
+                result = kRegTypeConflict;
+        }
+    } else {
+        if (type2 < kRegTypeMAX) {
+            /* reference + simple == conflict, usually */
+            if (type2 == kRegTypeZero)
+                result = type1;
+            else
+                result = kRegTypeConflict;
+        } else {
+            /* merging two references */
+            if (regTypeIsUninitReference(type1) ||
+                regTypeIsUninitReference(type2))
+            {
+                /* can't merge uninit with anything but self */
+                result = kRegTypeConflict;
+            } else {
+                ClassObject* clazz1 = regTypeInitializedReferenceToClass(type1);
+                ClassObject* clazz2 = regTypeInitializedReferenceToClass(type2);
+                ClassObject* mergedClass;
+
+                mergedClass = findCommonSuperclass(clazz1, clazz2);
+                assert(mergedClass != NULL);
+                result = regTypeFromClass(mergedClass);
+            }
+        }
+    }
+
+    if (result != type1)
+        *pChanged = true;
+    return result;
+}
+
+/*
+ * Control can transfer to "nextInsn".
+ *
+ * Merge the registers from "workRegs" into "regTypes" at "nextInsn", and
+ * set the "changed" flag if the registers have changed.
+ */
+static void updateRegisters(const Method* meth, InsnFlags* insnFlags,
+    RegisterTable* regTable, int nextInsn, const RegType* workRegs)
+{
+    RegType* targetRegs = getRegisterLine(regTable, nextInsn);
+    const int insnRegCount = meth->registersSize;
+
+#if 0
+    if (!dvmInsnIsBranchTarget(insnFlags, nextInsn)) {
+        LOGE("insnFlags[0x%x]=0x%08x\n", nextInsn, insnFlags[nextInsn]);
+        LOGE(" In %s.%s %s\n",
+            meth->clazz->descriptor, meth->name, meth->descriptor);
+        assert(false);
+    }
+#endif
+
+    if (!dvmInsnIsVisitedOrChanged(insnFlags, nextInsn)) {
+        /*
+         * We haven't processed this instruction before, and we haven't
+         * touched the registers here, so there's nothing to "merge".  Copy
+         * the registers over and mark it as changed.  (This is the only
+         * way a register can transition out of "unknown", so this is not
+         * just an optimization.)
+         */
+        LOGVV("COPY into 0x%04x\n", nextInsn);
+        copyRegisters(targetRegs, workRegs, insnRegCount + kExtraRegs);
+        dvmInsnSetChanged(insnFlags, nextInsn, true);
+    } else {
+        if (gDebugVerbose) {
+            LOGVV("MERGE into 0x%04x\n", nextInsn);
+            //dumpRegTypes(meth, insnFlags, targetRegs, 0, "targ", NULL, 0);
+            //dumpRegTypes(meth, insnFlags, workRegs, 0, "work", NULL, 0);
+        }
+        /* merge registers, set Changed only if different */
+        bool changed = false;
+        int i;
+
+        for (i = 0; i < insnRegCount + kExtraRegs; i++) {
+            targetRegs[i] = mergeTypes(targetRegs[i], workRegs[i], &changed);
+        }
+
+        if (gDebugVerbose) {
+            //LOGI(" RESULT (changed=%d)\n", changed);
+            //dumpRegTypes(meth, insnFlags, targetRegs, 0, "rslt", NULL, 0);
+        }
+
+        if (changed)
+            dvmInsnSetChanged(insnFlags, nextInsn, true);
+    }
+}
+
+
+/*
+ * ===========================================================================
+ *      Utility functions
+ * ===========================================================================
+ */
+
+/*
+ * Output a code verifier warning message.  For the pre-verifier it's not
+ * a big deal if something fails (and it may even be expected), but if
+ * we're doing just-in-time verification it's significant.
+ */
+void dvmLogVerifyFailure(const Method* meth, const char* format, ...)
+{
+    va_list ap;
+    int logLevel;
+
+    if (gDvm.optimizing)
+        return; /* logLevel = ANDROID_LOG_DEBUG; */
+    else
+        logLevel = ANDROID_LOG_WARN;
+
+    va_start(ap, format);
+    LOG_PRI_VA(logLevel, LOG_TAG, format, ap);
+    if (meth != NULL) {
+        char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
+        LOG_PRI(logLevel, LOG_TAG, "VFY:  rejected %s.%s %s\n",
+            meth->clazz->descriptor, meth->name, desc);
+        free(desc);
+    }
+}
+
+/*
+ * Extract the relative offset from a branch instruction.
+ *
+ * Returns "false" on failure (e.g. this isn't a branch instruction).
+ */
+bool dvmGetBranchTarget(const Method* meth, InsnFlags* insnFlags,
+    int curOffset, int* pOffset, bool* pConditional)
+{
+    const u2* insns = meth->insns + curOffset;
+    int tmp;
+
+    switch (*insns & 0xff) {
+    case OP_GOTO:
+        *pOffset = ((s2) *insns) >> 8;
+        *pConditional = false;
+        break;
+    case OP_GOTO_32:
+        *pOffset = insns[1] | (((u4) insns[2]) << 16);
+        *pConditional = false;
+        break;
+    case OP_GOTO_16:
+        *pOffset = (s2) insns[1];
+        *pConditional = false;
+        break;
+    case OP_IF_EQ:
+    case OP_IF_NE:
+    case OP_IF_LT:
+    case OP_IF_GE:
+    case OP_IF_GT:
+    case OP_IF_LE:
+    case OP_IF_EQZ:
+    case OP_IF_NEZ:
+    case OP_IF_LTZ:
+    case OP_IF_GEZ:
+    case OP_IF_GTZ:
+    case OP_IF_LEZ:
+        *pOffset = (s2) insns[1];
+        *pConditional = true;
+        break;
+    default:
+        return false;
+        break;
+    }
+
+    return true;
+}
+
+
/*
 * Look up an instance field, specified by "fieldIdx", that is going to be
 * accessed in object "objType".  This resolves the field and then verifies
 * that the class containing the field is an instance of the reference in
 * "objType".
 *
 * It is possible for "objType" to be kRegTypeZero, meaning that we might
 * have a null reference.  This is a runtime problem, so we allow it,
 * skipping some of the type checks.
 *
 * In general, "objType" must be an initialized reference.  However, we
 * allow it to be uninitialized if this is an "<init>" method and the field
 * is declared within the "objType" class.
 *
 * Returns an InstField on success, returns NULL and sets "*pOkay" to false
 * on failure.
 */
static InstField* getInstField(const Method* meth,
    const UninitInstanceMap* uninitMap, RegType objType, int fieldIdx,
    bool* pOkay)
{
    InstField* instField = NULL;
    ClassObject* objClass;
    bool mustBeLocal = false;

    /* the object register must hold a reference, not a primitive value */
    if (!regTypeIsReference(objType)) {
        LOG_VFY("VFY: attempt to access field of non-reference type %d\n",
            objType);
        *pOkay = false;
        goto bail;
    }

    instField = dvmOptResolveInstField(meth->clazz, fieldIdx);
    if (instField == NULL) {
        LOG_VFY("VFY: unable to resolve instance field %u\n", fieldIdx);
        *pOkay = false;
        goto bail;
    }

    /*
     * Known-null reference: the access will throw at runtime, so we skip
     * the remaining static checks and deliberately return the resolved
     * field with *pOkay untouched.
     */
    if (objType == kRegTypeZero)
        goto bail;

    /*
     * Access to fields in uninitialized objects is allowed if this is
     * the <init> method for the object and the field in question is
     * declared by this class.
     */
    objClass = regTypeReferenceToClass(objType, uninitMap);
    assert(objClass != NULL);
    if (regTypeIsUninitReference(objType)) {
        /* only the object's own constructor may touch it pre-init */
        if (!isInitMethod(meth) || meth->clazz != objClass) {
            LOG_VFY("VFY: attempt to access field via uninitialized ref\n");
            *pOkay = false;
            goto bail;
        }
        mustBeLocal = true;
    }

    /* the field's declaring class must be a supertype of the reference */
    if (!dvmInstanceof(objClass, instField->field.clazz)) {
        LOG_VFY("VFY: invalid field access (field %s.%s, through %s ref)\n",
                instField->field.clazz->descriptor, instField->field.name,
                objClass->descriptor);
        *pOkay = false;
        goto bail;
    }

    if (mustBeLocal) {
        /* for uninit ref, make sure it's defined by this class, not super */
        /* (pointer-range test against this class's ifields array) */
        if (instField < objClass->ifields ||
            instField >= objClass->ifields + objClass->ifieldCount)
        {
            LOG_VFY("VFY: invalid constructor field access (field %s in %s)\n",
                    instField->field.name, objClass->descriptor);
            *pOkay = false;
            goto bail;
        }
    }

bail:
    return instField;
}
+
+/*
+ * Look up a static field.
+ *
+ * Returns a StaticField on success, returns NULL and sets "*pOkay" to false
+ * on failure.
+ */
+static StaticField* getStaticField(const Method* meth, int fieldIdx,
+    bool* pOkay)
+{
+    StaticField* staticField;
+
+    staticField = dvmOptResolveStaticField(meth->clazz, fieldIdx);
+    if (staticField == NULL) {
+        LOG_VFY("VFY: unable to resolve static field %u\n", fieldIdx);
+        *pOkay = false;
+        goto bail;
+    }
+
+bail:
+    return staticField;
+}
+
+/*
+ * Check constraints on constructor return.  Specifically, make sure that
+ * the "this" argument got initialized.
+ *
+ * The "this" argument to <init> uses code offset kUninitThisArgAddr, which
+ * puts it at the start of the list in slot 0.  If we see a register with
+ * an uninitialized slot 0 reference, we know it somehow didn't get
+ * initialized.
+ *
+ * Returns "true" if all is well.
+ */
+static bool checkConstructorReturn(const Method* meth, const RegType* insnRegs,
+    const int insnRegCount)
+{
+    int i;
+
+    if (!isInitMethod(meth))
+        return true;
+
+    RegType uninitThis = regTypeFromUninitIndex(kUninitThisArgSlot);
+
+    for (i = 0; i < insnRegCount; i++) {
+        if (insnRegs[i] == uninitThis) {
+            LOG_VFY("VFY: <init> returning without calling superclass init\n");
+            return false;
+        }
+    }
+    return true;
+}
+
+/*
+ * Verify that the target instruction is not "move-exception".  It's important
+ * that the only way to execute a move-exception is as the first instruction
+ * of an exception handler.
+ *
+ * Returns "true" if all is well, "false" if the target instruction is
+ * move-exception.
+ */
+static bool checkMoveException(const Method* meth, int insnIdx,
+    const char* logNote)
+{
+    assert(insnIdx >= 0 && insnIdx < (int)dvmGetMethodInsnsSize(meth));
+
+    if ((meth->insns[insnIdx] & 0xff) == OP_MOVE_EXCEPTION) {
+        LOG_VFY("VFY: invalid use of move-exception\n");
+        return false;
+    }
+    return true;
+}
+
/*
 * For the "move-exception" instruction at "insnIdx", which must be at an
 * exception handler address, determine the first common superclass of
 * all exceptions that can land here.  (For javac output, we're probably
 * looking at multiple spans of bytecode covered by one "try" that lands
 * at an exception-specific "catch", but in general the handler could be
 * shared for multiple exceptions.)
 *
 * Returns NULL if no matching exception handler can be found, or if the
 * exception is not a subclass of Throwable.
 */
static ClassObject* getCaughtExceptionType(const Method* meth, int insnIdx)
{
    const DexCode* pCode;
    DexFile* pDexFile;
    ClassObject* commonSuper = NULL;
    u4 handlersSize;
    u4 offset;
    u4 i;

    pDexFile = meth->clazz->pDvmDex->pDexFile;
    pCode = dvmGetMethodCode(meth);

    /* a method with no "try" blocks has no handler lists at all */
    if (pCode->triesSize != 0) {
        handlersSize = dexGetHandlersSize(pCode);
        offset = dexGetFirstHandlerOffset(pCode);
    } else {
        handlersSize = 0;
        offset = 0;
    }

    /* outer loop: each handler list in the method */
    for (i = 0; i < handlersSize; i++) {
        DexCatchIterator iterator;
        dexCatchIteratorInit(&iterator, pCode, offset);

        /* inner loop: each catch entry in this handler list */
        for (;;) {
            const DexCatchHandler* handler = dexCatchIteratorNext(&iterator);

            if (handler == NULL) {
                break;
            }

            /* only entries that land on our instruction matter */
            if (handler->address == (u4) insnIdx) {
                ClassObject* clazz;

                /* kDexNoIndex marks a catch-all; treat it as Throwable */
                if (handler->typeIdx == kDexNoIndex)
                    clazz = gDvm.classJavaLangThrowable;
                else
                    clazz = dvmOptResolveClass(meth->clazz, handler->typeIdx);

                if (clazz == NULL) {
                    /* unresolved type: log and skip, keep scanning others */
                    LOGD("VFY: unable to resolve exceptionIdx=%u\n",
                        handler->typeIdx);
                } else {
                    /* fold this type into the running common supertype */
                    if (commonSuper == NULL)
                        commonSuper = clazz;
                    else
                        commonSuper = findCommonSuperclass(clazz, commonSuper);
                }
            }
        }

        /* the next handler list starts where this one ended */
        offset = dexCatchIteratorGetEndOffset(&iterator, pCode);
    }

    if (commonSuper == NULL) {
        /* no handler covers this address: the caller treats this as failure */
        LOG_VFY_METH(meth,
            "VFY: unable to find exception handler at addr 0x%x\n", insnIdx);
    }

    return commonSuper;
}
+
+/*
+ * Initialize the RegisterTable.
+ *
+ * Every instruction address can have a different set of information about
+ * what's in which register, but for verification purposes we only need to
+ * store it at branch target addresses (because we merge into that).
+ *
+ * If we need to generate tables describing reference type usage for
+ * "exact gc", we will need to save the complete set.
+ *
+ * By zeroing out the storage we are effectively initializing the register
+ * information to kRegTypeUnknown.
+ */
+static bool initRegisterTable(const Method* meth, const InsnFlags* insnFlags,
+    RegisterTable* regTable)
+{
+    const int insnsSize = dvmGetMethodInsnsSize(meth);
+    int i;
+
+    regTable->insnRegCount = meth->registersSize + kExtraRegs;
+    regTable->addrRegs = (RegType**) calloc(insnsSize, sizeof(RegType*));
+    if (regTable->addrRegs == NULL)
+        return false;
+
+    /*
+     * "Full" means "every address that holds the start of an instruction".
+     * "Not full" means "every address that can be branched to".
+     *
+     * "Full" seems to require > 6x the memory on average.  Fortunately we
+     * don't need to hold on to it for very long.
+     */
+    if (USE_FULL_TABLE) {
+        int insnCount = 0;
+
+        for (i = 0; i < insnsSize; i++) {
+            if (dvmInsnIsOpcode(insnFlags, i))
+                insnCount++;
+        }
+
+        regTable->regAlloc = (RegType*)
+            calloc(regTable->insnRegCount * insnCount, sizeof(RegType));
+        if (regTable->regAlloc == NULL)
+            return false;
+
+        RegType* regPtr = regTable->regAlloc;
+        for (i = 0; i < insnsSize; i++) {
+            if (dvmInsnIsOpcode(insnFlags, i)) {
+                regTable->addrRegs[i] = regPtr;
+                regPtr += regTable->insnRegCount;
+            }
+        }
+        assert(regPtr - regTable->regAlloc ==
+               regTable->insnRegCount * insnCount);
+    } else {
+        int branchCount = 0;
+
+        for (i = 0; i < insnsSize; i++) {
+            if (dvmInsnIsBranchTarget(insnFlags, i))
+                branchCount++;
+        }
+        assert(branchCount > 0);
+
+        regTable->regAlloc = (RegType*)
+            calloc(regTable->insnRegCount * branchCount, sizeof(RegType));
+        if (regTable->regAlloc == NULL)
+            return false;
+
+        RegType* regPtr = regTable->regAlloc;
+        for (i = 0; i < insnsSize; i++) {
+            if (dvmInsnIsBranchTarget(insnFlags, i)) {
+                regTable->addrRegs[i] = regPtr;
+                regPtr += regTable->insnRegCount;
+            }
+        }
+
+        assert(regPtr - regTable->regAlloc ==
+               regTable->insnRegCount * branchCount);
+    }
+
+    assert(regTable->addrRegs[0] != NULL);
+    return true;
+}
+
+
+/*
+ * ===========================================================================
+ *      Entry point and driver loop
+ * ===========================================================================
+ */
+
+/*
+ * Entry point for the detailed code-flow analysis.
+ */
+bool dvmVerifyCodeFlow(const Method* meth, InsnFlags* insnFlags,
+    UninitInstanceMap* uninitMap)
+{
+    bool result = false;
+    const int insnsSize = dvmGetMethodInsnsSize(meth);
+    const u2* insns = meth->insns;
+    int i, offset;
+    bool isConditional;
+    RegisterTable regTable;
+
+    memset(&regTable, 0, sizeof(regTable));
+
+#ifndef NDEBUG
+    checkMergeTab();     // only need to do this when table changes
+#endif
+
+    /*
+     * We rely on these for verification of const-class, const-string,
+     * and throw instructions.  Make sure we have them.
+     */
+    if (gDvm.classJavaLangClass == NULL)
+        gDvm.classJavaLangClass =
+            dvmFindSystemClassNoInit("Ljava/lang/Class;");
+    if (gDvm.classJavaLangString == NULL)
+        gDvm.classJavaLangString =
+            dvmFindSystemClassNoInit("Ljava/lang/String;");
+    if (gDvm.classJavaLangThrowable == NULL)
+        gDvm.classJavaLangThrowable =
+            dvmFindSystemClassNoInit("Ljava/lang/Throwable;");
+    if (gDvm.classJavaLangObject == NULL)
+        gDvm.classJavaLangObject =
+            dvmFindSystemClassNoInit("Ljava/lang/Object;");
+
+    if (meth->registersSize * insnsSize > 2*1024*1024) {
+        /* should probably base this on actual memory requirements */
+        LOG_VFY_METH(meth,
+            "VFY: arbitrarily rejecting large method (regs=%d count=%d)\n",
+            meth->registersSize, insnsSize);
+        goto bail;
+    }
+
+    /*
+     * Create register lists, and initialize them to "Unknown".
+     */
+    if (!initRegisterTable(meth, insnFlags, &regTable))
+        goto bail;
+
+    /*
+     * Initialize the types of the registers that correspond to the
+     * method arguments.  We can determine this from the method signature.
+     */
+    if (!setTypesFromSignature(meth, regTable.addrRegs[0], uninitMap))
+        goto bail;
+
+    /*
+     * Run the verifier.
+     */
+    if (!doCodeVerification(meth, insnFlags, &regTable, uninitMap))
+        goto bail;
+
+    /*
+     * Success.  Reduce regTypes to a compact bitmap representation for the
+     * benefit of exact GC.
+     *
+     * (copy to LinearAlloc area? after verify, DexOpt gathers up all the
+     * successful ones and generates a new section in the DEX file so we
+     * can see who got verified)
+     */
+
+    result = true;
+
+bail:
+    free(regTable.addrRegs);
+    free(regTable.regAlloc);
+    return result;
+}
+
+/*
+ * Grind through the instructions.
+ *
+ * The basic strategy is as outlined in v3 4.11.1.2: set the "changed" bit
+ * on the first instruction, process it (setting additional "changed" bits),
+ * and repeat until there are no more.
+ *
+ * v3 4.11.1.1
+ * - (N/A) operand stack is always the same size
+ * - operand stack [registers] contain the correct types of values
+ * - local variables [registers] contain the correct types of values
+ * - methods are invoked with the appropriate arguments
+ * - fields are assigned using values of appropriate types
+ * - opcodes have the correct type values in operand registers
+ * - there is never an uninitialized class instance in a local variable in
+ *   code protected by an exception handler (operand stack is okay, because
+ *   the operand stack is discarded when an exception is thrown) [can't
+ *   know what's a local var w/o the debug info -- should fall out of
+ *   register typing]
+ *
+ * v3 4.11.1.2
+ * - execution cannot fall off the end of the code
+ *
+ * (We also do many of the items described in the "static checks" sections,
+ * because it's easier to do them here.)
+ *
+ * We need an array of RegType values, one per register, for every
+ * instruction.  In theory this could become quite large -- up to several
+ * megabytes for a monster function.  For self-preservation we reject
+ * anything that requires more than a certain amount of memory.  (Typical
+ * "large" should be on the order of 4K code units * 8 registers.)  This
+ * will likely have to be adjusted.
+ *
+ *
+ * The spec forbids backward branches when there's an uninitialized reference
+ * in a register.  The idea is to prevent something like this:
+ *   loop:
+ *     move r1, r0
+ *     new-instance r0, MyClass
+ *     ...
+ *     if-eq rN, loop  // once
+ *   initialize r0
+ *
+ * This leaves us with two different instances, both allocated by the
+ * same instruction, but only one is initialized.  The scheme outlined in
+ * v3 4.11.1.4 wouldn't catch this, so they work around it by preventing
+ * backward branches.  We achieve identical results without restricting
+ * code reordering by specifying that you can't execute the new-instance
+ * instruction if a register contains an uninitialized instance created
+ * by that same instrutcion.
+ */
+static bool doCodeVerification(const Method* meth, InsnFlags* insnFlags,
+    RegisterTable* regTable, UninitInstanceMap* uninitMap)
+{
+    const int insnsSize = dvmGetMethodInsnsSize(meth);
+    const u2* insns = meth->insns;
+    RegType workRegs[meth->registersSize + kExtraRegs];
+    bool result = false;
+    bool debugVerbose = false;
+    int insnIdx, startGuess, prevAddr;
+
+    /*
+     * Begin by marking the first instruction as "changed".
+     */
+    dvmInsnSetChanged(insnFlags, 0, true);
+
+    if (doVerboseLogging(meth)) {
+        IF_LOGI() {
+            char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
+            LOGI("Now verifying: %s.%s %s (ins=%d regs=%d)\n",
+                meth->clazz->descriptor, meth->name, desc,
+                meth->insSize, meth->registersSize);
+            LOGI(" ------ [0    4    8    12   16   20   24   28   32   36\n");
+            free(desc);
+        }
+        debugVerbose = true;
+        gDebugVerbose = true;
+    } else {
+        gDebugVerbose = false;
+    }
+
+    startGuess = 0;
+
+    /*
+     * Continue until no instructions are marked "changed".
+     */
+    while (true) {
+        /*
+         * Find the first marked one.  Use "startGuess" as a way to find
+         * one quickly.
+         */
+        for (insnIdx = startGuess; insnIdx < insnsSize; insnIdx++) {
+            if (dvmInsnIsChanged(insnFlags, insnIdx))
+                break;
+        }
+
+        if (insnIdx == insnsSize) {
+            if (startGuess != 0) {
+                /* try again, starting from the top */
+                startGuess = 0;
+                continue;
+            } else {
+                /* all flags are clear */
+                break;
+            }
+        }
+
+        /*
+         * We carry the working set of registers from instruction to
+         * instruction.  If this address can be the target of a branch
+         * (or throw) instruction, or if we're skipping around chasing
+         * "changed" flags, we need to load the set of registers from
+         * the table.
+         *
+         * Because we always prefer to continue on to the next instruction,
+         * we should never have a situation where we have a stray
+         * "changed" flag set on an instruction that isn't a branch target.
+         */
+        if (dvmInsnIsBranchTarget(insnFlags, insnIdx)) {
+            RegType* insnRegs = getRegisterLine(regTable, insnIdx);
+            assert(insnRegs != NULL);
+            copyRegisters(workRegs, insnRegs, meth->registersSize + kExtraRegs);
+
+            if (debugVerbose) {
+                dumpRegTypes(meth, insnFlags, workRegs, insnIdx, NULL,uninitMap,
+                    SHOW_REG_DETAILS);
+            }
+
+        } else {
+            if (debugVerbose) {
+                dumpRegTypes(meth, insnFlags, workRegs, insnIdx, NULL,uninitMap,
+                    SHOW_REG_DETAILS);
+            }
+
+#ifndef NDEBUG
+            /*
+             * Sanity check: retrieve the stored register line (assuming
+             * a full table) and make sure it actually matches.
+             */
+            RegType* insnRegs = getRegisterLine(regTable, insnIdx);
+            if (insnRegs != NULL &&
+                compareRegisters(workRegs, insnRegs,
+                    meth->registersSize + kExtraRegs) != 0)
+            {
+                int ii;
+                char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
+                LOG_VFY("HUH? workRegs diverged in %s.%s %s\n",
+                        meth->clazz->descriptor, meth->name, desc);
+                free(desc);
+                dumpRegTypes(meth, insnFlags, workRegs, 0, "work",
+                    uninitMap, DRT_SHOW_REF_TYPES | DRT_SHOW_LOCALS);
+                dumpRegTypes(meth, insnFlags, insnRegs, 0, "insn",
+                    uninitMap, DRT_SHOW_REF_TYPES | DRT_SHOW_LOCALS);
+            }
+#endif
+        }
+
+        //LOGI("process %s.%s %s %d\n",
+        //    meth->clazz->descriptor, meth->name, meth->descriptor, insnIdx);
+        if (!verifyInstruction(meth, insnFlags, regTable, workRegs, insnIdx,
+                uninitMap, &startGuess))
+        {
+            //LOGD("+++ %s bailing at %d\n", meth->name, insnIdx);
+            goto bail;
+        }
+
+        /*
+         * Clear "changed" and mark as visited.
+         */
+        dvmInsnSetVisited(insnFlags, insnIdx, true);
+        dvmInsnSetChanged(insnFlags, insnIdx, false);
+    }
+
+    if (DEAD_CODE_SCAN) {
+        /*
+         * Scan for dead code.  There's nothing "evil" about dead code, but it
+         * indicates a flaw somewhere down the line, possibly in the verifier.
+         */
+        int deadStart = -1;
+        for (insnIdx = 0; insnIdx < insnsSize;
+            insnIdx += dvmInsnGetWidth(insnFlags, insnIdx))
+        {
+            /*
+             * Switch-statement data doesn't get "visited" by scanner.  It
+             * may or may not be preceded by a padding NOP.
+             */
+            int instr = meth->insns[insnIdx];
+            if (instr == kPackedSwitchSignature ||
+                instr == kSparseSwitchSignature ||
+                instr == kArrayDataSignature ||
+                (instr == OP_NOP &&
+                 (meth->insns[insnIdx+1] == kPackedSwitchSignature ||
+                  meth->insns[insnIdx+1] == kSparseSwitchSignature ||
+                  meth->insns[insnIdx+1] == kArrayDataSignature)))
+            {
+                dvmInsnSetVisited(insnFlags, insnIdx, true);
+            }
+
+            if (!dvmInsnIsVisited(insnFlags, insnIdx)) {
+                if (deadStart < 0)
+                    deadStart = insnIdx;
+            } else if (deadStart >= 0) {
+                IF_LOGD() {
+                    char* desc =
+                        dexProtoCopyMethodDescriptor(&meth->prototype);
+                    LOGD("VFY: dead code 0x%04x-%04x in %s.%s %s\n",
+                        deadStart, insnIdx-1,
+                        meth->clazz->descriptor, meth->name, desc);
+                    free(desc);
+                }
+                
+                deadStart = -1;
+            }
+        }
+        if (deadStart >= 0) {
+            IF_LOGD() {
+                char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
+                LOGD("VFY: dead code 0x%04x-%04x in %s.%s %s\n",
+                    deadStart, insnIdx-1,
+                    meth->clazz->descriptor, meth->name, desc);
+                free(desc);
+            }
+        }
+    }
+
+    result = true;
+
+bail:
+    return result;
+}
+
+
+/*
+ * Perform verification for a single instruction.
+ *
+ * This requires fully decoding the instruction to determine the effect
+ * it has on registers.
+ *
+ * Finds zero or more following instructions and sets the "changed" flag
+ * if execution at that point needs to be (re-)evaluated.  Register changes
+ * are merged into "regTypes" at the target addresses.  Does not set or
+ * clear any other flags in "insnFlags".
+ */
+static bool verifyInstruction(const Method* meth, InsnFlags* insnFlags,
+    RegisterTable* regTable, RegType* workRegs, int insnIdx,
+    UninitInstanceMap* uninitMap, int* pStartGuess)
+{
+    const int insnsSize = dvmGetMethodInsnsSize(meth);
+    const u2* insns = meth->insns + insnIdx;
+    bool result = false;
+
+    /*
+     * Once we finish decoding the instruction, we need to figure out where
+     * we can go from here.  There are three possible ways to transfer
+     * control to another statement:
+     *
+     * (1) Continue to the next instruction.  Applies to all but
+     *     unconditional branches, method returns, and exception throws.
+     * (2) Branch to one or more possible locations.  Applies to branches
+     *     and switch statements.
+     * (3) Exception handlers.  Applies to any instruction that can
+     *     throw an exception that is handled by an encompassing "try"
+     *     block.  (We simplify this to be any instruction that can
+     *     throw any exception.)
+     *
+     * We can also return, in which case there is no successor instruction
+     * from this point.
+     *
+     * The behavior is determined by the InstrFlags.
+     */
+
+    const DexFile* pDexFile = meth->clazz->pDvmDex->pDexFile;
+    RegType entryRegs[meth->registersSize + kExtraRegs];
+    ClassObject* resClass;
+    const char* className;
+    int branchTarget = 0;
+    const int insnRegCount = meth->registersSize;
+    RegType tmpType;
+    DecodedInstruction decInsn;
+    bool justSetResult = false;
+    bool okay = true;
+
+#ifndef NDEBUG
+    memset(&decInsn, 0x81, sizeof(decInsn));
+#endif
+    dexDecodeInstruction(gDvm.instrFormat, insns, &decInsn);
+
+    const int nextFlags = dexGetInstrFlags(gDvm.instrFlags, decInsn.opCode);
+
+    /*
+     * Make a copy of the previous register state.  If the instruction
+     * throws an exception, we merge *this* into the destination rather
+     * than workRegs, because we don't want the result from the "successful"
+     * code path (e.g. a check-cast that "improves" a type) to be visible
+     * to the exception handler.
+     */
+    if ((nextFlags & kInstrCanThrow) != 0 && dvmInsnIsInTry(insnFlags, insnIdx))
+    {
+        copyRegisters(entryRegs, workRegs, meth->registersSize + kExtraRegs);
+    } else {
+#ifndef NDEBUG
+        memset(entryRegs, 0xdd,
+            (meth->registersSize + kExtraRegs) * sizeof(RegType));
+#endif
+    }
+
+    switch (decInsn.opCode) {
+    case OP_NOP:
+        /* no effect on anything */
+        break;
+
+    case OP_MOVE:
+    case OP_MOVE_FROM16:
+    case OP_MOVE_16:
+        copyRegister1(workRegs, insnRegCount, decInsn.vA, decInsn.vB,
+            kTypeCategory1nr, &okay);
+        break;
+    case OP_MOVE_WIDE:
+    case OP_MOVE_WIDE_FROM16:
+    case OP_MOVE_WIDE_16:
+        copyRegister2(workRegs, insnRegCount, decInsn.vA, decInsn.vB, &okay);
+        break;
+    case OP_MOVE_OBJECT:
+    case OP_MOVE_OBJECT_FROM16:
+    case OP_MOVE_OBJECT_16:
+        copyRegister1(workRegs, insnRegCount, decInsn.vA, decInsn.vB,
+            kTypeCategoryRef, &okay);
+        break;
+
+    /*
+     * The move-result instructions copy data out of a "pseudo-register"
+     * with the results from the last method invocation.  In practice we
+     * might want to hold the result in an actual CPU register, so the
+     * Dalvik spec requires that these only appear immediately after an
+     * invoke or filled-new-array.
+     *
+     * These calls invalidate the "result" register.  (This is now
+     * redundant with the reset done below, but it can make the debug info
+     * easier to read in some cases.)
+     */
+    case OP_MOVE_RESULT:
+        copyResultRegister1(workRegs, insnRegCount, decInsn.vA,
+            kTypeCategory1nr, &okay);
+        break;
+    case OP_MOVE_RESULT_WIDE:
+        copyResultRegister2(workRegs, insnRegCount, decInsn.vA, &okay);
+        break;
+    case OP_MOVE_RESULT_OBJECT:
+        copyResultRegister1(workRegs, insnRegCount, decInsn.vA,
+            kTypeCategoryRef, &okay);
+        break;
+
+    case OP_MOVE_EXCEPTION:
+        /*
+         * This statement can only appear as the first instruction in an
+         * exception handler (though not all exception handlers need to
+         * have one of these).  We verify that as part of extracting the
+         * exception type from the catch block list.
+         *
+         * "resClass" will hold the closest common superclass of all
+         * exceptions that can be handled here.
+         */
+        resClass = getCaughtExceptionType(meth, insnIdx);
+        if (resClass == NULL) {
+            okay = false;
+        } else {
+            setRegisterType(workRegs, insnRegCount, decInsn.vA,
+                regTypeFromClass(resClass), &okay);
+        }
+        break;
+
+    case OP_RETURN_VOID:
+        okay = checkConstructorReturn(meth, workRegs, insnRegCount);
+        if (okay && getMethodReturnType(meth) != kRegTypeUnknown) {
+            LOG_VFY("VFY: return-void not expected\n");
+            okay = false;
+        }
+        break;
+    case OP_RETURN:
+        okay = checkConstructorReturn(meth, workRegs, insnRegCount);
+        if (okay) {
+            /* check the method signature */
+            RegType returnType = getMethodReturnType(meth);
+            checkTypeCategory(returnType, kTypeCategory1nr, &okay);
+            if (!okay)
+                LOG_VFY("VFY: return-32 not expected\n");
+
+            /* check the register contents */
+            returnType = getRegisterType(workRegs, insnRegCount, decInsn.vA,
+                &okay);
+            checkTypeCategory(returnType, kTypeCategory1nr, &okay);
+            if (!okay)
+                LOG_VFY("VFY: return-32 on invalid register v%d\n", decInsn.vA);
+        }
+        break;
+    case OP_RETURN_WIDE:
+        okay = checkConstructorReturn(meth, workRegs, insnRegCount);
+        if (okay) {
+            RegType returnType, returnTypeHi;
+
+            /* check the method signature */
+            returnType = getMethodReturnType(meth);
+            checkTypeCategory(returnType, kTypeCategory2, &okay);
+            if (!okay)
+                LOG_VFY("VFY: return-wide not expected\n");
+
+            /* check the register contents */
+            returnType = getRegisterType(workRegs, insnRegCount, decInsn.vA,
+                &okay);
+            returnTypeHi = getRegisterType(workRegs, insnRegCount,
+                decInsn.vA +1, &okay);
+            if (okay) {
+                checkTypeCategory(returnType, kTypeCategory2, &okay);
+                checkWidePair(returnType, returnTypeHi, &okay);
+            }
+            if (!okay) {
+                LOG_VFY("VFY: return-wide on invalid register pair v%d\n",
+                    decInsn.vA);
+            }
+        }
+        break;
+    case OP_RETURN_OBJECT:
+        okay = checkConstructorReturn(meth, workRegs, insnRegCount);
+        if (okay) {
+            RegType returnType = getMethodReturnType(meth);
+            checkTypeCategory(returnType, kTypeCategoryRef, &okay);
+            if (!okay) {
+                LOG_VFY("VFY: return-object not expected\n");
+                break;
+            }
+
+            /* returnType is the *expected* return type, not register value */
+            assert(returnType != kRegTypeZero);
+            assert(!regTypeIsUninitReference(returnType));
+
+            /*
+             * Verify that the reference in vAA is an instance of the type
+             * in "returnType".  The Zero type is allowed here.  If the
+             * method is declared to return an interface, then any
+             * initialized reference is acceptable.
+             *
+             * Note getClassFromRegister fails if the register holds an
+             * uninitialized reference, so we do not allow them to be
+             * returned.
+             */
+            ClassObject* declClass;
+            
+            declClass = regTypeInitializedReferenceToClass(returnType);
+            resClass = getClassFromRegister(workRegs, insnRegCount,
+                            decInsn.vA, &okay);
+            if (!okay)
+                break;
+            if (resClass != NULL) {
+                if (!dvmIsInterfaceClass(declClass) &&
+                    !dvmInstanceof(resClass, declClass))
+                {
+                    LOG_VFY("VFY: returning %s, declared %s\n",
+                            resClass->descriptor, declClass->descriptor);
+                    okay = false;
+                    break;
+                }
+            }
+        }
+        break;
+
+    case OP_CONST_4:
+    case OP_CONST_16:
+    case OP_CONST:
+        /* could be boolean, int, float, or a null reference */
+        setRegisterType(workRegs, insnRegCount, decInsn.vA,
+            determineCat1Const((s4)decInsn.vB), &okay);
+        break;
+    case OP_CONST_HIGH16:
+        /* could be boolean, int, float, or a null reference */
+        setRegisterType(workRegs, insnRegCount, decInsn.vA,
+            determineCat1Const((s4) decInsn.vB << 16), &okay);
+        break;
+    case OP_CONST_WIDE_16:
+    case OP_CONST_WIDE_32:
+    case OP_CONST_WIDE:
+    case OP_CONST_WIDE_HIGH16:
+        /* could be long or double; default to long and allow conversion */
+        setRegisterType(workRegs, insnRegCount, decInsn.vA,
+            kRegTypeLongLo, &okay);
+        break;
+    case OP_CONST_STRING:
+    case OP_CONST_STRING_JUMBO:
+        assert(gDvm.classJavaLangString != NULL);
+        if (decInsn.vB >= pDexFile->pHeader->stringIdsSize) {
+            LOG_VFY("VFY: invalid string pool index %u\n", decInsn.vB);
+            okay = false;
+        } else {
+            setRegisterType(workRegs, insnRegCount, decInsn.vA,
+                regTypeFromClass(gDvm.classJavaLangString), &okay);
+        }
+        break;
+    case OP_CONST_CLASS:
+        assert(gDvm.classJavaLangClass != NULL);
+        if (decInsn.vB >= pDexFile->pHeader->typeIdsSize) {
+            LOG_VFY("VFY: invalid class pool index %u\n", decInsn.vB);
+            okay = false;
+        } else {
+            setRegisterType(workRegs, insnRegCount, decInsn.vA,
+                regTypeFromClass(gDvm.classJavaLangClass), &okay);
+        }
+        break;
+
+    case OP_MONITOR_ENTER:
+    case OP_MONITOR_EXIT:
+        tmpType = getRegisterType(workRegs, insnRegCount, decInsn.vA, &okay);
+        if (okay && !regTypeIsReference(tmpType)) {
+            LOG_VFY("VFY: monitor op on non-object\n");
+            okay = false;
+        }
+        break;
+
+    case OP_CHECK_CAST:
+        /*
+         * If this instruction succeeds, we will promote register vA to
+         * the type in vB.  (This could be a demotion -- not expected, so
+         * we don't try to address it.)
+         *
+         * If it fails, an exception is thrown, which we deal with later
+         * by ignoring the update to decInsn.vA when branching to a handler.
+         */
+        resClass = dvmOptResolveClass(meth->clazz, decInsn.vB);
+        if (resClass == NULL) {
+            LOG_VFY("VFY: unable to resolve check-cast %d (%s) in %s\n",
+                    decInsn.vB, dexStringByTypeIdx(pDexFile, decInsn.vB),
+                    meth->clazz->descriptor);
+            okay = false;
+        } else {
+            RegType origType;
+
+            origType = getRegisterType(workRegs, insnRegCount, decInsn.vA,
+                        &okay);
+            if (!okay)
+                break;
+            if (!regTypeIsReference(origType)) {
+                LOG_VFY("VFY: check-cast on non-reference in v%u\n",decInsn.vA);
+                okay = false;
+                break;
+            }
+            setRegisterType(workRegs, insnRegCount, decInsn.vA,
+                regTypeFromClass(resClass), &okay);
+        }
+        break;
+    case OP_INSTANCE_OF:
+        if (decInsn.vC >= pDexFile->pHeader->typeIdsSize) {
+            LOG_VFY("VFY: invalid class pool index %u\n", decInsn.vC);
+            okay = false;
+            break;
+        }
+        tmpType = getRegisterType(workRegs, insnRegCount, decInsn.vB, &okay);
+        if (!okay)
+            break;
+        if (!regTypeIsReference(tmpType)) {
+            LOG_VFY("VFY: vB not a reference\n");
+            okay = false;
+            break;
+        }
+        /* result is boolean */
+        setRegisterType(workRegs, insnRegCount, decInsn.vA,
+            kRegTypeBoolean, &okay);
+        break;
+
+    case OP_ARRAY_LENGTH:
+        resClass = getClassFromRegister(workRegs, insnRegCount,
+                        decInsn.vB, &okay);
+        if (!okay)
+            break;
+        if (resClass != NULL && !dvmIsArrayClass(resClass)) {
+            LOG_VFY("VFY: array-length on non-array\n");
+            okay = false;
+            break;
+        }
+        setRegisterType(workRegs, insnRegCount, decInsn.vA, kRegTypeInteger,
+            &okay);
+        break;
+
+    case OP_NEW_INSTANCE:
+        /*
+         * We can check for interface and abstract classes here, but we
+         * can't reject them.  We can ask the optimizer to replace the
+         * instructions with a magic "always throw InstantiationError"
+         * instruction.  (Not enough bytes to sub in a method call.)
+         */
+        resClass = dvmOptResolveClass(meth->clazz, decInsn.vB);
+        if (resClass == NULL) {
+            LOG_VFY("VFY: unable to resolve new-instance %d (%s) in %s\n",
+                    decInsn.vB, dexStringByTypeIdx(pDexFile, decInsn.vB),
+                    meth->clazz->descriptor);
+            okay = false;
+        } else {
+            RegType uninitType;
+
+            /* add resolved class to uninit map if not already there */
+            int uidx = dvmSetUninitInstance(uninitMap, insnIdx, resClass);
+            assert(uidx >= 0);
+            uninitType = regTypeFromUninitIndex(uidx);
+
+            /*
+             * Any registers holding previous allocations from this address
+             * that have not yet been initialized must be marked invalid.
+             */
+            markUninitRefsAsInvalid(workRegs, insnRegCount, uninitMap,
+                uninitType);
+
+            /* add the new uninitialized reference to the register ste */
+            setRegisterType(workRegs, insnRegCount, decInsn.vA,
+                uninitType, &okay);
+        }
+        break;
+    case OP_NEW_ARRAY:
+        resClass = dvmOptResolveClass(meth->clazz, decInsn.vC);
+        if (resClass == NULL) {
+            LOG_VFY("VFY: unable to resolve new-array %d (%s) in %s\n",
+                    decInsn.vC, dexStringByTypeIdx(pDexFile, decInsn.vB),
+                    meth->clazz->descriptor);
+            okay = false;
+        } else if (!dvmIsArrayClass(resClass)) {
+            LOG_VFY("VFY: new-array on non-array class\n");
+            okay = false;
+        } else {
+            /* set register type to array class */
+            setRegisterType(workRegs, insnRegCount, decInsn.vA,
+                regTypeFromClass(resClass), &okay);
+        }
+        break;
+    case OP_FILLED_NEW_ARRAY:
+    case OP_FILLED_NEW_ARRAY_RANGE:
+        /* (decInsn.vA == 0) is silly, but not illegal */
+        resClass = dvmOptResolveClass(meth->clazz, decInsn.vB);
+        if (resClass == NULL) {
+            LOG_VFY("VFY: unable to resolve filled-array %d (%s) in %s\n",
+                    decInsn.vB, dexStringByTypeIdx(pDexFile, decInsn.vB),
+                    meth->clazz->descriptor);
+            okay = false;
+        } else if (!dvmIsArrayClass(resClass)) {
+            LOG_VFY("VFY: filled-new-array on non-array class\n");
+            okay = false;
+        } else {
+            /*
+             * TODO: if resClass is array of references, verify the registers
+             * in the argument list against the array type.
+             * TODO: if resClass is array of primitives, verify that the
+             * contents of the registers are appropriate.
+             */
+            /* filled-array result goes into "result" register */
+            setResultRegisterType(workRegs, insnRegCount,
+                regTypeFromClass(resClass), &okay);
+            justSetResult = true;
+        }
+        break;
+
+    case OP_CMPL_FLOAT:
+    case OP_CMPG_FLOAT:
+        verifyRegisterType(workRegs, insnRegCount, decInsn.vB, kRegTypeFloat,
+            &okay);
+        verifyRegisterType(workRegs, insnRegCount, decInsn.vC, kRegTypeFloat,
+            &okay);
+        setRegisterType(workRegs, insnRegCount, decInsn.vA, kRegTypeBoolean,
+            &okay);
+        break;
+    case OP_CMPL_DOUBLE:
+    case OP_CMPG_DOUBLE:
+        verifyRegisterType(workRegs, insnRegCount, decInsn.vB, kRegTypeDoubleLo,
+            &okay);
+        verifyRegisterType(workRegs, insnRegCount, decInsn.vC, kRegTypeDoubleLo,
+            &okay);
+        setRegisterType(workRegs, insnRegCount, decInsn.vA, kRegTypeBoolean,
+            &okay);
+        break;
+    case OP_CMP_LONG:
+        verifyRegisterType(workRegs, insnRegCount, decInsn.vB, kRegTypeLongLo,
+            &okay);
+        verifyRegisterType(workRegs, insnRegCount, decInsn.vC, kRegTypeLongLo,
+            &okay);
+        setRegisterType(workRegs, insnRegCount, decInsn.vA, kRegTypeBoolean,
+            &okay);
+        break;
+
+    case OP_THROW:
+        resClass = getClassFromRegister(workRegs, insnRegCount,
+                        decInsn.vA, &okay);
+        if (okay && resClass != NULL) {
+            if (!dvmInstanceof(resClass, gDvm.classJavaLangThrowable)) {
+                LOG_VFY("VFY: thrown class %s not instanceof Throwable\n",
+                        resClass->descriptor);
+                okay = false;
+            }
+        }
+        break;
+
+    case OP_GOTO:
+    case OP_GOTO_16:
+    case OP_GOTO_32:
+        /* no effect on or use of registers */
+        break;
+
+    case OP_PACKED_SWITCH:
+    case OP_SPARSE_SWITCH:
+        /* verify that vAA is an integer, or can be converted to one */
+        verifyRegisterType(workRegs, insnRegCount, decInsn.vA,
+            kRegTypeInteger, &okay);
+        break;
+
+    case OP_FILL_ARRAY_DATA:
+        {
+            RegType valueType;
+            const u2 *arrayData;
+            u2 elemWidth;
+
+            /* Similar to the verification done for APUT */
+            resClass = getClassFromRegister(workRegs, insnRegCount,
+                            decInsn.vA, &okay);
+            if (!okay)
+                break;
+
+            /* resClass can be null if the reg type is Zero */
+            if (resClass == NULL)
+                break;
+
+            if (!dvmIsArrayClass(resClass) || resClass->arrayDim != 1 ||
+                resClass->elementClass->primitiveType == PRIM_NOT ||
+                resClass->elementClass->primitiveType == PRIM_VOID)
+            {
+                LOG_VFY("VFY: invalid fill-array-data on %s\n", 
+                        resClass->descriptor);
+                okay = false;
+                break;
+            }
+
+            valueType = primitiveTypeToRegType(
+                                    resClass->elementClass->primitiveType);
+            assert(valueType != kRegTypeUnknown);
+
+            /* 
+             * Now verify if the element width in the table matches the element
+             * width declared in the array
+             */
+            arrayData = insns + (insns[1] | (((s4)insns[2]) << 16));
+            if (arrayData[0] != kArrayDataSignature) {
+                LOG_VFY("VFY: invalid magic for array-data\n"); 
+                okay = false;
+                break;
+            }
+            
+            switch (resClass->elementClass->primitiveType) {
+                case PRIM_BOOLEAN:
+                case PRIM_BYTE:
+                     elemWidth = 1;
+                     break;
+                case PRIM_CHAR:
+                case PRIM_SHORT:
+                     elemWidth = 2;
+                     break;
+                case PRIM_FLOAT:
+                case PRIM_INT:
+                     elemWidth = 4;
+                     break;
+                case PRIM_DOUBLE:
+                case PRIM_LONG:
+                     elemWidth = 8;
+                     break;
+                default:
+                     elemWidth = 0;
+                     break;
+            }
+
+            /* 
+             * Since we don't compress the data in Dex, expect to see equal
+             * width of data stored in the table and expected from the array
+             * class.
+             */
+            if (arrayData[1] != elemWidth) {
+                LOG_VFY("VFY: array-data size mismatch (%d vs %d)\n", 
+                        arrayData[1], elemWidth); 
+                okay = false;
+            }
+        }
+        break;
+
+    case OP_IF_EQ:
+    case OP_IF_NE:
+        {
+            RegType type1, type2;
+            bool tmpResult;
+
+            type1 = getRegisterType(workRegs, insnRegCount, decInsn.vA, &okay);
+            type2 = getRegisterType(workRegs, insnRegCount, decInsn.vB, &okay);
+            if (!okay)
+                break;
+
+            /* both references? */
+            if (regTypeIsReference(type1) && regTypeIsReference(type2))
+                break;
+
+            /* both category-1nr? */
+            checkTypeCategory(type1, kTypeCategory1nr, &okay);
+            checkTypeCategory(type2, kTypeCategory1nr, &okay);
+            if (!okay) {
+                LOG_VFY("VFY: args to if-eq/if-ne must both be refs or cat1\n");
+                break;
+            }
+        }
+        break;
+    case OP_IF_LT:
+    case OP_IF_GE:
+    case OP_IF_GT:
+    case OP_IF_LE:
+        tmpType = getRegisterType(workRegs, insnRegCount, decInsn.vA, &okay);
+        if (!okay)
+            break;
+        checkTypeCategory(tmpType, kTypeCategory1nr, &okay);
+        if (!okay) {
+            LOG_VFY("VFY: args to 'if' must be cat-1nr\n");
+            break;
+        }
+        tmpType = getRegisterType(workRegs, insnRegCount, decInsn.vB,&okay);
+        if (!okay)
+            break;
+        checkTypeCategory(tmpType, kTypeCategory1nr, &okay);
+        if (!okay) {
+            LOG_VFY("VFY: args to 'if' must be cat-1nr\n");
+            break;
+        }
+        break;
+    case OP_IF_EQZ:
+    case OP_IF_NEZ:
+        tmpType = getRegisterType(workRegs, insnRegCount, decInsn.vA, &okay);
+        if (!okay)
+            break;
+        if (regTypeIsReference(tmpType))
+            break;
+        checkTypeCategory(tmpType, kTypeCategory1nr, &okay);
+        if (!okay)
+            LOG_VFY("VFY: expected cat-1 arg to if\n");
+        break;
+    case OP_IF_LTZ:
+    case OP_IF_GEZ:
+    case OP_IF_GTZ:
+    case OP_IF_LEZ:
+        tmpType = getRegisterType(workRegs, insnRegCount, decInsn.vA, &okay);
+        if (!okay)
+            break;
+        checkTypeCategory(tmpType, kTypeCategory1nr, &okay);
+        if (!okay)
+            LOG_VFY("VFY: expected cat-1 arg to if\n");
+        break;
+
+    case OP_AGET:
+        tmpType = kRegTypeInteger;
+        goto aget_1nr_common;
+    case OP_AGET_BOOLEAN:
+        tmpType = kRegTypeBoolean;
+        goto aget_1nr_common;
+    case OP_AGET_BYTE:
+        tmpType = kRegTypeByte;
+        goto aget_1nr_common;
+    case OP_AGET_CHAR:
+        tmpType = kRegTypeChar;
+        goto aget_1nr_common;
+    case OP_AGET_SHORT:
+        tmpType = kRegTypeShort;
+        goto aget_1nr_common;
+aget_1nr_common:
+        {
+            RegType srcType;
+
+            resClass = getClassFromRegister(workRegs, insnRegCount,
+                            decInsn.vB, &okay);
+            if (!okay)
+                break;
+            if (resClass != NULL) {
+                /* verify the class and check "tmpType" */
+                if (!dvmIsArrayClass(resClass) || resClass->arrayDim != 1 ||
+                    resClass->elementClass->primitiveType == PRIM_NOT)
+                {
+                    LOG_VFY("VFY: invalid aget-1nr on %s\n",
+                            resClass->descriptor);
+                    okay = false;
+                    break;
+                }
+
+                srcType = primitiveTypeToRegType(
+                                        resClass->elementClass->primitiveType);
+
+                if (!canConvertTo1nr(srcType, tmpType)) {
+                    LOG_VFY("VFY: unable to aget array type=%d into local type=%d"
+                            " (on %s)\n",
+                            srcType, tmpType, resClass->descriptor);
+                    okay = false;
+                    break;
+                }
+
+            }
+            setRegisterType(workRegs, insnRegCount, decInsn.vA,
+                tmpType, &okay);
+        }
+        break;
+
+    case OP_AGET_WIDE:
+        {
+            RegType dstType = kRegTypeUnknown;
+
+            resClass = getClassFromRegister(workRegs, insnRegCount,
+                            decInsn.vB, &okay);
+            if (!okay)
+                break;
+            if (resClass != NULL) {
+                /* verify the class and try to refine "dstType" */
+                if (!dvmIsArrayClass(resClass) || resClass->arrayDim != 1 ||
+                    resClass->elementClass->primitiveType == PRIM_NOT)
+                {
+                    LOG_VFY("VFY: invalid aget-wide on %s\n",
+                            resClass->descriptor);
+                    okay = false;
+                    break;
+                }
+
+                switch (resClass->elementClass->primitiveType) {
+                case PRIM_LONG:
+                    dstType = kRegTypeLongLo;
+                    break;
+                case PRIM_DOUBLE:
+                    dstType = kRegTypeDoubleLo;
+                    break;
+                default:
+                    LOG_VFY("VFY: invalid aget-wide on %s\n",
+                            resClass->descriptor);
+                    dstType = kRegTypeUnknown;
+                    okay = false;
+                    break;
+                }
+            }
+            setRegisterType(workRegs, insnRegCount, decInsn.vA,
+                dstType, &okay);
+        }
+        break;
+
    case OP_AGET_OBJECT:
        {
            RegType dstType;

            /* vB holds the array reference; refine vA's type from it */
            resClass = getClassFromRegister(workRegs, insnRegCount,
                            decInsn.vB, &okay);
            if (!okay)
                break;
            if (resClass != NULL) {
                ClassObject* elementClass;

                assert(resClass != NULL);
                if (!dvmIsArrayClass(resClass)) {
                    LOG_VFY("VFY: aget-object on non-ref array class\n");
                    okay = false;
                    break;
                }
                assert(resClass->elementClass != NULL);

                /*
                 * Find the element class.  resClass->elementClass indicates
                 * the basic type, which won't be what we want for a
                 * multi-dimensional array.
                 */
                if (resClass->descriptor[1] == '[') {
                    assert(resClass->arrayDim > 1);
                    /* strip one '[' to get the (dim-1) array class */
                    elementClass = dvmFindArrayClass(&resClass->descriptor[1],
                                        resClass->classLoader);
                } else {
                    assert(resClass->arrayDim == 1);
                    elementClass = resClass->elementClass;
                }

                dstType = regTypeFromClass(elementClass);
            } else {
                /*
                 * The array reference is NULL, so the current code path will
                 * throw an exception.  For proper merging with later code
                 * paths, and correct handling of "if-eqz" tests on the
                 * result of the array get, we want to treat this as a null
                 * reference.
                 */
                dstType = kRegTypeZero;
            }
            setRegisterType(workRegs, insnRegCount, decInsn.vA,
                dstType, &okay);
        }
        break;
    case OP_APUT:
        tmpType = kRegTypeInteger;
        goto aput_1nr_common;
    case OP_APUT_BOOLEAN:
        tmpType = kRegTypeBoolean;
        goto aput_1nr_common;
    case OP_APUT_BYTE:
        tmpType = kRegTypeByte;
        goto aput_1nr_common;
    case OP_APUT_CHAR:
        tmpType = kRegTypeChar;
        goto aput_1nr_common;
    case OP_APUT_SHORT:
        tmpType = kRegTypeShort;
        goto aput_1nr_common;
/* shared handler for single-width non-reference aput; tmpType holds the
 * register type implied by the opcode */
aput_1nr_common:
        {
            RegType srcType, dstType;

            /* make sure the source register has the correct type */
            srcType = getRegisterType(workRegs, insnRegCount, decInsn.vA,
                            &okay);
            if (!canConvertTo1nr(srcType, tmpType)) {
                LOG_VFY("VFY: invalid reg type %d on aput instr (need %d)\n",
                    srcType, tmpType);
                okay = false;
                break;
            }

            resClass = getClassFromRegister(workRegs, insnRegCount,
                            decInsn.vB, &okay);
            if (!okay)
                break;

            /* resClass can be null if the reg type is Zero */
            if (resClass == NULL)
                break;

            /* must be a one-dimensional array of a primitive type */
            if (!dvmIsArrayClass(resClass) || resClass->arrayDim != 1 ||
                resClass->elementClass->primitiveType == PRIM_NOT)
            {
                LOG_VFY("VFY: invalid aput-1nr on %s\n", resClass->descriptor);
                okay = false;
                break;
            }

            dstType = primitiveTypeToRegType(
                                    resClass->elementClass->primitiveType);
            assert(dstType != kRegTypeUnknown);

            /* value must also convert to the actual element type */
            if (!canConvertTo1nr(srcType, dstType)) {
                LOG_VFY("VFY: invalid aput-1nr on %s (src=%d dst=%d)\n",
                        resClass->descriptor, srcType, dstType);
                okay = false;
                break;
            }
        }
        break;
    case OP_APUT_WIDE:
        /* vA/vA+1 must hold a well-formed category-2 (wide) value */
        tmpType = getRegisterType(workRegs, insnRegCount, decInsn.vA, &okay);
        if (okay) {
            RegType typeHi = 
                getRegisterType(workRegs, insnRegCount, decInsn.vA+1, &okay);
            checkTypeCategory(tmpType, kTypeCategory2, &okay);
            checkWidePair(tmpType, typeHi, &okay);
        }
        if (!okay)
            break;

        resClass = getClassFromRegister(workRegs, insnRegCount,
                        decInsn.vB, &okay);
        if (!okay)
            break;
        /* resClass is NULL for a null array ref; runtime throws NPE then */
        if (resClass != NULL) {
            /* verify the class and try to refine "dstType" */
            if (!dvmIsArrayClass(resClass) || resClass->arrayDim != 1 ||
                resClass->elementClass->primitiveType == PRIM_NOT)
            {
                LOG_VFY("VFY: invalid aput-wide on %s\n",
                        resClass->descriptor);
                okay = false;
                break;
            }

            /* only long[] and double[] may be written with aput-wide */
            switch (resClass->elementClass->primitiveType) {
            case PRIM_LONG:
            case PRIM_DOUBLE:
                /* these are okay */
                break;
            default:
                LOG_VFY("VFY: invalid aput-wide on %s\n",
                        resClass->descriptor);
                okay = false;
                break;
            }
        }
        break;
    case OP_APUT_OBJECT:
        /* get the ref we're storing; Zero is okay, Uninit is not */
        resClass = getClassFromRegister(workRegs, insnRegCount,
                        decInsn.vA, &okay);
        if (!okay)
            break;
        if (resClass != NULL) {
            ClassObject* arrayClass;
            ClassObject* elementClass;

            /*
             * Get the array class.  If the array ref is null, we won't
             * have type information (and we'll crash at runtime with a
             * null pointer exception).
             */
            arrayClass = getClassFromRegister(workRegs, insnRegCount,
                            decInsn.vB, &okay);

            if (arrayClass != NULL) {
                /* see if the array holds a compatible type */
                if (!dvmIsArrayClass(arrayClass)) {
                    LOG_VFY("VFY: invalid aput-object on %s\n",
                            arrayClass->descriptor);
                    okay = false;
                    break;
                }

                /*
                 * Find the element class.  resClass->elementClass indicates
                 * the basic type, which won't be what we want for a
                 * multi-dimensional array.
                 *
                 * All we want to check here is that the element type is a
                 * reference class.  We *don't* check instanceof here, because
                 * you can still put a String into a String[] after the latter
                 * has been cast to an Object[].
                 */
                if (arrayClass->descriptor[1] == '[') {
                    assert(arrayClass->arrayDim > 1);
                    elementClass = dvmFindArrayClass(&arrayClass->descriptor[1],
                                        arrayClass->classLoader);
                } else {
                    assert(arrayClass->arrayDim == 1);
                    elementClass = arrayClass->elementClass;
                }
                /* reject stores of refs into primitive arrays */
                if (elementClass->primitiveType != PRIM_NOT) {
                    LOG_VFY("VFY: invalid aput-object of %s into %s\n",
                            resClass->descriptor, arrayClass->descriptor);
                    okay = false;
                    break;
                }
            }
        }
        break;
+
+    case OP_IGET:
+        tmpType = kRegTypeInteger;
+        goto iget_1nr_common;
+    case OP_IGET_BOOLEAN:
+        tmpType = kRegTypeBoolean;
+        goto iget_1nr_common;
+    case OP_IGET_BYTE:
+        tmpType = kRegTypeByte;
+        goto iget_1nr_common;
+    case OP_IGET_CHAR:
+        tmpType = kRegTypeChar;
+        goto iget_1nr_common;
+    case OP_IGET_SHORT:
+        tmpType = kRegTypeShort;
+        goto iget_1nr_common;
+iget_1nr_common:
+        {
+            ClassObject* fieldClass;
+            InstField* instField;
+            RegType objType, fieldType;
+
+            objType = getRegisterType(workRegs, insnRegCount, decInsn.vB,
+                        &okay);
+            if (!okay)
+                break;
+            instField = getInstField(meth, uninitMap, objType, decInsn.vC,
+                            &okay);
+            if (!okay)
+                break;
+
+            /* make sure the field's type is compatible with expectation */
+            fieldType = primSigCharToRegType(instField->field.signature[0]);
+            if (fieldType == kRegTypeUnknown ||
+                !canConvertTo1nr(fieldType, tmpType))
+            {
+                LOG_VFY("VFY: invalid iget-1nr of %s.%s (req=%d actual=%d)\n",
+                        instField->field.clazz->descriptor,
+                        instField->field.name, tmpType, fieldType);
+                okay = false;
+                break;
+            }
+
+            setRegisterType(workRegs, insnRegCount, decInsn.vA, tmpType, &okay);
+        }
+        break;
+    case OP_IGET_WIDE:
+        {
+            RegType dstType;
+            ClassObject* fieldClass;
+            InstField* instField;
+            RegType objType;
+
+            objType = getRegisterType(workRegs, insnRegCount, decInsn.vB,
+                        &okay);
+            if (!okay)
+                break;
+            instField = getInstField(meth, uninitMap, objType, decInsn.vC,
+                            &okay);
+            if (!okay)
+                break;
+            /* check the type, which should be prim */
+            switch (instField->field.signature[0]) {
+            case 'D':
+                dstType = kRegTypeDoubleLo;
+                break;
+            case 'J':
+                dstType = kRegTypeLongLo;
+                break;
+            default:
+                LOG_VFY("VFY: invalid iget-wide of %s.%s\n",
+                        instField->field.clazz->descriptor,
+                        instField->field.name);
+                dstType = kRegTypeUnknown;
+                okay = false;
+                break;
+            }
+            if (okay) {
+                setRegisterType(workRegs, insnRegCount, decInsn.vA,
+                    dstType, &okay);
+            }
+        }
+        break;
+    case OP_IGET_OBJECT:
+        {
+            ClassObject* fieldClass;
+            InstField* instField;
+            RegType objType;
+
+            objType = getRegisterType(workRegs, insnRegCount, decInsn.vB,
+                        &okay);
+            if (!okay)
+                break;
+            instField = getInstField(meth, uninitMap, objType, decInsn.vC,
+                            &okay);
+            if (!okay)
+                break;
+            fieldClass = getFieldClass(meth, &instField->field);
+            if (fieldClass == NULL) {
+                /* class not found or primitive type */
+                LOG_VFY("VFY: unable to recover field class from '%s'\n",
+                    instField->field.signature);
+                okay = false;
+                break;
+            }
+            if (okay) {
+                assert(!dvmIsPrimitiveClass(fieldClass));
+                setRegisterType(workRegs, insnRegCount, decInsn.vA,
+                    regTypeFromClass(fieldClass), &okay);
+            }
+        }
+        break;
+    case OP_IPUT:
+        tmpType = kRegTypeInteger;
+        goto iput_1nr_common;
+    case OP_IPUT_BOOLEAN:
+        tmpType = kRegTypeBoolean;
+        goto iput_1nr_common;
+    case OP_IPUT_BYTE:
+        tmpType = kRegTypeByte;
+        goto iput_1nr_common;
+    case OP_IPUT_CHAR:
+        tmpType = kRegTypeChar;
+        goto iput_1nr_common;
+    case OP_IPUT_SHORT:
+        tmpType = kRegTypeShort;
+        goto iput_1nr_common;
+iput_1nr_common:
+        {
+            RegType srcType, fieldType, objType;
+            ClassObject* fieldClass;
+            InstField* instField;
+            
+            /* make sure the source register has the correct type */
+            srcType = getRegisterType(workRegs, insnRegCount, decInsn.vA,
+                        &okay);
+            if (!canConvertTo1nr(srcType, tmpType)) {
+                LOG_VFY("VFY: invalid reg type %d on iput instr (need %d)\n",
+                    srcType, tmpType);
+                okay = false;
+                break;
+            }
+
+            objType = getRegisterType(workRegs, insnRegCount, decInsn.vB,
+                        &okay);
+            if (!okay)
+                break;
+            instField = getInstField(meth, uninitMap, objType, decInsn.vC,
+                            &okay);
+            if (!okay)
+                break;
+
+            /* get type of field we're storing into */
+            fieldType = primSigCharToRegType(instField->field.signature[0]);
+            if (fieldType == kRegTypeUnknown ||
+                !canConvertTo1nr(srcType, fieldType))
+            {
+                LOG_VFY("VFY: invalid iput-1nr of %s.%s (src=%d dst=%d)\n",
+                        instField->field.clazz->descriptor,
+                        instField->field.name, srcType, fieldType);
+                okay = false;
+                break;
+            }
+        }
+        break;
+    case OP_IPUT_WIDE:
+        tmpType = getRegisterType(workRegs, insnRegCount, decInsn.vA, &okay);
+        if (okay) {
+            RegType typeHi = 
+                getRegisterType(workRegs, insnRegCount, decInsn.vA+1, &okay);
+            checkTypeCategory(tmpType, kTypeCategory2, &okay);
+            checkWidePair(tmpType, typeHi, &okay);
+        }
+        if (okay) {
+            ClassObject* fieldClass;
+            InstField* instField;
+            RegType objType;
+
+            objType = getRegisterType(workRegs, insnRegCount, decInsn.vB,
+                        &okay);
+            if (!okay)
+                break;
+            instField = getInstField(meth, uninitMap, objType, decInsn.vC,
+                            &okay);
+            if (!okay)
+                break;
+            /* check the type, which should be prim */
+            switch (instField->field.signature[0]) {
+            case 'D':
+            case 'J':
+                /* these are okay (and interchangeable) */
+                break;
+            default:
+                LOG_VFY("VFY: invalid iput-wide of %s.%s\n",
+                        instField->field.clazz->descriptor,
+                        instField->field.name);
+                okay = false;
+                break;
+            }
+        }
+        break;
    case OP_IPUT_OBJECT:
        {
            ClassObject* fieldClass;
            ClassObject* valueClass;
            InstField* instField;
            RegType objType, valueType;

            /* get the type of the object holding the field (vB) */
            objType = getRegisterType(workRegs, insnRegCount, decInsn.vB,
                        &okay);
            if (!okay)
                break;
            instField = getInstField(meth, uninitMap, objType, decInsn.vC,
                            &okay);
            if (!okay)
                break;
            fieldClass = getFieldClass(meth, &instField->field);
            if (fieldClass == NULL) {
                LOG_VFY("VFY: unable to recover field class from '%s'\n",
                    instField->field.signature);
                okay = false;
                break;
            }

            /* the value being stored (vA) must be a reference */
            valueType = getRegisterType(workRegs, insnRegCount, decInsn.vA,
                        &okay);
            if (!okay)
                break;
            if (!regTypeIsReference(valueType)) {
                LOG_VFY("VFY: storing non-ref v%d into ref field '%s' (%s)\n",
                        decInsn.vA, instField->field.name,
                        fieldClass->descriptor);
                okay = false;
                break;
            }
            /* null (Zero) is assignable to any reference field */
            if (valueType != kRegTypeZero) {
                valueClass = regTypeInitializedReferenceToClass(valueType);
                if (valueClass == NULL) {
                    LOG_VFY("VFY: storing uninit ref v%d into ref field\n",
                        decInsn.vA);
                    okay = false;
                    break;
                }
                /* allow if field is any interface or field is base class */
                if (!dvmIsInterfaceClass(fieldClass) &&
                    !dvmInstanceof(valueClass, fieldClass))
                {
                    LOG_VFY("VFY: storing type '%s' into field type '%s' (%s.%s)\n",
                            valueClass->descriptor, fieldClass->descriptor,
                            instField->field.clazz->descriptor,
                            instField->field.name);
                    okay = false;
                    break;
                }
            }
        }
        break;
+
    case OP_SGET:
        tmpType = kRegTypeInteger;
        goto sget_1nr_common;
    case OP_SGET_BOOLEAN:
        tmpType = kRegTypeBoolean;
        goto sget_1nr_common;
    case OP_SGET_BYTE:
        tmpType = kRegTypeByte;
        goto sget_1nr_common;
    case OP_SGET_CHAR:
        tmpType = kRegTypeChar;
        goto sget_1nr_common;
    case OP_SGET_SHORT:
        tmpType = kRegTypeShort;
        goto sget_1nr_common;
/* shared handler for single-width non-reference sget; tmpType holds the
 * register type implied by the opcode */
sget_1nr_common:
        {
            StaticField* staticField;
            RegType fieldType;

            staticField = getStaticField(meth, decInsn.vB, &okay);
            if (!okay)
                break;

            /* make sure the field's type is compatible with expectation */
            fieldType = primSigCharToRegType(staticField->field.signature[0]);
            if (fieldType == kRegTypeUnknown ||
                !canConvertTo1nr(fieldType, tmpType))
            {
                LOG_VFY("VFY: invalid sget-1nr of %s.%s (req=%d actual=%d)\n",
                        staticField->field.clazz->descriptor,
                        staticField->field.name, tmpType, fieldType);
                okay = false;
                break;
            }

            setRegisterType(workRegs, insnRegCount, decInsn.vA, tmpType, &okay);
        }
        break;
    case OP_SGET_WIDE:
        {
            StaticField* staticField;
            RegType dstType;

            staticField = getStaticField(meth, decInsn.vB, &okay);
            if (!okay)
                break;
            /* check the type, which should be prim: only J/D are wide */
            switch (staticField->field.signature[0]) {
            case 'D':
                dstType = kRegTypeDoubleLo;
                break;
            case 'J':
                dstType = kRegTypeLongLo;
                break;
            default:
                LOG_VFY("VFY: invalid sget-wide of %s.%s\n",
                        staticField->field.clazz->descriptor,
                        staticField->field.name);
                dstType = kRegTypeUnknown;
                okay = false;
                break;
            }
            if (okay) {
                setRegisterType(workRegs, insnRegCount, decInsn.vA,
                    dstType, &okay);
            }
        }
        break;
    case OP_SGET_OBJECT:
        {
            StaticField* staticField;
            ClassObject* fieldClass;

            staticField = getStaticField(meth, decInsn.vB, &okay);
            if (!okay)
                break;
            fieldClass = getFieldClass(meth, &staticField->field);
            if (fieldClass == NULL) {
                LOG_VFY("VFY: unable to recover field class from '%s'\n",
                    staticField->field.signature);
                okay = false;
                break;
            }
            /* sget-object is only valid on reference-typed fields */
            if (dvmIsPrimitiveClass(fieldClass)) {
                LOG_VFY("VFY: attempt to get prim field with sget-object\n");
                okay = false;
                break;
            }
            setRegisterType(workRegs, insnRegCount, decInsn.vA,
                regTypeFromClass(fieldClass), &okay);
        }
        break;
+    case OP_SPUT:
+        tmpType = kRegTypeInteger;
+        goto sput_1nr_common;
+    case OP_SPUT_BOOLEAN:
+        tmpType = kRegTypeBoolean;
+        goto sput_1nr_common;
+    case OP_SPUT_BYTE:
+        tmpType = kRegTypeByte;
+        goto sput_1nr_common;
+    case OP_SPUT_CHAR:
+        tmpType = kRegTypeChar;
+        goto sput_1nr_common;
+    case OP_SPUT_SHORT:
+        tmpType = kRegTypeShort;
+        goto sput_1nr_common;
+sput_1nr_common:
+        {
+            RegType srcType, fieldType;
+            StaticField* staticField;
+            
+            /* make sure the source register has the correct type */
+            srcType = getRegisterType(workRegs, insnRegCount, decInsn.vA,
+                        &okay);
+            if (!canConvertTo1nr(srcType, tmpType)) {
+                LOG_VFY("VFY: invalid reg type %d on iput instr (need %d)\n",
+                    srcType, tmpType);
+                okay = false;
+                break;
+            }
+
+            staticField = getStaticField(meth, decInsn.vB, &okay);
+            if (!okay)
+                break;
+
+            /* get type of field we're storing into */
+            fieldType = primSigCharToRegType(staticField->field.signature[0]);
+            if (fieldType == kRegTypeUnknown ||
+                !canConvertTo1nr(srcType, fieldType))
+            {
+                LOG_VFY("VFY: invalid sput-1nr of %s.%s (req=%d actual=%d)\n",
+                        staticField->field.clazz->descriptor,
+                        staticField->field.name, tmpType, fieldType);
+                okay = false;
+                break;
+            }
+        }
+        break;
    case OP_SPUT_WIDE:
        /* vA/vA+1 must hold a well-formed category-2 (wide) value */
        tmpType = getRegisterType(workRegs, insnRegCount, decInsn.vA, &okay);
        if (okay) {
            RegType typeHi = 
                getRegisterType(workRegs, insnRegCount, decInsn.vA+1, &okay);
            checkTypeCategory(tmpType, kTypeCategory2, &okay);
            checkWidePair(tmpType, typeHi, &okay);
        }
        if (okay) {
            StaticField* staticField;

            staticField = getStaticField(meth, decInsn.vB, &okay);
            if (!okay)
                break;
            /* check the type, which should be prim: only J/D are wide */
            switch (staticField->field.signature[0]) {
            case 'D':
            case 'J':
                /* these are okay */
                break;
            default:
                LOG_VFY("VFY: invalid sput-wide of %s.%s\n",
                        staticField->field.clazz->descriptor,
                        staticField->field.name);
                okay = false;
                break;
            }
        }
        break;
    case OP_SPUT_OBJECT:
        {
            ClassObject* fieldClass;
            ClassObject* valueClass;
            StaticField* staticField;
            RegType valueType;

            staticField = getStaticField(meth, decInsn.vB, &okay);
            if (!okay)
                break;
            fieldClass = getFieldClass(meth, &staticField->field);
            if (fieldClass == NULL) {
                LOG_VFY("VFY: unable to recover field class from '%s'\n",
                    staticField->field.signature);
                okay = false;
                break;
            }

            /* the value being stored (vA) must be a reference */
            valueType = getRegisterType(workRegs, insnRegCount, decInsn.vA,
                        &okay);
            if (!okay)
                break;
            if (!regTypeIsReference(valueType)) {
                LOG_VFY("VFY: storing non-ref v%d into ref field '%s' (%s)\n",
                        decInsn.vA, staticField->field.name,
                        fieldClass->descriptor);
                okay = false;
                break;
            }
            /* null (Zero) is assignable to any reference field */
            if (valueType != kRegTypeZero) {
                valueClass = regTypeInitializedReferenceToClass(valueType);
                if (valueClass == NULL) {
                    LOG_VFY("VFY: storing uninit ref v%d into ref field\n",
                        decInsn.vA);
                    okay = false;
                    break;
                }
                /* allow if field is any interface or field is base class */
                if (!dvmIsInterfaceClass(fieldClass) &&
                    !dvmInstanceof(valueClass, fieldClass))
                {
                    LOG_VFY("VFY: storing type '%s' into field type '%s' (%s.%s)\n",
                            valueClass->descriptor, fieldClass->descriptor,
                            staticField->field.clazz->descriptor,
                            staticField->field.name);
                    okay = false;
                    break;
                }
            }
        }
        break;
+
    case OP_INVOKE_VIRTUAL:
    case OP_INVOKE_VIRTUAL_RANGE:
    case OP_INVOKE_SUPER:
    case OP_INVOKE_SUPER_RANGE:
        {
            Method* calledMethod;
            RegType returnType;
            bool isRange;
            bool isSuper;

            isRange =  (decInsn.opCode == OP_INVOKE_VIRTUAL_RANGE ||
                        decInsn.opCode == OP_INVOKE_SUPER_RANGE);
            isSuper =  (decInsn.opCode == OP_INVOKE_SUPER ||
                        decInsn.opCode == OP_INVOKE_SUPER_RANGE);

            /* resolve the method and type-check all arguments */
            calledMethod = verifyInvocationArgs(meth, workRegs, insnRegCount,
                            &decInsn, uninitMap, METHOD_VIRTUAL, isRange,
                            isSuper, &okay);
            if (!okay)
                break;
            /* the result register pair takes the callee's return type */
            returnType = getMethodReturnType(calledMethod);
            setResultRegisterType(workRegs, insnRegCount, returnType, &okay);
            justSetResult = true;
        }
        break;
+    case OP_INVOKE_DIRECT:
+    case OP_INVOKE_DIRECT_RANGE:
+        {
+            RegType returnType;
+            Method* calledMethod;
+            bool isRange;
+
+            isRange =  (decInsn.opCode == OP_INVOKE_DIRECT_RANGE);
+            calledMethod = verifyInvocationArgs(meth, workRegs, insnRegCount,
+                            &decInsn, uninitMap, METHOD_DIRECT, isRange,
+                            false, &okay);
+            if (!okay)
+                break;
+
+            /*
+             * Some additional checks when calling <init>.  We know from
+             * the invocation arg check that the "this" argument is an
+             * instance of calledMethod->clazz.  Now we further restrict
+             * that to require that calledMethod->clazz is the same as
+             * this->clazz or this->super, allowing the latter only if
+             * the "this" argument is the same as the "this" argument to
+             * this method (which implies that we're in <init> ourselves).
+             */
+            if (isInitMethod(calledMethod)) {
+                RegType thisType;
+                thisType = getInvocationThis(workRegs, insnRegCount,
+                            &decInsn, &okay);
+                if (!okay)
+                    break;
+
+                /* no null refs allowed (?) */
+                if (thisType == kRegTypeZero) {
+                    LOG_VFY("VFY: unable to initialize null ref\n");
+                    okay = false;
+                    break;
+                }
+
+                ClassObject* thisClass;
+                
+                thisClass = regTypeReferenceToClass(thisType, uninitMap);
+                assert(thisClass != NULL);
+
+                /* must be in same class or in superclass */
+                if (calledMethod->clazz == thisClass->super) {
+                    if (thisClass != meth->clazz) {
+                        LOG_VFY("VFY: invoke-direct <init> on super only "
+                            "allowed for 'this' in <init>");
+                        okay = false;
+                        break;
+                    }
+                }  else if (calledMethod->clazz != thisClass) {
+                    LOG_VFY("VFY: invoke-direct <init> must be on current "
+                            "class or super\n");
+                    okay = false;
+                    break;
+                }
+
+                /* arg must be an uninitialized reference */
+                if (!regTypeIsUninitReference(thisType)) {
+                    LOG_VFY("VFY: can only initialize the uninitialized\n");
+                    okay = false;
+                    break;
+                }
+
+                /*
+                 * Replace the uninitialized reference with an initialized
+                 * one, and clear the entry in the uninit map.  We need to
+                 * do this for all registers that have the same object
+                 * instance in them, not just the "this" register.
+                 */
+                int uidx = regTypeToUninitIndex(thisType);
+                markRefsAsInitialized(workRegs, insnRegCount, uninitMap,
+                    thisType, &okay);
+                if (!okay)
+                    break;
+            }
+            returnType = getMethodReturnType(calledMethod);
+            setResultRegisterType(workRegs, insnRegCount,
+                returnType, &okay);
+            justSetResult = true;
+        }
+        break;
    case OP_INVOKE_STATIC:
    case OP_INVOKE_STATIC_RANGE:
        {
            RegType returnType;
            Method* calledMethod;
            bool isRange;

            isRange =  (decInsn.opCode == OP_INVOKE_STATIC_RANGE);
            /* no "this" argument for static calls; just check the args */
            calledMethod = verifyInvocationArgs(meth, workRegs, insnRegCount,
                            &decInsn, uninitMap, METHOD_STATIC, isRange,
                            false, &okay);
            if (!okay)
                break;

            returnType = getMethodReturnType(calledMethod);
            setResultRegisterType(workRegs, insnRegCount, returnType, &okay);
            justSetResult = true;
        }
        break;
    case OP_INVOKE_INTERFACE:
    case OP_INVOKE_INTERFACE_RANGE:
        {
            RegType thisType, returnType;
            Method* absMethod;
            bool isRange;

            isRange =  (decInsn.opCode == OP_INVOKE_INTERFACE_RANGE);
            absMethod = verifyInvocationArgs(meth, workRegs, insnRegCount,
                            &decInsn, uninitMap, METHOD_INTERFACE, isRange,
                            false, &okay);
            if (!okay)
                break;

            /*
             * Get the type of the "this" arg, which should always be an
             * interface class.  Because we don't do a full merge on
             * interface classes, this might have reduced to Object.
             */
            thisType = getInvocationThis(workRegs, insnRegCount,
                        &decInsn, &okay);
            if (!okay)
                break;

#if 0       /* can't do this here, fails on dalvik test 052-verifier-fun */
            if (thisType == kRegTypeZero) {
                /* null pointer always passes (and always fails at runtime) */
            } else {
                ClassObject* thisClass;

                thisClass = regTypeInitializedReferenceToClass(thisType);
                if (thisClass == NULL) {
                    LOG_VFY("VFY: interface call on uninitialized\n");
                    okay = false;
                    break;
                }

                /*
                 * Either "thisClass" needs to be the interface class that
                 * defined absMethod, or absMethod's class needs to be one
                 * of the interfaces implemented by "thisClass".  (Or, if
                 * we couldn't complete the merge, this will be Object.)
                 */
                if (thisClass != absMethod->clazz &&
                    thisClass != gDvm.classJavaLangObject &&
                    !dvmImplements(thisClass, absMethod->clazz))
                {
                    LOG_VFY("VFY: unable to match absMethod '%s' with %s interfaces\n",
                            absMethod->name, thisClass->descriptor);
                    okay = false;
                    break;
                }
            }
#endif

            /*
             * We don't have an object instance, so we can't find the
             * concrete method.  However, all of the type information is
             * in the abstract method, so we're good.
             */
            returnType = getMethodReturnType(absMethod);
            setResultRegisterType(workRegs, insnRegCount, returnType, &okay);
            justSetResult = true;
        }
        break;
+
+    case OP_NEG_INT:
+    case OP_NOT_INT:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeInteger, &okay);
+        break;
+    case OP_NEG_LONG:
+    case OP_NOT_LONG:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeLongLo, kRegTypeLongLo, &okay);
+        break;
+    case OP_NEG_FLOAT:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeFloat, kRegTypeFloat, &okay);
+        break;
+    case OP_NEG_DOUBLE:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeDoubleLo, kRegTypeDoubleLo, &okay);
+        break;
+    case OP_INT_TO_LONG:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeLongLo, kRegTypeInteger, &okay);
+        break;
+    case OP_INT_TO_FLOAT:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeFloat, kRegTypeInteger, &okay);
+        break;
+    case OP_INT_TO_DOUBLE:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeDoubleLo, kRegTypeInteger, &okay);
+        break;
+    case OP_LONG_TO_INT:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeLongLo, &okay);
+        break;
+    case OP_LONG_TO_FLOAT:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeFloat, kRegTypeLongLo, &okay);
+        break;
+    case OP_LONG_TO_DOUBLE:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeDoubleLo, kRegTypeLongLo, &okay);
+        break;
+    case OP_FLOAT_TO_INT:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeFloat, &okay);
+        break;
+    case OP_FLOAT_TO_LONG:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeLongLo, kRegTypeFloat, &okay);
+        break;
+    case OP_FLOAT_TO_DOUBLE:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeDoubleLo, kRegTypeFloat, &okay);
+        break;
+    case OP_DOUBLE_TO_INT:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeDoubleLo, &okay);
+        break;
+    case OP_DOUBLE_TO_LONG:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeLongLo, kRegTypeDoubleLo, &okay);
+        break;
+    case OP_DOUBLE_TO_FLOAT:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeFloat, kRegTypeDoubleLo, &okay);
+        break;
+    case OP_INT_TO_BYTE:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeByte, kRegTypeInteger, &okay);
+        break;
+    case OP_INT_TO_CHAR:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeChar, kRegTypeInteger, &okay);
+        break;
+    case OP_INT_TO_SHORT:
+        checkUnop(workRegs, insnRegCount, &decInsn,
+            kRegTypeShort, kRegTypeInteger, &okay);
+        break;
+
+    case OP_ADD_INT:
+    case OP_SUB_INT:
+    case OP_MUL_INT:
+    case OP_REM_INT:
+    case OP_DIV_INT:
+    case OP_SHL_INT:
+    case OP_SHR_INT:
+    case OP_USHR_INT:
+        checkBinop(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeInteger, kRegTypeInteger, false, &okay);
+        break;
+    case OP_AND_INT:
+    case OP_OR_INT:
+    case OP_XOR_INT:
+        checkBinop(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeInteger, kRegTypeInteger, true, &okay);
+        break;
+    case OP_ADD_LONG:
+    case OP_SUB_LONG:
+    case OP_MUL_LONG:
+    case OP_DIV_LONG:
+    case OP_REM_LONG:
+    case OP_AND_LONG:
+    case OP_OR_LONG:
+    case OP_XOR_LONG:
+        checkBinop(workRegs, insnRegCount, &decInsn,
+            kRegTypeLongLo, kRegTypeLongLo, kRegTypeLongLo, false, &okay);
+        break;
+    case OP_SHL_LONG:
+    case OP_SHR_LONG:
+    case OP_USHR_LONG:
+        /* shift distance is Int, making these different from other binops */
+        checkBinop(workRegs, insnRegCount, &decInsn,
+            kRegTypeLongLo, kRegTypeLongLo, kRegTypeInteger, false, &okay);
+        break;
+    case OP_ADD_FLOAT:
+    case OP_SUB_FLOAT:
+    case OP_MUL_FLOAT:
+    case OP_DIV_FLOAT:
+    case OP_REM_FLOAT:
+        checkBinop(workRegs, insnRegCount, &decInsn,
+            kRegTypeFloat, kRegTypeFloat, kRegTypeFloat, false, &okay);
+        break;
+    case OP_ADD_DOUBLE:
+    case OP_SUB_DOUBLE:
+    case OP_MUL_DOUBLE:
+    case OP_DIV_DOUBLE:
+    case OP_REM_DOUBLE:
+        checkBinop(workRegs, insnRegCount, &decInsn,
+            kRegTypeDoubleLo, kRegTypeDoubleLo, kRegTypeDoubleLo, false, &okay);
+        break;
+    case OP_ADD_INT_2ADDR:
+    case OP_SUB_INT_2ADDR:
+    case OP_MUL_INT_2ADDR:
+    case OP_REM_INT_2ADDR:
+    case OP_SHL_INT_2ADDR:
+    case OP_SHR_INT_2ADDR:
+    case OP_USHR_INT_2ADDR:
+        checkBinop2addr(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeInteger, kRegTypeInteger, false, &okay);
+        break;
+    case OP_AND_INT_2ADDR:
+    case OP_OR_INT_2ADDR:
+    case OP_XOR_INT_2ADDR:
+        checkBinop2addr(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeInteger, kRegTypeInteger, true, &okay);
+        break;
+    case OP_DIV_INT_2ADDR:
+        checkBinop2addr(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeInteger, kRegTypeInteger, false, &okay);
+        break;
+    case OP_ADD_LONG_2ADDR:
+    case OP_SUB_LONG_2ADDR:
+    case OP_MUL_LONG_2ADDR:
+    case OP_DIV_LONG_2ADDR:
+    case OP_REM_LONG_2ADDR:
+    case OP_AND_LONG_2ADDR:
+    case OP_OR_LONG_2ADDR:
+    case OP_XOR_LONG_2ADDR:
+        checkBinop2addr(workRegs, insnRegCount, &decInsn,
+            kRegTypeLongLo, kRegTypeLongLo, kRegTypeLongLo, false, &okay);
+        break;
+    case OP_SHL_LONG_2ADDR:
+    case OP_SHR_LONG_2ADDR:
+    case OP_USHR_LONG_2ADDR:
+        checkBinop2addr(workRegs, insnRegCount, &decInsn,
+            kRegTypeLongLo, kRegTypeLongLo, kRegTypeInteger, false, &okay);
+        break;
+    case OP_ADD_FLOAT_2ADDR:
+    case OP_SUB_FLOAT_2ADDR:
+    case OP_MUL_FLOAT_2ADDR:
+    case OP_DIV_FLOAT_2ADDR:
+    case OP_REM_FLOAT_2ADDR:
+        checkBinop2addr(workRegs, insnRegCount, &decInsn,
+            kRegTypeFloat, kRegTypeFloat, kRegTypeFloat, false, &okay);
+        break;
+    case OP_ADD_DOUBLE_2ADDR:
+    case OP_SUB_DOUBLE_2ADDR:
+    case OP_MUL_DOUBLE_2ADDR:
+    case OP_DIV_DOUBLE_2ADDR:
+    case OP_REM_DOUBLE_2ADDR:
+        checkBinop2addr(workRegs, insnRegCount, &decInsn,
+            kRegTypeDoubleLo, kRegTypeDoubleLo, kRegTypeDoubleLo, false, &okay);
+        break;
+    case OP_ADD_INT_LIT16:
+    case OP_RSUB_INT:
+    case OP_MUL_INT_LIT16:
+    case OP_DIV_INT_LIT16:
+    case OP_REM_INT_LIT16:
+        checkLitop(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeInteger, false, &okay);
+        break;
+    case OP_AND_INT_LIT16:
+    case OP_OR_INT_LIT16:
+    case OP_XOR_INT_LIT16:
+        checkLitop(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeInteger, true, &okay);
+        break;
+    case OP_ADD_INT_LIT8:
+    case OP_RSUB_INT_LIT8:
+    case OP_MUL_INT_LIT8:
+    case OP_DIV_INT_LIT8:
+    case OP_REM_INT_LIT8:
+    case OP_SHL_INT_LIT8:
+    case OP_SHR_INT_LIT8:
+    case OP_USHR_INT_LIT8:
+        checkLitop(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeInteger, false, &okay);
+        break;
+    case OP_AND_INT_LIT8:
+    case OP_OR_INT_LIT8:
+    case OP_XOR_INT_LIT8:
+        checkLitop(workRegs, insnRegCount, &decInsn,
+            kRegTypeInteger, kRegTypeInteger, true, &okay);
+        break;
+
+
+    case OP_EXECUTE_INLINE:
+    case OP_INVOKE_DIRECT_EMPTY:
+        okay = false;               // TODO - implement optimized opcodes
+        break;
+    case OP_IGET_QUICK:
+    case OP_IGET_WIDE_QUICK:
+    case OP_IGET_OBJECT_QUICK:
+    case OP_IPUT_QUICK:
+    case OP_IPUT_WIDE_QUICK:
+    case OP_IPUT_OBJECT_QUICK:
+    case OP_INVOKE_VIRTUAL_QUICK:
+    case OP_INVOKE_VIRTUAL_QUICK_RANGE:
+    case OP_INVOKE_SUPER_QUICK:
+    case OP_INVOKE_SUPER_QUICK_RANGE:
+        okay = false;               // TODO - implement optimized opcodes
+        break;
+
+    /* these should never appear */
+    case OP_UNUSED_3E:
+    case OP_UNUSED_3F:
+    case OP_UNUSED_40:
+    case OP_UNUSED_41:
+    case OP_UNUSED_42:
+    case OP_UNUSED_43:
+    case OP_UNUSED_73:
+    case OP_UNUSED_79:
+    case OP_UNUSED_7A:
+    case OP_UNUSED_E3:
+    case OP_UNUSED_E4:
+    case OP_UNUSED_E5:
+    case OP_UNUSED_E6:
+    case OP_UNUSED_E7:
+    case OP_UNUSED_E8:
+    case OP_UNUSED_E9:
+    case OP_UNUSED_EA:
+    case OP_UNUSED_EB:
+    case OP_UNUSED_EC:
+    case OP_UNUSED_ED:
+    case OP_UNUSED_EF:
+    case OP_UNUSED_F1:
+    case OP_UNUSED_FC:
+    case OP_UNUSED_FD:
+    case OP_UNUSED_FE:
+    case OP_UNUSED_FF:
+        okay = false;
+        break;
+
+    /*
+     * DO NOT add a "default" clause here.  Without it the compiler will
+     * complain if an instruction is missing (which is desirable).
+     */
+    }
+
+    if (!okay) {
+        LOG_VFY_METH(meth, "VFY:  rejecting opcode 0x%02x at 0x%04x\n",
+            decInsn.opCode, insnIdx);
+        goto bail;
+    }
+
+    /*
+     * If we didn't just set the result register, clear it out.  This
+     * ensures that you can only use "move-result" immediately after the
+     * result is set.
+     */
+    if (!justSetResult) {
+        int reg = RESULT_REGISTER(insnRegCount);
+        workRegs[reg] = workRegs[reg+1] = kRegTypeUnknown;
+    }
+
+    /*
+     * Handle "continue".  Tag the next consecutive instruction.
+     */
+    if ((nextFlags & kInstrCanContinue) != 0) {
+        int insnWidth = dvmInsnGetWidth(insnFlags, insnIdx);
+        if (insnIdx+insnWidth >= insnsSize) {
+            LOG_VFY_METH(meth,
+                "VFY: execution can walk off end of code area (from 0x%x)\n",
+                insnIdx);
+            goto bail;
+        }
+
+        /*
+         * The only way to get to a move-exception instruction is to get
+         * thrown there.  Make sure the next instruction isn't one.
+         */
+        if (!checkMoveException(meth, insnIdx+insnWidth, "next"))
+            goto bail;
+
+        /*
+         * We want to update the registers and set the "changed" flag on the
+         * next instruction (if necessary).  We may not be storing register
+         * changes for all addresses, so for non-branch targets we just
+         * compare "entry" vs. "work" to see if we've changed anything.
+         */
+        if (getRegisterLine(regTable, insnIdx+insnWidth) != NULL) {
+            updateRegisters(meth, insnFlags, regTable, insnIdx+insnWidth,
+                workRegs);
+        } else {
+            /* if not yet visited, or regs were updated, set "changed" */
+            if (!dvmInsnIsVisited(insnFlags, insnIdx+insnWidth) ||
+                compareRegisters(workRegs, entryRegs,
+                    insnRegCount + kExtraRegs) != 0)
+            {
+                dvmInsnSetChanged(insnFlags, insnIdx+insnWidth, true);
+            }
+        }
+    }
+
+    /*
+     * Handle "branch".  Tag the branch target.
+     */
+    if ((nextFlags & kInstrCanBranch) != 0) {
+        bool isConditional;
+
+        if (!dvmGetBranchTarget(meth, insnFlags, insnIdx, &branchTarget,
+                &isConditional))
+        {
+            /* should never happen after static verification */
+            LOG_VFY_METH(meth, "VFY: bad branch at %d\n", insnIdx);
+            goto bail;
+        }
+        assert(isConditional || (nextFlags & kInstrCanContinue) == 0);
+        assert(!isConditional || (nextFlags & kInstrCanContinue) != 0);
+
+        if (!checkMoveException(meth, insnIdx+branchTarget, "branch"))
+            goto bail;
+
+        updateRegisters(meth, insnFlags, regTable, insnIdx+branchTarget,
+            workRegs);
+    }
+
+    /*
+     * Handle "switch".  Tag all possible branch targets.
+     *
+     * We've already verified that the table is structurally sound, so we
+     * just need to walk through and tag the targets.
+     */
+    if ((nextFlags & kInstrCanSwitch) != 0) {
+        int offsetToSwitch = insns[1] | (((s4)insns[2]) << 16);
+        const u2* switchInsns = insns + offsetToSwitch;
+        int switchCount = switchInsns[1];
+        int offsetToTargets, targ;
+
+        if ((*insns & 0xff) == OP_PACKED_SWITCH) {
+            /* 0=sig, 1=count, 2/3=firstKey */
+            offsetToTargets = 4;
+        } else {
+            /* 0=sig, 1=count, 2..count*2 = keys */
+            assert((*insns & 0xff) == OP_SPARSE_SWITCH);
+            offsetToTargets = 2 + 2*switchCount;
+        }
+
+        /* verify each switch target */
+        for (targ = 0; targ < switchCount; targ++) {
+            int offset, absOffset;
+
+            /* offsets are 32-bit, and only partly endian-swapped */
+            offset = switchInsns[offsetToTargets + targ*2] |
+                     (((s4) switchInsns[offsetToTargets + targ*2 +1]) << 16);
+            absOffset = insnIdx + offset;
+
+            assert(absOffset >= 0 && absOffset < insnsSize);
+
+            if (!checkMoveException(meth, absOffset, "switch"))
+                goto bail;
+
+            updateRegisters(meth, insnFlags, regTable, absOffset, workRegs);
+        }
+    }
+
+    /*
+     * Handle instructions that can throw and that are sitting in a
+     * "try" block.  (If they're not in a "try" block when they throw,
+     * control transfers out of the method.)
+     */
+    if ((nextFlags & kInstrCanThrow) != 0 && dvmInsnIsInTry(insnFlags, insnIdx))
+    {
+        DexFile* pDexFile = meth->clazz->pDvmDex->pDexFile;
+        const DexCode* pCode = dvmGetMethodCode(meth);
+        DexCatchIterator iterator;
+
+        if (dexFindCatchHandler(&iterator, pCode, insnIdx)) {
+            for (;;) {
+                DexCatchHandler* handler = dexCatchIteratorNext(&iterator);
+
+                if (handler == NULL) {
+                    break;
+                }
+
+                /* note we use entryRegs, not workRegs */
+                updateRegisters(meth, insnFlags, regTable, handler->address,
+                    entryRegs);
+            }
+        }
+    }
+
+    /*
+     * Update startGuess.  Advance to the next instruction if that's
+     * possible, otherwise use the branch target if one was found.  If
+     * neither of those exists we're in a return or throw; leave startGuess
+     * alone and let the caller sort it out.
+     */
+    if ((nextFlags & kInstrCanContinue) != 0) {
+        *pStartGuess = insnIdx + dvmInsnGetWidth(insnFlags, insnIdx);
+    } else if ((nextFlags & kInstrCanBranch) != 0) {
+        /* okay if branchTarget is zero */
+        *pStartGuess = insnIdx + branchTarget;
+    }
+
+    assert(*pStartGuess >= 0 && *pStartGuess < insnsSize &&
+        dvmInsnGetWidth(insnFlags, *pStartGuess) != 0);
+
+    result = true;
+
+bail:
+    return result;
+}
+
+/*
+ * Callback handed to dexDecodeDebugInfo() by dumpRegTypes(): logs each
+ * local variable whose live range [startAddress, endAddress) covers the
+ * code address pointed to by "cnxt".
+ */
+static void logLocalsCb(void *cnxt, u2 reg, u4 startAddress, u4 endAddress,
+        const char *name, const char *descriptor,
+        const char *signature)
+{
+    int curAddr = *((int *)cnxt);
+
+    /* skip locals that aren't live at this address */
+    if (curAddr < (int) startAddress || curAddr >= (int) endAddress)
+        return;
+
+    LOGI("        %2d: '%s' %s\n", reg, name, descriptor);
+}
+
+/*
+ * Dump the register types for the specified address to the log file.
+ *
+ * "addrRegs" holds one RegType per register plus kExtraRegs trailing
+ * entries (the method result register pair, shown as "RS").  "addrName",
+ * if non-NULL, is printed instead of the address when addr is zero.
+ * "displayFlags" can request extra detail (DRT_SHOW_REF_TYPES,
+ * DRT_SHOW_LOCALS).
+ */
+static void dumpRegTypes(const Method* meth, const InsnFlags* insnFlags,
+    const RegType* addrRegs, int addr, const char* addrName,
+    const UninitInstanceMap* uninitMap, int displayFlags)
+{
+    int regCount = meth->registersSize;
+    int fullRegCount = regCount + kExtraRegs;
+    bool branchTarget = dvmInsnIsBranchTarget(insnFlags, addr);
+    int i;
+
+    assert(addr >= 0 && addr < (int) dvmGetMethodInsnsSize(meth));
+
+    /*
+     * Size of the summary string: one char per register, a space after
+     * every fourth, a two-char gap before the result registers, plus the
+     * closing ']' and NUL (the opening '[' rides on the +2/+1 slack).
+     */
+    int regCharSize = fullRegCount + (fullRegCount-1)/4 + 2 +1;
+    char regChars[regCharSize +1];
+    memset(regChars, ' ', regCharSize);
+    regChars[0] = '[';
+    if (regCount == 0)
+        regChars[1] = ']';
+    else
+        regChars[1 + (regCount-1) + (regCount-1)/4 +1] = ']';
+    regChars[regCharSize] = '\0';
+
+    //const RegType* addrRegs = getRegisterLine(regTable, addr);
+
+    /* encode each register's type as a single character */
+    for (i = 0; i < regCount + kExtraRegs; i++) {
+        char tch;
+
+        switch (addrRegs[i]) {
+        case kRegTypeUnknown:       tch = '.';  break;
+        case kRegTypeConflict:      tch = 'X';  break;
+        case kRegTypeFloat:         tch = 'F';  break;
+        case kRegTypeZero:          tch = '0';  break;
+        case kRegTypeOne:           tch = '1';  break;
+        case kRegTypeBoolean:       tch = 'Z';  break;
+        case kRegTypePosByte:       tch = 'b';  break;
+        case kRegTypeByte:          tch = 'B';  break;
+        case kRegTypePosShort:      tch = 's';  break;
+        case kRegTypeShort:         tch = 'S';  break;
+        case kRegTypeChar:          tch = 'C';  break;
+        case kRegTypeInteger:       tch = 'I';  break;
+        case kRegTypeLongLo:        tch = 'J';  break;
+        case kRegTypeLongHi:        tch = 'j';  break;
+        case kRegTypeDoubleLo:      tch = 'D';  break;
+        case kRegTypeDoubleHi:      tch = 'd';  break;
+        default:
+            /* not a primitive category: must be a reference type */
+            if (regTypeIsReference(addrRegs[i])) {
+                if (regTypeIsUninitReference(addrRegs[i]))
+                    tch = 'U';
+                else
+                    tch = 'L';
+            } else {
+                tch = '*';
+                assert(false);
+            }
+            break;
+        }
+
+        /* result registers (i >= regCount) are offset 2 extra columns */
+        if (i < regCount)
+            regChars[1 + i + (i/4)] = tch;
+        else
+            regChars[1 + i + (i/4) + 2] = tch;
+    }
+
+    /* '>' marks branch targets; addr 0 may show a symbolic name instead */
+    if (addr == 0 && addrName != NULL)
+        LOGI("%c%s %s\n", branchTarget ? '>' : ' ', addrName, regChars);
+    else
+        LOGI("%c0x%04x %s\n", branchTarget ? '>' : ' ', addr, regChars);
+
+    if (displayFlags & DRT_SHOW_REF_TYPES) {
+        /* one line per live (non-zero) reference register with its class */
+        for (i = 0; i < regCount + kExtraRegs; i++) {
+            if (regTypeIsReference(addrRegs[i]) && addrRegs[i] != kRegTypeZero)
+            {
+                ClassObject* clazz;
+
+                clazz = regTypeReferenceToClass(addrRegs[i], uninitMap);
+                assert(dvmValidateObject((Object*)clazz));
+                if (i < regCount) {
+                    LOGI("        %2d: 0x%08x %s%s\n",
+                        i, addrRegs[i],
+                        regTypeIsUninitReference(addrRegs[i]) ? "[U]" : "",
+                        clazz->descriptor);
+                } else {
+                    LOGI("        RS: 0x%08x %s%s\n",
+                        addrRegs[i],
+                        regTypeIsUninitReference(addrRegs[i]) ? "[U]" : "",
+                        clazz->descriptor);
+                }
+            }
+        }
+    }
+    if (displayFlags & DRT_SHOW_LOCALS) {
+        /* walk the debug info; logLocalsCb prints locals live at "addr" */
+        dexDecodeDebugInfo(meth->clazz->pDvmDex->pDexFile,
+                dvmGetMethodCode(meth),
+                meth->clazz->descriptor,
+                meth->prototype.protoIdx,
+                meth->accessFlags,
+                NULL, logLocalsCb, &addr);
+    }
+}
diff --git a/vm/analysis/CodeVerify.h b/vm/analysis/CodeVerify.h
new file mode 100644
index 0000000..ad93897
--- /dev/null
+++ b/vm/analysis/CodeVerify.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik bytecode verifier.
+ */
+#ifndef _DALVIK_CODEVERIFY
+#define _DALVIK_CODEVERIFY
+
+
+/*
+ * InsnFlags is a 32-bit integer with the following layout:
+ *  0-15  instruction width (or 0 if this address doesn't hold the start
+ *        of an instruction; a nonzero width is what marks an opcode
+ *        address -- see dvmInsnIsOpcode)
+ *  16    try block (indicating exceptions thrown here may be caught locally)
+ *  17    branch target (a branch or exception handler lands here)
+ *  30    visited (verifier has examined this instruction at least once)
+ *  31    changed (set/cleared as bytecode verifier runs)
+ */
+typedef u4 InsnFlags;
+
+/*
+ * Flag bits.  Use unsigned literals: left-shifting 1 into bit 31 of a
+ * signed int is undefined behavior, and the values live in the unsigned
+ * InsnFlags (u4) type anyway.
+ */
+#define kInsnFlagWidthMask      0x0000ffff
+#define kInsnFlagInTry          (1U << 16)
+#define kInsnFlagBranchTarget   (1U << 17)
+#define kInsnFlagVisited        (1U << 30)
+#define kInsnFlagChanged        (1U << 31)
+
+/*
+ * Returns "true" if the flags indicate that this address holds the start
+ * of an instruction (the stored instruction width is nonzero).
+ */
+INLINE bool dvmInsnIsOpcode(const InsnFlags* insnFlags, int addr) {
+    InsnFlags width = insnFlags[addr] & kInsnFlagWidthMask;
+    return width != 0;
+}
+
+/*
+ * Extract the unsigned 16-bit instruction width stored for "addr"
+ * (zero if the address doesn't hold the start of an instruction).
+ */
+INLINE int dvmInsnGetWidth(const InsnFlags* insnFlags, int addr) {
+    return (int) (insnFlags[addr] & kInsnFlagWidthMask);
+}
+
+/*
+ * Query or update the "changed" flag, which marks an instruction whose
+ * incoming register state was modified, so the verifier must visit it
+ * (again).
+ */
+INLINE bool dvmInsnIsChanged(const InsnFlags* insnFlags, int addr) {
+    return (insnFlags[addr] & kInsnFlagChanged) ? true : false;
+}
+INLINE void dvmInsnSetChanged(InsnFlags* insnFlags, int addr, bool changed)
+{
+    /* clear the bit, then OR it back in if requested */
+    insnFlags[addr] &= ~kInsnFlagChanged;
+    if (changed)
+        insnFlags[addr] |= kInsnFlagChanged;
+}
+
+/*
+ * Query or update the "visited" flag, which records that the verifier has
+ * examined the instruction at this address at least once.
+ */
+INLINE bool dvmInsnIsVisited(const InsnFlags* insnFlags, int addr) {
+    return (insnFlags[addr] & kInsnFlagVisited) != 0;
+}
+/* parameter renamed from "changed" (copy-paste from the setter above) */
+INLINE void dvmInsnSetVisited(InsnFlags* insnFlags, int addr, bool visited)
+{
+    if (visited)
+        insnFlags[addr] |= kInsnFlagVisited;
+    else
+        insnFlags[addr] &= ~kInsnFlagVisited;
+}
+
+/*
+ * Returns "true" if either the "visited" or the "changed" flag is set
+ * for this address.
+ */
+INLINE bool dvmInsnIsVisitedOrChanged(const InsnFlags* insnFlags, int addr) {
+    return dvmInsnIsVisited(insnFlags, addr)
+        || dvmInsnIsChanged(insnFlags, addr);
+}
+
+/*
+ * Query or set the "in try block" flag, indicating that exceptions thrown
+ * at this address may be caught locally.
+ */
+INLINE bool dvmInsnIsInTry(const InsnFlags* insnFlags, int addr) {
+    return (insnFlags[addr] & kInsnFlagInTry) ? true : false;
+}
+INLINE void dvmInsnSetInTry(InsnFlags* insnFlags, int addr, bool inTry)
+{
+    /* the flag is only ever set, never cleared */
+    assert(inTry);
+    insnFlags[addr] |= kInsnFlagInTry;
+}
+
+/*
+ * Query or set the "branch target" flag, indicating that this address is
+ * reached by a branch or serves as an exception handler.
+ */
+INLINE bool dvmInsnIsBranchTarget(const InsnFlags* insnFlags, int addr) {
+    return (insnFlags[addr] & kInsnFlagBranchTarget) ? true : false;
+}
+INLINE void dvmInsnSetBranchTarget(InsnFlags* insnFlags, int addr,
+    bool isBranch)
+{
+    /* the flag is only ever set, never cleared */
+    assert(isBranch);
+    insnFlags[addr] |= kInsnFlagBranchTarget;
+}
+
+
+/*
+ * Table that maps uninitialized instances to classes, based on the
+ * address of the new-instance instruction.
+ *
+ * "map[1]" is the pre-C99 variable-length-struct idiom, not a fixed
+ * one-entry array; presumably the allocator reserves trailing storage
+ * for numEntries elements -- see dvmCreateUninitInstanceMap.
+ */
+typedef struct UninitInstanceMap {
+    int numEntries;
+    struct {
+        int             addr;   /* code offset, or -1 for method arg ("this") */
+        ClassObject*    clazz;  /* class created at this address */
+    } map[1];
+} UninitInstanceMap;
+/* sentinel address and map slot reserved for an uninitialized "this" arg */
+#define kUninitThisArgAddr  (-1)
+#define kUninitThisArgSlot  0
+
+/*
+ * Create a new UninitInstanceMap with room for "newInstanceCount" entries.
+ */
+UninitInstanceMap* dvmCreateUninitInstanceMap(const Method* meth,
+    const InsnFlags* insnFlags, int newInstanceCount);
+
+/*
+ * Release the storage associated with an UninitInstanceMap.
+ */
+void dvmFreeUninitInstanceMap(UninitInstanceMap* uninitMap);
+
+/*
+ * Associate a class with an address.  Returns the map slot index, or -1
+ * if the address isn't listed in the map (shouldn't happen) or if a
+ * different class is already associated with the address (shouldn't
+ * happen either).
+ */
+int dvmSetUninitInstance(UninitInstanceMap* uninitMap, int addr, 
+    ClassObject* clazz);
+
+/*
+ * Return the class associated with an uninitialized reference.  Pass in
+ * the map index.
+ */
+ClassObject* dvmGetUninitInstance(const UninitInstanceMap* uninitMap, int idx);
+
+/*
+ * Clear the class associated with an uninitialized reference.  Pass in
+ * the map index.
+ */
+//void dvmClearUninitInstance(UninitInstanceMap* uninitMap, int idx);
+
+
+/*
+ * Verify bytecode in "meth".  "insnFlags" should be populated with
+ * instruction widths and "in try" flags.
+ */
+bool dvmVerifyCodeFlow(const Method* meth, InsnFlags* insnFlags,
+    UninitInstanceMap* uninitMap);
+
+/*
+ * Log standard method info for rejection message.
+ */
+void dvmLogVerifyFailure(const Method* meth, const char* format, ...);
+
+/*
+ * Extract the relative branch target from a branch instruction.
+ *
+ * Stores the relative offset in "*pOffset" and whether the branch is
+ * conditional in "*pConditional".  Returns "false" if the instruction
+ * isn't a valid branch.
+ */
+bool dvmGetBranchTarget(const Method* meth, InsnFlags* insnFlags,
+    int curOffset, int* pOffset, bool* pConditional);
+
+#endif /*_DALVIK_CODEVERIFY*/
diff --git a/vm/analysis/DexOptimize.c b/vm/analysis/DexOptimize.c
new file mode 100644
index 0000000..a726bf9
--- /dev/null
+++ b/vm/analysis/DexOptimize.c
@@ -0,0 +1,2037 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Convert the output from "dx" into a locally-optimized DEX file.
+ *
+ * TODO: the format of the optimized header is currently "whatever we
+ * happen to write", since the VM that writes it is by definition the same
+ * as the VM that reads it.  Still, it should be better documented and
+ * more rigorously structured.
+ */
+#include "Dalvik.h"
+#include "libdex/InstrUtils.h"
+#include "libdex/OptInvocation.h"
+
+#include <zlib.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/wait.h>
+#include <fcntl.h>
+#include <errno.h>
+
+/*
+ * Virtual/direct calls to "method" are replaced with an execute-inline
+ * instruction with index "idx".
+ */
+typedef struct InlineSub {
+    Method* method;
+    int     inlineIdx;
+} InlineSub;
+
+/* fwd */
+static int writeDependencies(int fd, u4 modWhen, u4 crc);
+static bool writeAuxData(int fd, const DexClassLookup* pClassLookup);
+static void logFailedWrite(size_t expected, ssize_t actual, const char* msg,
+    int err);
+
+static bool rewriteDex(u1* addr, int len, bool doVerify, bool doOpt,
+    u4* pHeaderFlags, DexClassLookup** ppClassLookup);
+static bool loadAllClasses(DvmDex* pDvmDex);
+static void optimizeLoadedClasses(DexFile* pDexFile);
+static void optimizeClass(ClassObject* clazz, const InlineSub* inlineSubs);
+static bool optimizeMethod(Method* method, const InlineSub* inlineSubs);
+static void rewriteInstField(Method* method, u2* insns, OpCode newOpc);
+static bool rewriteVirtualInvoke(Method* method, u2* insns, OpCode newOpc);
+static bool rewriteDirectInvoke(Method* method, u2* insns);
+static bool rewriteExecuteInline(Method* method, u2* insns,
+    MethodType methodType, const InlineSub* inlineSubs);
+
+
+/*
+ * Return the fd of an open file in the DEX file cache area.  If the cache
+ * file doesn't exist or is out of date, this will remove the old entry,
+ * create a new one (writing only the file header), and return with the
+ * "new file" flag set.
+ *
+ * It's possible to execute from an unoptimized DEX file directly,
+ * assuming the byte ordering and structure alignment is correct, but
+ * disadvantageous because some significant optimizations are not possible.
+ * It's not generally possible to do the same from an uncompressed Jar
+ * file entry, because we have to guarantee 32-bit alignment in the
+ * memory-mapped file.
+ *
+ * For a Jar/APK file (a zip archive with "classes.dex" inside), "modWhen"
+ * and "crc32" come from the Zip directory entry.  For a stand-alone DEX
+ * file, it's the modification date of the file and the Adler32 from the
+ * DEX header (which immediately follows the magic).  If these don't
+ * match what's stored in the opt header, we reject the file immediately.
+ *
+ * On success, the file descriptor will be positioned just past the "opt"
+ * file header, and will be locked with flock.  "*pCachedName" will point
+ * to newly-allocated storage.
+ */
+int dvmOpenCachedDexFile(const char* fileName, const char* subFileName,
+    u4 modWhen, u4 crc, bool isBootstrap, char** pCachedName, bool* pNewFile,
+    bool createIfMissing)
+{
+    char cacheFileName[512];
+    int fd, cc;
+    struct stat fdStat, fileStat;
+    bool readOnly = false;
+
+    *pNewFile = false;
+
+    if (!dexOptGenerateCacheFileName(fileName, subFileName, cacheFileName,
+            sizeof(cacheFileName)))
+        return -1;
+
+retry:
+    /*
+     * Try to open the cache file.  If we've been asked to,
+     * create it if it doesn't exist.
+     */
+    fd = createIfMissing ? open(cacheFileName, O_CREAT|O_RDWR, 0644) : -1;
+    if (fd < 0) {
+        fd = open(cacheFileName, O_RDONLY, 0);
+        if (fd < 0) {
+            if (createIfMissing) {
+                LOGE("Can't open dex cache '%s': %s\n",
+                    cacheFileName, strerror(errno));
+            }
+            return fd;
+        }
+        /* read-write open failed; fall back to using (but not updating)
+         * an existing cache entry */
+        readOnly = true;
+    }
+
+    /*
+     * Grab an exclusive lock on the cache file.  If somebody else is
+     * working on it, we'll block here until they complete.  Because
+     * we're waiting on an external resource, we go into VMWAIT mode.
+     */
+    int oldStatus;
+    LOGV("DexOpt: locking cache file %s (fd=%d, boot=%d)\n",
+        cacheFileName, fd, isBootstrap);
+    oldStatus = dvmChangeStatus(NULL, THREAD_VMWAIT);
+    /* try non-blocking first so we only log when we actually have to wait */
+    cc = flock(fd, LOCK_EX | LOCK_NB);
+    if (cc != 0) {
+        LOGD("DexOpt: sleeping on flock(%s)\n", cacheFileName);
+        cc = flock(fd, LOCK_EX);
+    }
+    dvmChangeStatus(NULL, oldStatus);
+    if (cc != 0) {
+        LOGE("Can't lock dex cache '%s': %d\n", cacheFileName, cc);
+        close(fd);
+        return -1;
+    }
+    LOGV("DexOpt:  locked cache file\n");
+
+    /*
+     * Check to see if the fd we opened and locked matches the file in
+     * the filesystem.  If they don't, then somebody else unlinked ours
+     * and created a new file, and we need to use that one instead.  (If
+     * we caught them between the unlink and the create, we'll get an
+     * ENOENT from the file stat.)
+     */
+    cc = fstat(fd, &fdStat);
+    if (cc != 0) {
+        LOGE("Can't stat open file '%s'\n", cacheFileName);
+        LOGVV("DexOpt: unlocking cache file %s\n", cacheFileName);
+        goto close_fail;
+    }
+    cc = stat(cacheFileName, &fileStat);
+    if (cc != 0 || 
+        fdStat.st_dev != fileStat.st_dev || fdStat.st_ino != fileStat.st_ino)
+    {
+        LOGD("DexOpt: our open cache file is stale; sleeping and retrying\n");
+        LOGVV("DexOpt: unlocking cache file %s\n", cacheFileName);
+        flock(fd, LOCK_UN);
+        close(fd);
+        usleep(250 * 1000);     /* if something is hosed, don't peg machine */
+        goto retry;
+    }
+
+    /*
+     * We have the correct file open and locked.  If the file size is zero,
+     * then it was just created by us, and we want to fill in some fields
+     * in the "opt" header and set "*pNewFile".  Otherwise, we want to
+     * verify that the fields in the header match our expectations, and
+     * reset the file if they don't.
+     */
+    if (fdStat.st_size == 0) {
+        if (readOnly) {
+            LOGW("DexOpt: file has zero length and isn't writable\n");
+            goto close_fail;
+        }
+        cc = dexOptCreateEmptyHeader(fd);
+        if (cc != 0)
+            goto close_fail;
+        *pNewFile = true;
+        LOGV("DexOpt: successfully initialized new cache file\n");
+    } else {
+        bool expectVerify, expectOpt;
+
+        /* derive the verify/opt expectations from the global VM settings,
+         * mirroring the logic in dvmContinueOptimization() */
+        if (gDvm.classVerifyMode == VERIFY_MODE_NONE)
+            expectVerify = false;
+        else if (gDvm.classVerifyMode == VERIFY_MODE_REMOTE)
+            expectVerify = !isBootstrap;
+        else /*if (gDvm.classVerifyMode == VERIFY_MODE_ALL)*/
+            expectVerify = true;
+
+        if (gDvm.dexOptMode == OPTIMIZE_MODE_NONE)
+            expectOpt = false;
+        else if (gDvm.dexOptMode == OPTIMIZE_MODE_VERIFIED)
+            expectOpt = expectVerify;
+        else /*if (gDvm.dexOptMode == OPTIMIZE_MODE_ALL)*/
+            expectOpt = true;
+
+        LOGV("checking deps, expecting vfy=%d opt=%d\n",
+            expectVerify, expectOpt);
+
+        if (!dvmCheckOptHeaderAndDependencies(fd, true, modWhen, crc,
+                expectVerify, expectOpt))
+        {
+            if (readOnly) {
+                /*
+                 * We could unlink and rewrite the file if we own it or
+                 * the "sticky" bit isn't set on the directory.  However,
+                 * we're not able to truncate it, which spoils things.  So,
+                 * give up now.
+                 */
+                if (createIfMissing) {
+                    LOGW("Cached DEX '%s' (%s) is stale and not writable\n",
+                        fileName, cacheFileName);
+                }
+                goto close_fail;
+            }
+
+            /*
+             * If we truncate the existing file before unlinking it, any
+             * process that has it mapped will fail when it tries to touch
+             * the pages.
+             *
+             * This is very important.  The zygote process will have the
+             * boot DEX files (core, framework, etc.) mapped early.  If
+             * (say) core.dex gets updated, and somebody launches an app
+             * that uses App.dex, then App.dex gets reoptimized because it's
+             * dependent upon the boot classes.  However, dexopt will be
+             * using the *new* core.dex to do the optimizations, while the
+             * app will actually be running against the *old* core.dex
+             * because it starts from zygote.
+             *
+             * Even without zygote, it's still possible for a class loader
+             * to pull in an APK that was optimized against an older set
+             * of DEX files.  We must ensure that everything fails when a
+             * boot DEX gets updated, and for general "why aren't my
+             * changes doing anything" purposes its best if we just make
+             * everything crash when a DEX they're using gets updated.
+             */
+            LOGD("Stale deps in cache file; removing and retrying\n");
+            if (ftruncate(fd, 0) != 0) {
+                LOGW("Warning: unable to truncate cache file '%s': %s\n",
+                    cacheFileName, strerror(errno));
+                /* keep going */
+            }
+            if (unlink(cacheFileName) != 0) {
+                LOGW("Warning: unable to remove cache file '%s': %d %s\n",
+                    cacheFileName, errno, strerror(errno));
+                /* keep going; permission failure should probably be fatal */
+            }
+            LOGVV("DexOpt: unlocking cache file %s\n", cacheFileName);
+            flock(fd, LOCK_UN);
+            close(fd);
+            goto retry;
+        } else {
+            LOGV("DexOpt: good deps in cache file\n");
+        }
+    }
+
+    /* caller takes ownership of the strdup'ed name; fd stays flock'ed */
+    *pCachedName = strdup(cacheFileName);
+
+    assert(fd >= 0);
+    return fd;
+
+close_fail:
+    flock(fd, LOCK_UN);
+    close(fd);
+    return -1;
+}
+
+/*
+ * Unlock the file descriptor.
+ *
+ * Returns "true" on success.
+ */
+bool dvmUnlockCachedDexFile(int fd)
+{
+    LOGVV("DexOpt: unlocking cache file fd=%d\n", fd);
+    /* releases the flock taken in dvmOpenCachedDexFile() */
+    return (flock(fd, LOCK_UN) == 0);
+}
+
+
+/*
+ * Given a descriptor for a file with DEX data in it, produce an
+ * optimized version.
+ *
+ * The file pointed to by "fd" is expected to be a locked shared resource
+ * (or private); we make no efforts to enforce multi-process correctness
+ * here.
+ *
+ * "fileName" is only used for debug output.  "modWhen" and "crc" are stored
+ * in the dependency set.
+ *
+ * The "isBootstrap" flag determines how the optimizer and verifier handle
+ * package-scope access checks.  When optimizing, we only load the bootstrap
+ * class DEX files and the target DEX, so the flag determines whether the
+ * target DEX classes are given a (synthetic) non-NULL classLoader pointer.
+ * This only really matters if the target DEX contains classes that claim to
+ * be in the same package as bootstrap classes.
+ *
+ * The optimizer will need to load every class in the target DEX file.
+ * This is generally undesirable, so we start a subprocess to do the
+ * work and wait for it to complete.
+ *
+ * Returns "true" on success.  All data will have been written to "fd".
+ */
+bool dvmOptimizeDexFile(int fd, off_t dexOffset, long dexLength,
+    const char* fileName, u4 modWhen, u4 crc, bool isBootstrap)
+{
+    /* short name for log messages; empty string if no '/' in path */
+    const char* lastPart = strrchr(fileName, '/');
+    if (lastPart != NULL)
+        lastPart++;
+    else
+        lastPart = "";
+
+    /*
+     * For basic optimizations (byte-swapping and structure aligning) we
+     * don't need to fork().  It looks like fork+exec is causing problems
+     * with gdb on our bewildered Linux distro, so in some situations we
+     * want to avoid this.
+     *
+     * For optimization and/or verification, we need to load all the classes.
+     */
+    if (gDvm.classVerifyMode == VERIFY_MODE_NONE &&
+        (gDvm.dexOptMode == OPTIMIZE_MODE_NONE ||
+         gDvm.dexOptMode == OPTIMIZE_MODE_VERIFIED))
+    {
+        LOGD("DexOpt: --- BEGIN (quick) '%s' ---\n", lastPart);
+        return dvmContinueOptimization(fd, dexOffset, dexLength,
+                fileName, modWhen, crc, isBootstrap);
+    }
+
+
+    LOGD("DexOpt: --- BEGIN '%s' (bootstrap=%d) ---\n", lastPart, isBootstrap);
+
+    pid_t pid;
+
+    /*
+     * This could happen if something in our bootclasspath, which we thought
+     * was all optimized, got rejected.
+     */
+    if (gDvm.optimizing) {
+        LOGW("Rejecting recursive optimization attempt on '%s'\n", fileName);
+        return false;
+    }
+
+    pid = fork();
+    if (pid == 0) {
+        /* child: build an argv and exec the stand-alone dexopt helper */
+        static const int kUseValgrind = 0;
+        static const char* kDexOptBin = "/bin/dexopt";
+        static const char* kValgrinder = "/usr/bin/valgrind";
+        static const int kFixedArgCount = 10;
+        static const int kValgrindArgCount = 5;
+        static const int kMaxIntLen = 12;   // '-'+10dig+'\0' -OR- 0x+8dig
+        int bcpSize = dvmGetBootPathSize();
+        int argc = kFixedArgCount + bcpSize
+            + (kValgrindArgCount * kUseValgrind);
+        char* argv[argc+1];             // last entry is NULL
+        char values[argc][kMaxIntLen];
+        char* execFile;
+        char* androidRoot;
+        int flags;
+
+        /* full path to optimizer */
+        androidRoot = getenv("ANDROID_ROOT");
+        if (androidRoot == NULL) {
+            LOGW("ANDROID_ROOT not set, defaulting to /system\n");
+            androidRoot = "/system";
+        }
+        /* NOTE(review): malloc result is not checked here; the child
+         * assumes this small allocation succeeds before exec */
+        execFile = malloc(strlen(androidRoot) + strlen(kDexOptBin) + 1);
+        strcpy(execFile, androidRoot);
+        strcat(execFile, kDexOptBin);
+
+        /*
+         * Create arg vector.
+         */
+        int curArg = 0;
+
+        if (kUseValgrind) {
+            /* probably shouldn't ship the hard-coded path */
+            argv[curArg++] = (char*)kValgrinder;
+            argv[curArg++] = "--tool=memcheck";
+            argv[curArg++] = "--leak-check=yes";        // check for leaks too
+            argv[curArg++] = "--leak-resolution=med";   // increase from 2 to 4
+            argv[curArg++] = "--num-callers=16";        // default is 12
+            assert(curArg == kValgrindArgCount);
+        }
+        argv[curArg++] = execFile;
+
+        argv[curArg++] = "--dex";
+
+        /* values[] slots are addressed with fixed indices so the storage
+         * stays valid regardless of how curArg advances */
+        sprintf(values[2], "%d", DALVIK_VM_BUILD);
+        argv[curArg++] = values[2];
+
+        sprintf(values[3], "%d", fd);
+        argv[curArg++] = values[3];
+
+        sprintf(values[4], "%d", (int) dexOffset);
+        argv[curArg++] = values[4];
+
+        sprintf(values[5], "%d", (int) dexLength);
+        argv[curArg++] = values[5];
+
+        argv[curArg++] = (char*)fileName;
+
+        sprintf(values[7], "%d", (int) modWhen);
+        argv[curArg++] = values[7];
+
+        sprintf(values[8], "%d", (int) crc);
+        argv[curArg++] = values[8];
+
+        /* encode verify/opt/bootstrap settings into a single flags word */
+        flags = 0;
+        if (gDvm.dexOptMode != OPTIMIZE_MODE_NONE) {
+            flags |= DEXOPT_OPT_ENABLED;
+            if (gDvm.dexOptMode == OPTIMIZE_MODE_ALL)
+                flags |= DEXOPT_OPT_ALL;
+        }
+        if (gDvm.classVerifyMode != VERIFY_MODE_NONE) {
+            flags |= DEXOPT_VERIFY_ENABLED;
+            if (gDvm.classVerifyMode == VERIFY_MODE_ALL)
+                flags |= DEXOPT_VERIFY_ALL;
+        }
+        if (isBootstrap)
+            flags |= DEXOPT_IS_BOOTSTRAP;
+        sprintf(values[9], "%d", flags);
+        argv[curArg++] = values[9];
+
+        assert(((!kUseValgrind && curArg == kFixedArgCount) ||
+               ((kUseValgrind && curArg == kFixedArgCount+kValgrindArgCount))));
+
+        /* append the bootclasspath entries as trailing arguments */
+        ClassPathEntry* cpe;
+        for (cpe = gDvm.bootClassPath; cpe->ptr != NULL; cpe++) {
+            argv[curArg++] = cpe->fileName;
+        }
+        assert(curArg == argc);
+
+        argv[curArg] = NULL;
+
+        if (kUseValgrind)
+            execv(kValgrinder, argv);
+        else
+            execv(execFile, argv);
+
+        /* only reached if exec failed */
+        LOGE("execv '%s'%s failed: %s\n", execFile,
+            kUseValgrind ? " [valgrind]" : "", strerror(errno));
+        exit(1);
+    } else {
+        LOGV("DexOpt: waiting for verify+opt, pid=%d\n", (int) pid);
+        int status;
+        pid_t gotPid;
+        int oldStatus;
+
+        /*
+         * Wait for the optimization process to finish.  We go into VMWAIT
+         * mode here so GC suspension won't have to wait for us.
+         */
+        oldStatus = dvmChangeStatus(NULL, THREAD_VMWAIT);
+        while (true) {
+            gotPid = waitpid(pid, &status, 0);
+            if (gotPid == -1 && errno == EINTR) {
+                LOGD("waitpid interrupted, retrying\n");
+            } else {
+                break;
+            }
+        }
+        dvmChangeStatus(NULL, oldStatus);
+        if (gotPid != pid) {
+            LOGE("waitpid failed: wanted %d, got %d: %s\n",
+                (int) pid, (int) gotPid, strerror(errno));
+            return false;
+        }
+
+        if (WIFEXITED(status) && WEXITSTATUS(status) == 0) {
+            LOGD("DexOpt: --- END '%s' (success) ---\n", lastPart);
+            return true;
+        } else {
+            LOGW("DexOpt: --- END '%s' --- status=0x%04x, process failed\n",
+                lastPart, status);
+            return false;
+        }
+    }
+}
+
+/*
+ * Do the actual optimization.  This is called directly, for "minimal"
+ * optimization, or from a newly-created process.
+ *
+ * For best use of disk/memory, we want to extract once and perform
+ * optimizations in place.  If the file has to expand or contract
+ * to match local structure padding/alignment expectations, we want
+ * to do the rewrite as part of the extract, rather than extracting
+ * into a temp file and slurping it back out.  (The structure alignment
+ * is currently correct for all platforms, and this isn't expected to
+ * change, so we should be okay with having it already extracted.)
+ *
+ * Returns "true" on success.
+ */
+bool dvmContinueOptimization(int fd, off_t dexOffset, long dexLength,
+    const char* fileName, u4 modWhen, u4 crc, bool isBootstrap)
+{
+    DexClassLookup* pClassLookup = NULL;
+    bool doVerify, doOpt;
+    u4 headerFlags = 0;
+
+    if (gDvm.classVerifyMode == VERIFY_MODE_NONE)
+        doVerify = false;
+    else if (gDvm.classVerifyMode == VERIFY_MODE_REMOTE)
+        doVerify = !isBootstrap;
+    else /*if (gDvm.classVerifyMode == VERIFY_MODE_ALL)*/
+        doVerify = true;
+
+    if (gDvm.dexOptMode == OPTIMIZE_MODE_NONE)
+        doOpt = false;
+    else if (gDvm.dexOptMode == OPTIMIZE_MODE_VERIFIED)
+        doOpt = doVerify;
+    else /*if (gDvm.dexOptMode == OPTIMIZE_MODE_ALL)*/
+        doOpt = true;
+
+    LOGV("Continuing optimization (%s, isb=%d, vfy=%d, opt=%d)\n",
+        fileName, isBootstrap, doVerify, doOpt);
+
+    assert(dexOffset >= 0);
+
+    /* quick test so we don't blow up on empty file */
+    if (dexLength < (int) sizeof(DexHeader)) {
+        LOGE("too small to be DEX\n");
+        return false;
+    }
+
+    bool result = false;
+
+    /*
+     * Drop this into a global so we don't have to pass it around.  We could
+     * also add a field to DexFile, but since it only pertains to DEX
+     * creation that probably doesn't make sense.
+     */
+    gDvm.optimizingBootstrapClass = isBootstrap;
+
+    {
+        /*
+         * Map the entire file (so we don't have to worry about page alignment).
+         * The expectation is that the output file contains our DEX data plus
+         * a small header.
+         */
+        bool success;
+        void* mapAddr;
+        mapAddr = mmap(NULL, dexOffset + dexLength, PROT_READ|PROT_WRITE,
+                    MAP_SHARED, fd, 0);
+        if (mapAddr == MAP_FAILED) {
+            LOGE("unable to mmap DEX cache: %s\n", strerror(errno));
+            goto bail;
+        }
+
+        /*
+         * Rewrite the file.  Byte reordering, structure realigning,
+         * class verification, and bytecode optimization are all performed
+         * here.
+         */
+        success = rewriteDex(((u1*) mapAddr) + dexOffset, dexLength,
+                    doVerify, doOpt, &headerFlags, &pClassLookup);
+
+        /* unmap the read-write version, forcing writes to disk */
+        if (msync(mapAddr, dexOffset + dexLength, MS_SYNC) != 0) {
+            LOGW("msync failed: %s\n", strerror(errno));
+            // weird, but keep going
+        }
+#if 1
+        /*
+         * This causes clean shutdown to fail, because we have loaded classes
+         * that point into it.  For the optimizer this isn't a problem,
+         * because it's more efficient for the process to simply exit.
+         * Exclude this code when doing clean shutdown for valgrind.
+         */
+        if (munmap(mapAddr, dexOffset + dexLength) != 0) {
+            LOGE("munmap failed: %s\n", strerror(errno));
+            goto bail;
+        }
+#endif
+
+        if (!success)
+            goto bail;
+    }
+
+    /* get start offset, and adjust deps start for 64-bit alignment */
+    off_t depsOffset, auxOffset, endOffset, adjOffset;
+    int depsLength, auxLength;
+
+    depsOffset = lseek(fd, 0, SEEK_END);
+    if (depsOffset < 0) {
+        LOGE("lseek to EOF failed: %s\n", strerror(errno));
+        goto bail;
+    }
+    /* round up to the next 8-byte boundary */
+    adjOffset = (depsOffset + 7) & ~(0x07);
+    if (adjOffset != depsOffset) {
+        LOGV("Adjusting deps start from %d to %d\n",
+            (int) depsOffset, (int) adjOffset);
+        depsOffset = adjOffset;
+        lseek(fd, depsOffset, SEEK_SET);
+    }
+
+    /*
+     * Append the dependency list.
+     */
+    if (writeDependencies(fd, modWhen, crc) != 0) {
+        LOGW("Failed writing dependencies\n");
+        goto bail;
+    }
+
+
+    /* compute deps length, and adjust aux start for 64-bit alignment */
+    auxOffset = lseek(fd, 0, SEEK_END);
+    depsLength = auxOffset - depsOffset;
+
+    adjOffset = (auxOffset + 7) & ~(0x07);
+    if (adjOffset != auxOffset) {
+        LOGV("Adjusting aux start from %d to %d\n",
+            (int) auxOffset, (int) adjOffset);
+        auxOffset = adjOffset;
+        lseek(fd, auxOffset, SEEK_SET);
+    }
+
+    /*
+     * Append any auxillary pre-computed data structures.
+     */
+    if (!writeAuxData(fd, pClassLookup)) {
+        LOGW("Failed writing aux data\n");
+        goto bail;
+    }
+
+    endOffset = lseek(fd, 0, SEEK_END);
+    auxLength = endOffset - auxOffset;
+
+    /*
+     * Output the "opt" header with all values filled in and a correct
+     * magic number.
+     */
+    DexOptHeader optHdr;
+    /* pre-fill with 0xff so any field we miss is obviously invalid */
+    memset(&optHdr, 0xff, sizeof(optHdr));
+    memcpy(optHdr.magic, DEX_OPT_MAGIC, 4);
+    memcpy(optHdr.magic+4, DEX_OPT_MAGIC_VERS, 4);
+    optHdr.dexOffset = (u4) dexOffset;
+    optHdr.dexLength = (u4) dexLength;
+    optHdr.depsOffset = (u4) depsOffset;
+    optHdr.depsLength = (u4) depsLength;
+    optHdr.auxOffset = (u4) auxOffset;
+    optHdr.auxLength = (u4) auxLength;
+
+    optHdr.flags = headerFlags;
+
+    /* header is written last, after all the data it describes */
+    ssize_t actual;
+    lseek(fd, 0, SEEK_SET);
+    actual = write(fd, &optHdr, sizeof(optHdr));
+    if (actual != sizeof(optHdr)) {
+        logFailedWrite(sizeof(optHdr), actual, "opt header", errno);
+        goto bail;
+    }
+
+    LOGV("Successfully wrote DEX header\n");
+    result = true;
+
+bail:
+    /* free(NULL) is harmless if we bailed before the lookup was built */
+    free(pClassLookup);
+    return result;
+}
+
+
+/*
+ * Get the cache file name from a ClassPathEntry.
+ */
+static const char* getCacheFileName(const ClassPathEntry* cpe)
+{
+    switch (cpe->kind) {
+    case kCpeJar:
+        return dvmGetJarFileCacheFileName((JarFile*) cpe->ptr);
+    case kCpeDex:
+        return dvmGetRawDexFileCacheFileName((RawDexFile*) cpe->ptr);
+    default:
+        LOGE("DexOpt: unexpected cpe kind %d\n", cpe->kind);
+        dvmAbort();
+        /* not reached; keeps the compiler happy about the return path */
+        return NULL;
+    }
+}
+
+/*
+ * Get the SHA-1 signature.
+ */
+static const u1* getSignature(const ClassPathEntry* cpe)
+{
+    DvmDex* pDvmDex;
+
+    switch (cpe->kind) {
+    case kCpeJar:
+        pDvmDex = dvmGetJarFileDex((JarFile*) cpe->ptr);
+        break;
+    case kCpeDex:
+        pDvmDex = dvmGetRawDexFileDex((RawDexFile*) cpe->ptr);
+        break;
+    default:
+        LOGE("unexpected cpe kind %d\n", cpe->kind);
+        dvmAbort();
+        pDvmDex = NULL;         // make gcc happy
+    }
+
+    /* returns a pointer into the mapped DEX header; caller must not free */
+    assert(pDvmDex != NULL);
+    return pDvmDex->pDexFile->pHeader->signature;
+}
+
+
+/*
+ * Dependency layout:
+ *  4b  Source file modification time, in seconds since 1970 UTC
+ *  4b  CRC-32 from Zip entry, or Adler32 from source DEX header
+ *  4b  Dalvik VM build number
+ *  4b  Number of dependency entries that follow
+ *  Dependency entries:
+ *    4b  Name length (including terminating null)
+ *    var Full path of cache entry (null terminated)
+ *    20b SHA-1 signature from source DEX file
+ *
+ * If this changes, update DEX_OPT_MAGIC_VERS.
+ */
+static const size_t kMinDepSize = 4 * 4;
+static const size_t kMaxDepSize = 4 * 4 + 1024;     // sanity check
+
+/*
+ * Read the "opt" header, verify it, then read the dependencies section
+ * and verify that data as well.
+ *
+ * If "sourceAvail" is "true", this will verify that "modWhen" and "crc"
+ * match up with what is stored in the header.  If they don't, we reject
+ * the file so that it can be recreated from the updated original.  If
+ * "sourceAvail" isn't set, e.g. for a .odex file, we ignore these arguments.
+ *
+ * On successful return, the file will be seeked immediately past the
+ * "opt" header.
+ */
+bool dvmCheckOptHeaderAndDependencies(int fd, bool sourceAvail, u4 modWhen,
+    u4 crc, bool expectVerify, bool expectOpt)
+{
+    DexOptHeader optHdr;
+    u1* depData = NULL;
+    const u1* magic;
+    off_t posn;
+    int result = false;
+    ssize_t actual;
+
+    /*
+     * Start at the start.  The "opt" header, when present, will always be
+     * the first thing in the file.
+     */
+    if (lseek(fd, 0, SEEK_SET) != 0) {
+        LOGE("DexOpt: failed to seek to start of file: %s\n", strerror(errno));
+        goto bail;
+    }
+
+    /*
+     * Read and do trivial verification on the opt header.  The header is
+     * always in host byte order.
+     */
+    if (read(fd, &optHdr, sizeof(optHdr)) != sizeof(optHdr)) {
+        LOGE("DexOpt: failed reading opt header: %s\n", strerror(errno));
+        goto bail;
+    }
+
+    magic = optHdr.magic;
+    if (memcmp(magic, DEX_OPT_MAGIC, 4) != 0) {
+        LOGW("DexOpt: incorrect opt magic number (0x%02x %02x %02x %02x)\n",
+            magic[0], magic[1], magic[2], magic[3]);
+        goto bail;
+    }
+    if (memcmp(magic+4, DEX_OPT_MAGIC_VERS, 4) != 0) {
+        LOGW("DexOpt: stale opt version (0x%02x %02x %02x %02x)\n",
+            magic[4], magic[5], magic[6], magic[7]);
+        goto bail;
+    }
+    /* sanity-check deps length before we trust it for malloc/read below */
+    if (optHdr.depsLength < kMinDepSize || optHdr.depsLength > kMaxDepSize) {
+        LOGW("DexOpt: weird deps length %d, bailing\n", optHdr.depsLength);
+        goto bail;
+    }
+
+    /*
+     * Do the header flags match up with what we want?
+     *
+     * This is useful because it allows us to automatically regenerate
+     * a file when settings change (e.g. verification is now mandatory),
+     * but can cause difficulties if the bootstrap classes we depend upon
+     * were handled differently than the current options specify.  We get
+     * upset because they're not verified or optimized, but we're not able
+     * to regenerate them because the installer won't let us.
+     *
+     * (This is also of limited value when !sourceAvail.)
+     *
+     * So, for now, we essentially ignore "expectVerify" and "expectOpt"
+     * by limiting the match mask.
+     *
+     * The only thing we really can't handle is incorrect byte-ordering.
+     */
+    const u4 matchMask = DEX_OPT_FLAG_BIG;
+    u4 expectedFlags = 0;
+#if __BYTE_ORDER != __LITTLE_ENDIAN
+    expectedFlags |= DEX_OPT_FLAG_BIG;
+#endif
+    if (expectVerify)
+        expectedFlags |= DEX_FLAG_VERIFIED;
+    if (expectOpt)
+        expectedFlags |= DEX_OPT_FLAG_FIELDS | DEX_OPT_FLAG_INVOCATIONS;
+    if ((expectedFlags & matchMask) != (optHdr.flags & matchMask)) {
+        LOGI("DexOpt: header flag mismatch (0x%02x vs 0x%02x, mask=0x%02x)\n",
+            expectedFlags, optHdr.flags, matchMask);
+        goto bail;
+    }
+
+    posn = lseek(fd, optHdr.depsOffset, SEEK_SET);
+    if (posn < 0) {
+        LOGW("DexOpt: seek to deps failed: %s\n", strerror(errno));
+        goto bail;
+    }
+
+    /*
+     * Read all of the dependency stuff into memory.
+     */
+    depData = (u1*) malloc(optHdr.depsLength);
+    if (depData == NULL) {
+        LOGW("DexOpt: unable to allocate %d bytes for deps\n",
+            optHdr.depsLength);
+        goto bail;
+    }
+    actual = read(fd, depData, optHdr.depsLength);
+    if (actual != (ssize_t) optHdr.depsLength) {
+        LOGW("DexOpt: failed reading deps: %d of %d (err=%s)\n",
+            (int) actual, optHdr.depsLength, strerror(errno));
+        goto bail;
+    }
+
+    /*
+     * Verify simple items.  The fields were written with set4LE in
+     * writeDependencies(), so read them back the same way.
+     */
+    const u1* ptr;
+    u4 val;
+
+    ptr = depData;
+    val = read4LE(&ptr);
+    if (sourceAvail && val != modWhen) {
+        LOGI("DexOpt: source file mod time mismatch (%08x vs %08x)\n",
+            val, modWhen);
+        goto bail;
+    }
+    val = read4LE(&ptr);
+    if (sourceAvail && val != crc) {
+        LOGI("DexOpt: source file CRC mismatch (%08x vs %08x)\n", val, crc);
+        goto bail;
+    }
+    val = read4LE(&ptr);
+    if (val != DALVIK_VM_BUILD) {
+        LOGI("DexOpt: VM build mismatch (%d vs %d)\n", val, DALVIK_VM_BUILD);
+        goto bail;
+    }
+
+    /*
+     * Verify dependencies on other cached DEX files.  It must match
+     * exactly with what is currently defined in the bootclasspath.
+     */
+    ClassPathEntry* cpe;
+    u4 numDeps;
+
+    numDeps = read4LE(&ptr);
+    LOGV("+++ DexOpt: numDeps = %d\n", numDeps);
+    for (cpe = gDvm.bootClassPath; cpe->ptr != NULL; cpe++) {
+        const char* cacheFileName = getCacheFileName(cpe);
+        const u1* signature = getSignature(cpe);
+        size_t len = strlen(cacheFileName) +1;
+        u4 storedStrLen;
+
+        if (numDeps == 0) {
+            /* more entries in bootclasspath than in deps list */
+            LOGI("DexOpt: not all deps represented\n");
+            goto bail;
+        }
+
+        storedStrLen = read4LE(&ptr);
+        if (len != storedStrLen ||
+            strcmp(cacheFileName, (const char*) ptr) != 0)
+        {
+            LOGI("DexOpt: mismatch dep name: '%s' vs. '%s'\n",
+                cacheFileName, ptr);
+            goto bail;
+        }
+
+        ptr += storedStrLen;
+
+        if (memcmp(signature, ptr, kSHA1DigestLen) != 0) {
+            LOGI("DexOpt: mismatch dep signature for '%s'\n", cacheFileName);
+            goto bail;
+        }
+        ptr += kSHA1DigestLen;
+
+        LOGV("DexOpt: dep match on '%s'\n", cacheFileName);
+
+        numDeps--;
+    }
+
+    if (numDeps != 0) {
+        /* more entries in deps list than in classpath */
+        LOGI("DexOpt: Some deps went away\n");
+        goto bail;
+    }
+
+    // consumed all data and no more?
+    if (ptr != depData + optHdr.depsLength) {
+        LOGW("DexOpt: Spurious dep data? %d vs %d\n",
+            (int) (ptr - depData), optHdr.depsLength);
+        assert(false);
+    }
+
+    result = true;
+
+bail:
+    free(depData);
+    return result;
+}
+
+/*
+ * Write the dependency info to "fd" at the current file position.
+ */
+static int writeDependencies(int fd, u4 modWhen, u4 crc)
+{
+    u1* buf = NULL;
+    ssize_t actual;
+    int result = -1;
+    ssize_t bufLen;
+    ClassPathEntry* cpe;
+    int i, numDeps;
+
+    /*
+     * Count up the number of completed entries in the bootclasspath.
+     */
+    numDeps = 0;
+    bufLen = 0;
+    for (cpe = gDvm.bootClassPath; cpe->ptr != NULL; cpe++) {
+        const char* cacheFileName = getCacheFileName(cpe);
+        LOGV("+++ DexOpt: found dep '%s'\n", cacheFileName);
+
+        numDeps++;
+        bufLen += strlen(cacheFileName) +1;
+    }
+
+    bufLen += 4*4 + numDeps * (4+kSHA1DigestLen);
+
+    buf = malloc(bufLen);
+
+    set4LE(buf+0, modWhen);
+    set4LE(buf+4, crc);
+    set4LE(buf+8, DALVIK_VM_BUILD);
+    set4LE(buf+12, numDeps);
+
+    // TODO: do we want to add dvmGetInlineOpsTableLength() here?  Won't
+    // help us if somebody replaces an existing entry, but it'd catch
+    // additions/removals.
+
+    u1* ptr = buf + 4*4;
+    for (cpe = gDvm.bootClassPath; cpe->ptr != NULL; cpe++) {
+        const char* cacheFileName = getCacheFileName(cpe);
+        const u1* signature = getSignature(cpe);
+        int len = strlen(cacheFileName) +1;
+
+        if (ptr + 4 + len + kSHA1DigestLen > buf + bufLen) {
+            LOGE("DexOpt: overran buffer\n");
+            dvmAbort();
+        }
+
+        set4LE(ptr, len);
+        ptr += 4;
+        memcpy(ptr, cacheFileName, len);
+        ptr += len;
+        memcpy(ptr, signature, kSHA1DigestLen);
+        ptr += kSHA1DigestLen;
+    }
+
+    assert(ptr == buf + bufLen);
+
+    actual = write(fd, buf, bufLen);
+    if (actual != bufLen) {
+        result = (errno != 0) ? errno : -1;
+        logFailedWrite(bufLen, actual, "dep info", errno);
+    } else {
+        result = 0;
+    }
+
+    free(buf);
+    return result;
+}
+
+/*
+ * Write aux data.
+ *
+ * At the moment this is just the DexClassLookup structure.  In theory we
+ * will tuck other stuff in here.  When theory becomes reality, we will need
+ * to stow a TOC in here (or use a "chunk" format).  Until then we just
+ * write a zero into the first 4 bytes so future generations have something
+ * to test for.
+ */
+static bool writeAuxData(int fd, const DexClassLookup* pClassLookup)
+{
+    DexFile* pDexFile;
+    ssize_t actual;
+    int zero = 0;
+
+    actual = write(fd, &zero, sizeof(zero));
+    if (actual != sizeof(zero)) {
+        logFailedWrite(sizeof(zero), actual, "aux header", errno);
+        return false;
+    }
+
+    actual = write(fd, pClassLookup, pClassLookup->size);
+    if (actual != pClassLookup->size) {
+        logFailedWrite(pClassLookup->size, actual, "class lookup table", errno);
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Log a failed write.
+ */
+static void logFailedWrite(size_t expected, ssize_t actual, const char* msg,
+    int err)
+{
+    LOGE("Write failed: %s (%d of %d): %s\n",
+        msg, (int)actual, (int)expected, strerror(err));
+}
+
+
+/*
+ * ===========================================================================
+ *      Optimizations
+ * ===========================================================================
+ */
+
+/*
+ * Perform in-place rewrites on a memory-mapped DEX file.
+ *
+ * This happens in a short-lived child process, so we can go nutty with
+ * loading classes and allocating memory.
+ */
+static bool rewriteDex(u1* addr, int len, bool doVerify, bool doOpt,
+    u4* pHeaderFlags, DexClassLookup** ppClassLookup)
+{
+    u8 prepWhen, loadWhen, verifyWhen, optWhen;
+    DvmDex* pDvmDex = NULL;
+    bool result = false;
+
+    *pHeaderFlags = 0;
+
+    LOGV("+++ swapping bytes\n");
+    if (dexFixByteOrdering(addr, len) != 0)
+        goto bail;
+#if __BYTE_ORDER != __LITTLE_ENDIAN
+    *pHeaderFlags |= DEX_OPT_FLAG_BIG;
+#endif
+
+    /*
+     * Now that the DEX file can be read directly, create a DexFile for it.
+     */
+    if (dvmDexFileOpenPartial(addr, len, &pDvmDex) != 0) {
+        LOGE("Unable to create DexFile\n");
+        goto bail;
+    }
+
+    /*
+     * Create the class lookup table.
+     */
+    //startWhen = dvmGetRelativeTimeUsec();
+    *ppClassLookup = dexCreateClassLookup(pDvmDex->pDexFile);
+    if (*ppClassLookup == NULL)
+        goto bail;
+
+    /*
+     * Bail out early if they don't want The Works.  The current implementation
+     * doesn't fork a new process if this flag isn't set, so we really don't
+     * want to continue on with the crazy class loading.
+     */
+    if (!doVerify && !doOpt) {
+        result = true;
+        goto bail;
+    }
+
+    /* this is needed for the next part */
+    pDvmDex->pDexFile->pClassLookup = *ppClassLookup;
+
+    prepWhen = dvmGetRelativeTimeUsec();
+
+    /*
+     * Load all classes found in this DEX file.  If they fail to load for
+     * some reason, they won't get verified (which is as it should be).
+     */
+    if (!loadAllClasses(pDvmDex))
+        goto bail;
+    loadWhen = dvmGetRelativeTimeUsec();
+
+    /*
+     * Verify all classes in the DEX file.  Export the "is verified" flag
+     * to the DEX file we're creating.
+     */
+    if (doVerify) {
+        dvmVerifyAllClasses(pDvmDex->pDexFile);
+        *pHeaderFlags |= DEX_FLAG_VERIFIED;
+    }
+    verifyWhen = dvmGetRelativeTimeUsec();
+
+    /*
+     * Optimize the classes we successfully loaded.  If the opt mode is
+     * OPTIMIZE_MODE_VERIFIED, each class must have been successfully
+     * verified or we'll skip it.
+     */
+#ifndef PROFILE_FIELD_ACCESS
+    if (doOpt) {
+        optimizeLoadedClasses(pDvmDex->pDexFile);
+        *pHeaderFlags |= DEX_OPT_FLAG_FIELDS | DEX_OPT_FLAG_INVOCATIONS;
+    }
+#endif
+    optWhen = dvmGetRelativeTimeUsec();
+
+    LOGD("DexOpt: load %dms, verify %dms, opt %dms\n",
+        (int) (loadWhen - prepWhen) / 1000,
+        (int) (verifyWhen - loadWhen) / 1000,
+        (int) (optWhen - verifyWhen) / 1000);
+
+    result = true;
+
+bail:
+    /*
+     * Free up storage.
+     */
+    dvmDexFileFree(pDvmDex);
+
+    return result;
+}
+
+/*
+ * Try to load all classes in the specified DEX.  If they have some sort
+ * of broken dependency, e.g. their superclass lives in a different DEX
+ * that wasn't previously loaded into the bootstrap class path, loading
+ * will fail.  This is the desired behavior.
+ *
+ * We have no notion of class loader at this point, so we load all of
+ * the classes with the bootstrap class loader.  It turns out this has
+ * exactly the behavior we want, and has no ill side effects because we're
+ * running in a separate process and anything we load here will be forgotten.
+ *
+ * We set the CLASS_MULTIPLE_DEFS flag here if we see multiple definitions.
+ * This works because we only call here as part of optimization / pre-verify,
+ * not during verification as part of loading a class into a running VM.
+ *
+ * This returns "false" if the world is too screwed up to do anything
+ * useful at all.
+ */
+static bool loadAllClasses(DvmDex* pDvmDex)
+{
+    u4 count = pDvmDex->pDexFile->pHeader->classDefsSize;
+    u4 idx;
+    int loaded = 0;
+
+    LOGV("DexOpt: +++ trying to load %d classes\n", count);
+
+    dvmSetBootPathExtraDex(pDvmDex);
+
+    /*
+     * We have some circularity issues with Class and Object that are most
+     * easily avoided by ensuring that Object is never the first thing we
+     * try to find.  Take care of that here.  (We only need to do this when
+     * loading classes from the DEX file that contains Object, and only
+     * when Object comes first in the list, but it costs very little to
+     * do it in all cases.)
+     */
+    if (dvmFindSystemClass("Ljava/lang/Class;") == NULL) {
+        LOGE("ERROR: java.lang.Class does not exist!\n");
+        return false;
+    }
+
+    for (idx = 0; idx < count; idx++) {
+        const DexClassDef* pClassDef;
+        const char* classDescriptor;
+        ClassObject* newClass;
+        
+        pClassDef = dexGetClassDef(pDvmDex->pDexFile, idx);
+        classDescriptor =
+            dexStringByTypeIdx(pDvmDex->pDexFile, pClassDef->classIdx);
+
+        LOGV("+++  loading '%s'", classDescriptor);
+        //newClass = dvmDefineClass(pDexFile, classDescriptor,
+        //        NULL);
+        newClass = dvmFindSystemClassNoInit(classDescriptor);
+        if (newClass == NULL) {
+            LOGV("DexOpt: failed loading '%s'\n", classDescriptor);
+            dvmClearOptException(dvmThreadSelf());
+        } else if (newClass->pDvmDex != pDvmDex) {
+            /*
+             * We don't load the new one, and we tag the first one found
+             * with the "multiple def" flag so the resolver doesn't try
+             * to make it available.
+             */
+            LOGD("DexOpt: '%s' has an earlier definition; blocking out\n",
+                classDescriptor);
+            SET_CLASS_FLAG(newClass, CLASS_MULTIPLE_DEFS);
+        } else {
+            loaded++;
+        }
+    }
+    LOGV("DexOpt: +++ successfully loaded %d classes\n", loaded);
+
+    dvmSetBootPathExtraDex(NULL);
+    return true;
+}
+
+
+/*
+ * Create a table of inline substitutions.
+ *
+ * TODO: this is currently just a linear array.  We will want to put this
+ * into a hash table as the list size increases.
+ */
+static InlineSub* createInlineSubsTable(void)
+{
+    const InlineOperation* ops = dvmGetInlineOpsTable();
+    const int count = dvmGetInlineOpsTableLength();
+    InlineSub* table;
+    Method* method;
+    ClassObject* clazz;
+    int i, tableIndex;
+
+    /*
+     * Allocate for optimism: one slot per entry, plus an end-of-list marker.
+     */
+    table = malloc(sizeof(InlineSub) * (count+1));
+
+    tableIndex = 0;
+    for (i = 0; i < count; i++) {
+        clazz = dvmFindClassNoInit(ops[i].classDescriptor, NULL);
+        if (clazz == NULL) {
+            LOGV("DexOpt: can't inline for class '%s': not found\n",
+                ops[i].classDescriptor);
+            dvmClearOptException(dvmThreadSelf());
+        } else {
+            /*
+             * Method could be virtual or direct.  Try both.  Don't use
+             * the "hier" versions.
+             */
+            if (!dvmIsFinalClass(clazz)) {
+                LOGW("DexOpt: WARNING: inline op on non-final class '%s'\n",
+                    clazz->descriptor);
+                // TODO: final methods in non-final classes are okay too
+                /* keep going, I guess */
+            }
+            method = dvmFindDirectMethodByDescriptor(clazz, ops[i].methodName,
+                        ops[i].methodSignature);
+            if (method == NULL)
+                method = dvmFindVirtualMethodByDescriptor(clazz, ops[i].methodName,
+                        ops[i].methodSignature);
+            if (method == NULL) {
+                LOGW("DexOpt: can't inline %s.%s %s: method not found\n",
+                    ops[i].classDescriptor, ops[i].methodName,
+                    ops[i].methodSignature);
+            } else {
+                table[tableIndex].method = method;
+                table[tableIndex].inlineIdx = i;
+                tableIndex++;
+
+                LOGV("DexOpt: will inline %d: %s.%s %s\n", i,
+                    ops[i].classDescriptor, ops[i].methodName,
+                    ops[i].methodSignature);
+            }
+        }
+    }
+
+    /* mark end of table */
+    table[tableIndex].method = NULL;
+    LOGV("DexOpt: inline table has %d entries\n", tableIndex);
+
+    return table;
+}
+
+/*
+ * Run through all classes that were successfully loaded from this DEX
+ * file and optimize their code sections.
+ */
+static void optimizeLoadedClasses(DexFile* pDexFile)
+{
+    u4 count = pDexFile->pHeader->classDefsSize;
+    u4 idx;
+    InlineSub* inlineSubs = NULL;
+
+    LOGV("DexOpt: +++ optimizing up to %d classes\n", count);
+    assert(gDvm.dexOptMode != OPTIMIZE_MODE_NONE);
+
+    inlineSubs = createInlineSubsTable();
+
+    for (idx = 0; idx < count; idx++) {
+        const DexClassDef* pClassDef;
+        const char* classDescriptor;
+        ClassObject* clazz;
+        
+        pClassDef = dexGetClassDef(pDexFile, idx);
+        classDescriptor= dexStringByTypeIdx(pDexFile, pClassDef->classIdx);
+
+        /* all classes are loaded into the bootstrap class loader */
+        clazz = dvmLookupClass(classDescriptor, NULL, false);
+        if (clazz != NULL) {
+            if ((pClassDef->accessFlags & CLASS_ISPREVERIFIED) == 0 &&
+                gDvm.dexOptMode == OPTIMIZE_MODE_VERIFIED)
+            {
+                LOGV("DexOpt: not optimizing '%s': not verified\n",
+                    classDescriptor);
+            } else if (clazz->pDvmDex->pDexFile != pDexFile) {
+                /* shouldn't be here -- verifier should have caught */
+                LOGD("DexOpt: not optimizing '%s': multiple definitions\n",
+                    classDescriptor);
+            } else {
+                optimizeClass(clazz, inlineSubs);
+
+                /* set the flag whether or not we actually did anything */
+                ((DexClassDef*)pClassDef)->accessFlags |=
+                    CLASS_ISOPTIMIZED;
+            }
+        } else {
+            LOGV("DexOpt: not optimizing unavailable class '%s'\n",
+                classDescriptor);
+        }
+    }
+    
+    free(inlineSubs);
+}
+
+/*
+ * Optimize the specified class.
+ */
+static void optimizeClass(ClassObject* clazz, const InlineSub* inlineSubs)
+{
+    int i;
+
+    for (i = 0; i < clazz->directMethodCount; i++) {
+        if (!optimizeMethod(&clazz->directMethods[i], inlineSubs))
+            goto fail;
+    }
+    for (i = 0; i < clazz->virtualMethodCount; i++) {
+        if (!optimizeMethod(&clazz->virtualMethods[i], inlineSubs))
+            goto fail;
+    }
+
+    return;
+
+fail:
+    LOGV("DexOpt: ceasing optimization attempts on %s\n", clazz->descriptor);
+}
+
+/*
+ * Optimize instructions in a method.
+ *
+ * Returns "true" if all went well, "false" if we bailed out early when
+ * something failed.
+ */
+static bool optimizeMethod(Method* method, const InlineSub* inlineSubs)
+{
+    u4 insnsSize;
+    u2* insns;
+    u2 inst;
+
+    if (dvmIsNativeMethod(method) || dvmIsAbstractMethod(method))
+        return true;
+
+    insns = (u2*) method->insns;
+    assert(insns != NULL);
+    insnsSize = dvmGetMethodInsnsSize(method);
+
+    while (insnsSize > 0) {
+        int width;
+
+        inst = *insns & 0xff;
+
+        switch (inst) {
+        case OP_IGET:
+        case OP_IGET_BOOLEAN:
+        case OP_IGET_BYTE:
+        case OP_IGET_CHAR:
+        case OP_IGET_SHORT:
+            rewriteInstField(method, insns, OP_IGET_QUICK);
+            break;
+        case OP_IGET_WIDE:
+            rewriteInstField(method, insns, OP_IGET_WIDE_QUICK);
+            break;
+        case OP_IGET_OBJECT:
+            rewriteInstField(method, insns, OP_IGET_OBJECT_QUICK);
+            break;
+        case OP_IPUT:
+        case OP_IPUT_BOOLEAN:
+        case OP_IPUT_BYTE:
+        case OP_IPUT_CHAR:
+        case OP_IPUT_SHORT:
+            rewriteInstField(method, insns, OP_IPUT_QUICK);
+            break;
+        case OP_IPUT_WIDE:
+            rewriteInstField(method, insns, OP_IPUT_WIDE_QUICK);
+            break;
+        case OP_IPUT_OBJECT:
+            rewriteInstField(method, insns, OP_IPUT_OBJECT_QUICK);
+            break;
+
+        case OP_INVOKE_VIRTUAL:
+            if (!rewriteExecuteInline(method, insns, METHOD_VIRTUAL,inlineSubs))
+            {
+                if (!rewriteVirtualInvoke(method, insns, OP_INVOKE_VIRTUAL_QUICK))
+                    return false;
+            }
+            break;
+        case OP_INVOKE_VIRTUAL_RANGE:
+            if (!rewriteVirtualInvoke(method, insns, OP_INVOKE_VIRTUAL_QUICK_RANGE))
+                return false;
+            break;
+        case OP_INVOKE_SUPER:
+            if (!rewriteVirtualInvoke(method, insns, OP_INVOKE_SUPER_QUICK))
+                return false;
+            break;
+        case OP_INVOKE_SUPER_RANGE:
+            if (!rewriteVirtualInvoke(method, insns, OP_INVOKE_SUPER_QUICK_RANGE))
+                return false;
+            break;
+
+        case OP_INVOKE_DIRECT:
+            if (!rewriteExecuteInline(method, insns, METHOD_DIRECT, inlineSubs))
+            {
+                if (!rewriteDirectInvoke(method, insns))
+                    return false;
+            }
+            break;
+        case OP_INVOKE_STATIC:
+            rewriteExecuteInline(method, insns, METHOD_STATIC, inlineSubs);
+            break;
+
+        default:
+            // ignore this instruction
+            ;
+        }
+
+        if (*insns == kPackedSwitchSignature) {
+            width = 4 + insns[1] * 2;
+        } else if (*insns == kSparseSwitchSignature) {
+            width = 2 + insns[1] * 4;
+        } else if (*insns == kArrayDataSignature) {
+            u2 elemWidth = insns[1];
+            u4 len = insns[2] | (((u4)insns[3]) << 16);
+            width = 4 + (elemWidth * len + 1) / 2;
+        } else {
+            width = dexGetInstrWidth(gDvm.instrWidth, inst);
+        }
+        assert(width > 0);
+
+        insns += width;
+        insnsSize -= width;
+    }
+
+    assert(insnsSize == 0);
+    return true;
+}
+
+
+/*
+ * If "referrer" and "resClass" don't come from the same DEX file, and
+ * the DEX we're working on is not destined for the bootstrap class path,
+ * tweak the class loader so package-access checks work correctly.
+ *
+ * Only do this if we're doing pre-verification or optimization.
+ */
+static void tweakLoader(ClassObject* referrer, ClassObject* resClass)
+{
+    if (!gDvm.optimizing)
+        return;
+    assert(referrer->classLoader == NULL);
+    assert(resClass->classLoader == NULL);
+
+    if (!gDvm.optimizingBootstrapClass) {
+        /* class loader for an array class comes from element type */
+        if (dvmIsArrayClass(resClass))
+            resClass = resClass->elementClass;
+        if (referrer->pDvmDex != resClass->pDvmDex)
+            resClass->classLoader = (Object*) 0xdead3333;
+    }
+}
+
+/*
+ * Undo the effects of tweakLoader.
+ */
+static void untweakLoader(ClassObject* referrer, ClassObject* resClass)
+{
+    if (!gDvm.optimizing || gDvm.optimizingBootstrapClass)
+        return;
+
+    if (dvmIsArrayClass(resClass))
+        resClass = resClass->elementClass;
+    resClass->classLoader = NULL;
+}
+
+
+/*
+ * Alternate version of dvmResolveClass for use with verification and
+ * optimization.  Performs access checks on every resolve, and refuses
+ * to acknowledge the existence of classes defined in more than one DEX
+ * file.
+ *
+ * Exceptions caused by failures are cleared before returning.
+ */
+ClassObject* dvmOptResolveClass(ClassObject* referrer, u4 classIdx)
+{
+    DvmDex* pDvmDex = referrer->pDvmDex;
+    ClassObject* resClass;
+
+    /*
+     * Check the table first.  If not there, do the lookup by name.
+     */
+    resClass = dvmDexGetResolvedClass(pDvmDex, classIdx);
+    if (resClass == NULL) {
+        resClass = dvmFindClassNoInit(
+                    dexStringByTypeIdx(pDvmDex->pDexFile, classIdx),
+                    referrer->classLoader);
+        if (resClass == NULL) {
+            /* not found, exception should be raised */
+            LOGV("DexOpt: class %d (%s) not found\n",
+                classIdx,
+                dexStringByTypeIdx(pDvmDex->pDexFile, classIdx));
+            dvmClearOptException(dvmThreadSelf());
+            return NULL;
+        }
+
+        /*
+         * Add it to the resolved table so we're faster on the next lookup.
+         */
+        dvmDexSetResolvedClass(pDvmDex, classIdx, resClass);
+    }
+
+    /* multiple definitions? */
+    if (IS_CLASS_FLAG_SET(resClass, CLASS_MULTIPLE_DEFS)) {
+        LOGI("DexOpt: not resolving ambiguous class '%s'\n",
+            resClass->descriptor);
+        return NULL;
+    }
+
+    /* access allowed? */
+    tweakLoader(referrer, resClass);
+    bool allowed = dvmCheckClassAccess(referrer, resClass);
+    untweakLoader(referrer, resClass);
+    if (!allowed) {
+        LOGI("DexOpt: resolve class illegal access: %s -> %s\n",
+            referrer->descriptor, resClass->descriptor);
+        return NULL;
+    }
+
+    return resClass;
+}
+
+/*
+ * Alternate version of dvmResolveInstField().
+ */
+InstField* dvmOptResolveInstField(ClassObject* referrer, u4 ifieldIdx)
+{
+    DvmDex* pDvmDex = referrer->pDvmDex;
+    InstField* resField;
+
+    resField = (InstField*) dvmDexGetResolvedField(pDvmDex, ifieldIdx);
+    if (resField == NULL) {
+        const DexFieldId* pFieldId;
+        ClassObject* resClass;
+
+        pFieldId = dexGetFieldId(pDvmDex->pDexFile, ifieldIdx);
+
+        /*
+         * Find the field's class.
+         */
+        resClass = dvmOptResolveClass(referrer, pFieldId->classIdx);
+        if (resClass == NULL) {
+            //dvmClearOptException(dvmThreadSelf());
+            assert(!dvmCheckException(dvmThreadSelf()));
+            return NULL;
+        }
+
+        resField = dvmFindInstanceFieldHier(resClass,
+            dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx),
+            dexStringByTypeIdx(pDvmDex->pDexFile, pFieldId->typeIdx));
+        if (resField == NULL) {
+            LOGD("DexOpt: couldn't find field %s.%s\n",
+                resClass->descriptor,
+                dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx));
+            return NULL;
+        }
+
+        /*
+         * Add it to the resolved table so we're faster on the next lookup.
+         */
+        dvmDexSetResolvedField(pDvmDex, ifieldIdx, (Field*) resField);
+    }
+
+    /* access allowed? */
+    tweakLoader(referrer, resField->field.clazz);
+    bool allowed = dvmCheckFieldAccess(referrer, (Field*)resField);
+    untweakLoader(referrer, resField->field.clazz);
+    if (!allowed) {
+        LOGI("DexOpt: access denied from %s to field %s.%s\n",
+            referrer->descriptor, resField->field.clazz->descriptor,
+            resField->field.name);
+        return NULL;
+    }
+
+    return resField;
+}
+
+/*
+ * Alternate version of dvmResolveStaticField().
+ *
+ * Does not force initialization of the resolved field's class.
+ */
+StaticField* dvmOptResolveStaticField(ClassObject* referrer, u4 sfieldIdx)
+{
+    DvmDex* pDvmDex = referrer->pDvmDex;
+    StaticField* resField;
+
+    resField = (StaticField*)dvmDexGetResolvedField(pDvmDex, sfieldIdx);
+    if (resField == NULL) {
+        const DexFieldId* pFieldId;
+        ClassObject* resClass;
+
+        pFieldId = dexGetFieldId(pDvmDex->pDexFile, sfieldIdx);
+
+        /*
+         * Find the field's class.
+         */
+        resClass = dvmOptResolveClass(referrer, pFieldId->classIdx);
+        if (resClass == NULL) {
+            //dvmClearOptException(dvmThreadSelf());
+            assert(!dvmCheckException(dvmThreadSelf()));
+            return NULL;
+        }
+
+        resField = dvmFindStaticFieldHier(resClass,
+                    dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx),
+                    dexStringByTypeIdx(pDvmDex->pDexFile, pFieldId->typeIdx));
+        if (resField == NULL) {
+            LOGD("DexOpt: couldn't find static field\n");
+            return NULL;
+        }
+
+        /*
+         * Add it to the resolved table so we're faster on the next lookup.
+         *
+         * We can only do this if we're in "dexopt", because the presence
+         * of a valid value in the resolution table implies that the class
+         * containing the static field has been initialized.
+         */
+        if (gDvm.optimizing)
+            dvmDexSetResolvedField(pDvmDex, sfieldIdx, (Field*) resField);
+    }
+
+    /* access allowed? */
+    tweakLoader(referrer, resField->field.clazz);
+    bool allowed = dvmCheckFieldAccess(referrer, (Field*)resField);
+    untweakLoader(referrer, resField->field.clazz);
+    if (!allowed) {
+        LOGI("DexOpt: access denied from %s to field %s.%s\n",
+            referrer->descriptor, resField->field.clazz->descriptor,
+            resField->field.name);
+        return NULL;
+    }
+
+    return resField;
+}
+
+
+/*
+ * Rewrite an iget/iput instruction.  These all have the form:
+ *   op vA, vB, field@CCCC
+ *
+ * Where vA holds the value, vB holds the object reference, and CCCC is
+ * the field reference constant pool offset.  We want to replace CCCC
+ * with the byte offset from the start of the object.
+ *
+ * "clazz" is the referring class.  We need this because we verify
+ * access rights here.
+ */
+static void rewriteInstField(Method* method, u2* insns, OpCode newOpc)
+{
+    ClassObject* clazz = method->clazz;
+    u2 fieldIdx = insns[1];
+    InstField* field;
+    int byteOffset;
+
+    field = dvmOptResolveInstField(clazz, fieldIdx);
+    if (field == NULL) {
+        LOGI("DexOpt: unable to optimize field ref 0x%04x at 0x%02x in %s.%s\n",
+            fieldIdx, (int) (insns - method->insns), clazz->descriptor,
+            method->name);
+        return;
+    }
+
+    if (field->byteOffset >= 65536) {
+        LOGI("DexOpt: field offset exceeds 64K (%d)\n", field->byteOffset);
+        return;
+    }
+
+    insns[0] = (insns[0] & 0xff00) | (u2) newOpc;
+    insns[1] = (u2) field->byteOffset;
+    LOGVV("DexOpt: rewrote access to %s.%s --> %d\n",
+        field->field.clazz->descriptor, field->field.name,
+        field->byteOffset);
+}
+
+/*
+ * Alternate version of dvmResolveMethod().
+ *
+ * Doesn't throw exceptions, and checks access on every lookup.
+ */
+Method* dvmOptResolveMethod(ClassObject* referrer, u4 methodIdx,
+    MethodType methodType)
+{
+    DvmDex* pDvmDex = referrer->pDvmDex;
+    Method* resMethod;
+
+    assert(methodType != METHOD_INTERFACE);
+
+    LOGVV("--- resolving method %u (referrer=%s)\n", methodIdx,
+        referrer->descriptor);
+
+    resMethod = dvmDexGetResolvedMethod(pDvmDex, methodIdx);
+    if (resMethod == NULL) {
+        const DexMethodId* pMethodId;
+        ClassObject* resClass;
+
+        pMethodId = dexGetMethodId(pDvmDex->pDexFile, methodIdx);
+
+        resClass = dvmOptResolveClass(referrer, pMethodId->classIdx);
+        if (resClass == NULL) {
+            /* can't find the class that the method is a part of */
+            LOGV("DexOpt: can't find called method's class (?.%s)\n",
+                dexStringById(pDvmDex->pDexFile, pMethodId->nameIdx));
+            return NULL;
+        }
+        if (dvmIsInterfaceClass(resClass)) {
+            /* method is part of an interface; this is wrong method for that */
+            LOGW("DexOpt: method is in an interface\n");
+            return NULL;
+        }
+
+        /*
+         * We need to chase up the class hierarchy to find methods defined
+         * in super-classes.  (We only want to check the current class
+         * if we're looking for a constructor.)
+         */
+        DexProto proto;
+        dexProtoSetFromMethodId(&proto, pDvmDex->pDexFile, pMethodId);
+
+        if (methodType == METHOD_DIRECT) {
+            resMethod = dvmFindDirectMethod(resClass,
+                dexStringById(pDvmDex->pDexFile, pMethodId->nameIdx), &proto);
+        } else if (methodType == METHOD_STATIC) {
+            resMethod = dvmFindDirectMethodHier(resClass,
+                dexStringById(pDvmDex->pDexFile, pMethodId->nameIdx), &proto);
+        } else {
+            resMethod = dvmFindVirtualMethodHier(resClass,
+                dexStringById(pDvmDex->pDexFile, pMethodId->nameIdx), &proto);
+        }
+
+        if (resMethod == NULL) {
+            LOGV("DexOpt: couldn't find method '%s'\n",
+                dexStringById(pDvmDex->pDexFile, pMethodId->nameIdx));
+            return NULL;
+        }
+
+        /* see if this is a pure-abstract method */
+        if (dvmIsAbstractMethod(resMethod) && !dvmIsAbstractClass(resClass)) {
+            LOGW("DexOpt: pure-abstract method '%s'\n",
+                dexStringById(pDvmDex->pDexFile, pMethodId->nameIdx));
+            return NULL;
+        }
+
+        /*
+         * Add it to the resolved table so we're faster on the next lookup.
+         *
+         * We can only do this for static methods if we're not in "dexopt",
+         * because the presence of a valid value in the resolution table
+         * implies that the class containing the static field has been
+         * initialized.
+         */
+        if (methodType != METHOD_STATIC || gDvm.optimizing)
+            dvmDexSetResolvedMethod(pDvmDex, methodIdx, resMethod);
+    }
+
+    LOGVV("--- found method %d (%s.%s)\n",
+        methodIdx, resMethod->clazz->descriptor, resMethod->name);
+
+    /* access allowed? */
+    tweakLoader(referrer, resMethod->clazz);
+    bool allowed = dvmCheckMethodAccess(referrer, resMethod);
+    untweakLoader(referrer, resMethod->clazz);
+    if (!allowed) {
+        IF_LOGI() {
+            char* desc = dexProtoCopyMethodDescriptor(&resMethod->prototype);
+            LOGI("DexOpt: illegal method access (call %s.%s %s from %s)\n",
+                resMethod->clazz->descriptor, resMethod->name, desc,
+                referrer->descriptor);
+            free(desc);
+        }
+        return NULL;
+    }
+
+    return resMethod;
+}
+
+/*
+ * Rewrite invoke-virtual, invoke-virtual/range, invoke-super, and
+ * invoke-super/range.  These all have the form:
+ *   op vAA, meth@BBBB, reg stuff @CCCC
+ *
+ * We want to replace the method constant pool index BBBB with the
+ * vtable index.
+ */
+static bool rewriteVirtualInvoke(Method* method, u2* insns, OpCode newOpc)
+{
+    ClassObject* clazz = method->clazz;
+    Method* baseMethod;
+    u2 methodIdx = insns[1];
+
+    baseMethod = dvmOptResolveMethod(clazz, methodIdx, METHOD_VIRTUAL);
+    if (baseMethod == NULL) {
+        LOGD("DexOpt: unable to optimize virt call 0x%04x at 0x%02x in %s.%s\n",
+            methodIdx,
+            (int) (insns - method->insns), clazz->descriptor,
+            method->name);
+        return false;
+    }
+
+    assert((insns[0] & 0xff) == OP_INVOKE_VIRTUAL ||
+           (insns[0] & 0xff) == OP_INVOKE_VIRTUAL_RANGE ||
+           (insns[0] & 0xff) == OP_INVOKE_SUPER ||
+           (insns[0] & 0xff) == OP_INVOKE_SUPER_RANGE);
+
+    /*
+     * Note: Method->methodIndex is a u2 and is range checked during the
+     * initial load.
+     */
+    insns[0] = (insns[0] & 0xff00) | (u2) newOpc;
+    insns[1] = baseMethod->methodIndex;
+
+    //LOGI("DexOpt: rewrote call to %s.%s --> %s.%s\n",
+    //    method->clazz->descriptor, method->name,
+    //    baseMethod->clazz->descriptor, baseMethod->name);
+
+    return true;
+}
+
+/*
+ * Rewrite invoke-direct, which has the form:
+ *   op vAA, meth@BBBB, reg stuff @CCCC
+ *
+ * There isn't a lot we can do to make this faster, but in some situations
+ * we can make it go away entirely.
+ *
+ * This must only be used when the invoked method does nothing and has
+ * no return value (the latter being very important for verification).
+ *
+ * "insns" points at the invoke-direct instruction; insns[1] holds the
+ * method index.  Returns "false" only if the target method can't be
+ * resolved; leaving the instruction untouched still counts as success.
+ */
+static bool rewriteDirectInvoke(Method* method, u2* insns)
+{
+    ClassObject* clazz = method->clazz;
+    Method* calledMethod;
+    u2 methodIdx = insns[1];
+
+    calledMethod = dvmOptResolveMethod(clazz, methodIdx, METHOD_DIRECT);
+    if (calledMethod == NULL) {
+        LOGD("DexOpt: unable to opt direct call 0x%04x at 0x%02x in %s.%s\n",
+            methodIdx,
+            (int) (insns - method->insns), clazz->descriptor,
+            method->name);
+        return false;
+    }
+
+    /* TODO: verify that java.lang.Object() is actually empty! */
+    if (calledMethod->clazz == gDvm.classJavaLangObject &&
+        dvmCompareNameDescriptorAndMethod("<init>", "()V", calledMethod) == 0)
+    {
+        /*
+         * Replace with "empty" instruction.  DO NOT disturb anything
+         * else about it, as we want it to function the same as
+         * OP_INVOKE_DIRECT when debugging is enabled.
+         */
+        assert((insns[0] & 0xff) == OP_INVOKE_DIRECT);
+        /* swap only the opcode byte; vAA and the method index stay put */
+        insns[0] = (insns[0] & 0xff00) | (u2) OP_INVOKE_DIRECT_EMPTY;
+
+        //LOGI("DexOpt: marked-empty call to %s.%s --> %s.%s\n",
+        //    method->clazz->descriptor, method->name,
+        //    calledMethod->clazz->descriptor, calledMethod->name);
+    }
+
+    return true;
+}
+
+/*
+ * Resolve an interface method reference.
+ *
+ * On success, the result is cached in the DexFile's resolved-method
+ * table so subsequent lookups are fast.
+ *
+ * Returns NULL if the method was not found.  Does not throw an exception.
+ */
+Method* dvmOptResolveInterfaceMethod(ClassObject* referrer, u4 methodIdx)
+{
+    DvmDex* pDvmDex = referrer->pDvmDex;
+    Method* resMethod;
+    int i;
+
+    LOGVV("--- resolving interface method %d (referrer=%s)\n",
+        methodIdx, referrer->descriptor);
+
+    /* fast path: consult the per-DexFile cache first */
+    resMethod = dvmDexGetResolvedMethod(pDvmDex, methodIdx);
+    if (resMethod == NULL) {
+        const DexMethodId* pMethodId;
+        ClassObject* resClass;
+
+        pMethodId = dexGetMethodId(pDvmDex->pDexFile, methodIdx);
+
+        resClass = dvmOptResolveClass(referrer, pMethodId->classIdx);
+        if (resClass == NULL) {
+            /* can't find the class that the method is a part of */
+            dvmClearOptException(dvmThreadSelf());
+            return NULL;
+        }
+        if (!dvmIsInterfaceClass(resClass)) {
+            /* whoops */
+            LOGI("Interface method not part of interface class\n");
+            return NULL;
+        }
+
+        const char* methodName =
+            dexStringById(pDvmDex->pDexFile, pMethodId->nameIdx);
+        DexProto proto;
+        dexProtoSetFromMethodId(&proto, pDvmDex->pDexFile, pMethodId);
+
+        /*
+         * Fixed: this previously referenced an undeclared "methodSig",
+         * which only compiled because LOGVV expands to nothing in
+         * non-verbose builds.
+         */
+        LOGVV("+++ looking for '%s' in resClass='%s'\n",
+            methodName, resClass->descriptor);
+        resMethod = dvmFindVirtualMethod(resClass, methodName, &proto);
+        if (resMethod == NULL) {
+            /* scan superinterfaces and superclass interfaces */
+            LOGVV("+++ did not resolve immediately\n");
+            for (i = 0; i < resClass->iftableCount; i++) {
+                resMethod = dvmFindVirtualMethod(resClass->iftable[i].clazz,
+                                methodName, &proto);
+                if (resMethod != NULL)
+                    break;
+            }
+
+            if (resMethod == NULL) {
+                LOGVV("+++ unable to resolve method %s\n", methodName);
+                return NULL;
+            }
+        } else {
+            LOGVV("+++ resolved immediately: %s (%s %d)\n", resMethod->name,
+                resMethod->clazz->descriptor, (u4) resMethod->methodIndex);
+        }
+
+        /* we're expecting this to be abstract */
+        if (!dvmIsAbstractMethod(resMethod)) {
+            char* desc = dexProtoCopyMethodDescriptor(&resMethod->prototype);
+            LOGW("Found non-abstract interface method %s.%s %s\n",
+                resMethod->clazz->descriptor, resMethod->name, desc);
+            free(desc);
+            return NULL;
+        }
+
+        /*
+         * Add it to the resolved table so we're faster on the next lookup.
+         */
+        dvmDexSetResolvedMethod(pDvmDex, methodIdx, resMethod);
+    }
+
+    LOGVV("--- found interface method %d (%s.%s)\n",
+        methodIdx, resMethod->clazz->descriptor, resMethod->name);
+
+    /* interface methods are always public; no need to check access */
+
+    return resMethod;
+}
+/*
+ * See if the method being called can be rewritten as an inline operation.
+ * Works for invoke-virtual, invoke-direct, and invoke-static.
+ *
+ * "inlineSubs" is a table of (Method*, inlineIdx) pairs terminated by a
+ * NULL method pointer; "methodType" selects the resolution namespace.
+ *
+ * Returns "true" if we replace it.
+ */
+static bool rewriteExecuteInline(Method* method, u2* insns,
+    MethodType methodType, const InlineSub* inlineSubs)
+{
+    ClassObject* clazz = method->clazz;
+    Method* calledMethod;
+    u2 methodIdx = insns[1];
+
+    //return false;
+
+    calledMethod = dvmOptResolveMethod(clazz, methodIdx, methodType);
+    if (calledMethod == NULL) {
+        /* not fatal; it just means this call site can't be inlined */
+        LOGV("+++ DexOpt inline: can't find %d\n", methodIdx);
+        return false;
+    }
+
+    /* linear scan of the substitution table; it is expected to be small */
+    while (inlineSubs->method != NULL) {
+        /*
+        if (extra) {
+            LOGI("comparing %p vs %p %s.%s %s\n",
+                inlineSubs->method, calledMethod,
+                inlineSubs->method->clazz->descriptor,
+                inlineSubs->method->name,
+                inlineSubs->method->signature);
+        }
+        */
+        if (inlineSubs->method == calledMethod) {
+            assert((insns[0] & 0xff) == OP_INVOKE_DIRECT ||
+                   (insns[0] & 0xff) == OP_INVOKE_STATIC ||
+                   (insns[0] & 0xff) == OP_INVOKE_VIRTUAL);
+            /* rewrite opcode and replace the method index with the
+             * inline-table index */
+            insns[0] = (insns[0] & 0xff00) | (u2) OP_EXECUTE_INLINE;
+            insns[1] = (u2) inlineSubs->inlineIdx;
+
+            //LOGI("DexOpt: execute-inline %s.%s --> %s.%s\n",
+            //    method->clazz->descriptor, method->name,
+            //    calledMethod->clazz->descriptor, calledMethod->name);
+            return true;
+        }
+
+        inlineSubs++;
+    }
+
+    return false;
+}
+
diff --git a/vm/analysis/DexOptimize.h b/vm/analysis/DexOptimize.h
new file mode 100644
index 0000000..207140b
--- /dev/null
+++ b/vm/analysis/DexOptimize.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * DEX optimization declarations.
+ */
+#ifndef _DALVIK_DEXOPTIMIZE
+#define _DALVIK_DEXOPTIMIZE
+
+/*
+ * Global DEX optimizer control.  Determines the circumstances in which we
+ * try to rewrite instructions in the DEX file.
+ */
+typedef enum DexOptimizerMode {
+    OPTIMIZE_MODE_UNKNOWN = 0,
+    OPTIMIZE_MODE_NONE,         /* never optimize */
+    OPTIMIZE_MODE_VERIFIED,     /* only optimize verified classes (default) */
+    OPTIMIZE_MODE_ALL           /* optimize all classes */
+} DexOptimizerMode;
+
+/*
+ * Given the full path to a DEX or Jar file, and (if appropriate) the name
+ * within the Jar, open the optimized version from the cache.
+ *
+ * If "*pNewFile" is set, a new file has been created with only a stub
+ * "opt" header, and the caller is expected to fill in the blanks.
+ *
+ * Returns the file descriptor, locked and seeked past the "opt" header.
+ */
+int dvmOpenCachedDexFile(const char* fileName, const char* subFileName,
+    u4 modWhen, u4 crc, bool isBootstrap, char** pCachedName, bool* pNewFile,
+    bool createIfMissing);
+
+/*
+ * Unlock the specified file descriptor.  Use in conjunction with
+ * dvmOpenCachedDexFile().
+ *
+ * Returns true on success.
+ */
+bool dvmUnlockCachedDexFile(int fd);
+
+/*
+ * Verify the contents of the "opt" header, and check the DEX file's
+ * dependencies on its source zip (if available).
+ */
+bool dvmCheckOptHeaderAndDependencies(int fd, bool sourceAvail, u4 modWhen,
+    u4 crc, bool expectVerify, bool expectOpt);
+
+/*
+ * Optimize a DEX file.  The file must start with the "opt" header, followed
+ * by the plain DEX data.  It must be mmap()able.
+ *
+ * "fileName" is only used for debug output.
+ */
+bool dvmOptimizeDexFile(int fd, off_t dexOffset, long dexLen,
+    const char* fileName, u4 modWhen, u4 crc, bool isBootstrap);
+
+/*
+ * Continue the optimization process on the other side of a fork/exec.
+ */
+bool dvmContinueOptimization(int fd, off_t dexOffset, long dexLength,
+    const char* fileName, u4 modWhen, u4 crc, bool isBootstrap);
+
+/*
+ * Abbreviated resolution functions, for use by optimization and verification
+ * code.
+ */
+ClassObject* dvmOptResolveClass(ClassObject* referrer, u4 classIdx);
+Method* dvmOptResolveMethod(ClassObject* referrer, u4 methodIdx,
+        MethodType methodType);
+Method* dvmOptResolveInterfaceMethod(ClassObject* referrer, u4 methodIdx);
+InstField* dvmOptResolveInstField(ClassObject* referrer, u4 ifieldIdx);
+StaticField* dvmOptResolveStaticField(ClassObject* referrer, u4 sfieldIdx);
+
+#endif /*_DALVIK_DEXOPTIMIZE*/
diff --git a/vm/analysis/DexVerify.c b/vm/analysis/DexVerify.c
new file mode 100644
index 0000000..ab1e50b
--- /dev/null
+++ b/vm/analysis/DexVerify.c
@@ -0,0 +1,823 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik classfile verification.  This file contains the verifier entry
+ * points and the static constraint checks.
+ */
+#include "Dalvik.h"
+#include "analysis/CodeVerify.h"
+#include "libdex/DexCatch.h"
+#include "libdex/InstrUtils.h"
+
+//#define static
+
+/* verification failure reporting */
+#define LOG_VFY(...)                dvmLogVerifyFailure(NULL, __VA_ARGS__);
+#define LOG_VFY_METH(_meth, ...)    dvmLogVerifyFailure(_meth, __VA_ARGS__);
+
+
+/* fwd */
+static bool computeCodeWidths(const Method* meth, InsnFlags* insnFlags,\
+    int* pNewInstanceCount);
+static bool setTryFlags(const Method* meth, InsnFlags* insnFlags);
+static bool verifyMethod(Method* meth, int verifyFlags);
+static bool verifyInstructions(const Method* meth, InsnFlags* insnFlags,
+    int verifyFlags);
+static bool checkNewInstance(const Method* meth, int insnIdx);
+static bool checkNewArray(const Method* meth, int insnIdx);
+
+
+/*
+ * Initialize some things we need for verification.
+ *
+ * Allocates the instruction width/format/flags tables used throughout
+ * the verifier.  Returns "true" only if all three allocations succeeded.
+ */
+bool dvmVerificationStartup(void)
+{
+    gDvm.instrWidth = dexCreateInstrWidthTable();
+    gDvm.instrFormat = dexCreateInstrFormatTable();
+    gDvm.instrFlags = dexCreateInstrFlagsTable();
+    /* previously instrFlags was allocated but never NULL-checked */
+    return (gDvm.instrWidth != NULL && gDvm.instrFormat != NULL &&
+            gDvm.instrFlags != NULL);
+}
+
+/*
+ * Free the instruction tables allocated by dvmVerificationStartup().
+ * (The old comment said "Initialize", a copy-paste slip.)
+ */
+void dvmVerificationShutdown(void)
+{
+    free(gDvm.instrWidth);
+    free(gDvm.instrFormat);
+    free(gDvm.instrFlags);
+}
+
+/*
+ * Induce verification on all classes loaded from this DEX file as part
+ * of pre-verification and optimization.  This is never called from a
+ * normally running VM.
+ *
+ * Each class that verifies cleanly gets CLASS_ISPREVERIFIED OR'd into
+ * its (memory-mapped) DexClassDef access flags.
+ *
+ * Returns "true" when all classes have been processed.
+ */
+bool dvmVerifyAllClasses(DexFile* pDexFile)
+{
+    u4 count = pDexFile->pHeader->classDefsSize;
+    u4 idx;
+
+    assert(gDvm.optimizing);
+
+    if (gDvm.classVerifyMode == VERIFY_MODE_NONE) {
+        LOGV("+++ verification is disabled, skipping all classes\n");
+        return true;
+    }
+    if (gDvm.classVerifyMode == VERIFY_MODE_REMOTE &&
+        gDvm.optimizingBootstrapClass)
+    {
+        LOGV("+++ verification disabled for bootstrap classes\n");
+        return true;
+    }
+
+    for (idx = 0; idx < count; idx++) {
+        const DexClassDef* pClassDef;
+        const char* classDescriptor;
+        ClassObject* clazz;
+
+        pClassDef = dexGetClassDef(pDexFile, idx);
+        classDescriptor = dexStringByTypeIdx(pDexFile, pClassDef->classIdx);
+
+        /* all classes are loaded into the bootstrap class loader */
+        clazz = dvmLookupClass(classDescriptor, NULL, false);
+        if (clazz != NULL) {
+            if (clazz->pDvmDex->pDexFile != pDexFile) {
+                /* same descriptor appears in more than one DEX; skip it */
+                LOGD("DexOpt: not verifying '%s': multiple definitions\n",
+                    classDescriptor);
+            } else {
+                if (dvmVerifyClass(clazz, VERIFY_DEFAULT)) {
+                    assert((clazz->accessFlags & JAVA_FLAGS_MASK) ==
+                        pClassDef->accessFlags);
+                    /* cast away const to mark the mapped class def */
+                    ((DexClassDef*)pClassDef)->accessFlags |=
+                        CLASS_ISPREVERIFIED;
+                }
+                /* keep going even if one fails */
+            }
+        } else {
+            LOGV("DexOpt: +++  not verifying '%s'\n", classDescriptor);
+        }
+    }
+
+    return true;
+}
+
+/*
+ * Verify a class.
+ *
+ * By the time we get here, the value of gDvm.classVerifyMode should already
+ * have been factored in.  If you want to call into the verifier even
+ * though verification is disabled, that's your business.
+ *
+ * Verifies every direct method, then every virtual method; stops at the
+ * first failure.
+ *
+ * Returns "true" on success.
+ */
+bool dvmVerifyClass(ClassObject* clazz, int verifyFlags)
+{
+    int i;
+
+    if (dvmIsClassVerified(clazz)) {
+        LOGD("Ignoring duplicate verify attempt on %s\n", clazz->descriptor);
+        return true;
+    }
+
+    //LOGI("Verify1 '%s'\n", clazz->descriptor);
+
+    // TODO - verify class structure in DEX?
+
+    for (i = 0; i < clazz->directMethodCount; i++) {
+        if (!verifyMethod(&clazz->directMethods[i], verifyFlags)) {
+            LOG_VFY("Verifier rejected class %s\n", clazz->descriptor);
+            return false;
+        }
+    }
+    for (i = 0; i < clazz->virtualMethodCount; i++) {
+        if (!verifyMethod(&clazz->virtualMethods[i], verifyFlags)) {
+            LOG_VFY("Verifier rejected class %s\n", clazz->descriptor);
+            return false;
+        }
+    }
+
+    return true;
+}
+
+/*
+ * Perform verification on a single method.
+ *
+ * We do this in three passes:
+ *  (1) Walk through all code units, determining instruction lengths.
+ *  (2) Do static checks, including branch target and operand validation.
+ *  (3) Do structural checks, including data-flow analysis.
+ *
+ * Some checks may be bypassed depending on the verification mode.  We can't
+ * turn this stuff off completely if we want to do "exact" GC.
+ *
+ * - operands of getfield, putfield, getstatic, putstatic must be valid
+ * - operands of method invocation instructions must be valid
+ *
+ * - code array must not be empty
+ * - (N/A) code_length must be less than 65536
+ * - opcode of first instruction begins at index 0
+ * - only documented instructions may appear
+ * - each instruction follows the last
+ * - (below) last byte of last instruction is at (code_length-1)
+ *
+ * Returns "true" on success; all temporary allocations are released on
+ * both success and failure paths.
+ */
+static bool verifyMethod(Method* meth, int verifyFlags)
+{
+    bool result = false;
+    UninitInstanceMap* uninitMap = NULL;
+    InsnFlags* insnFlags = NULL;
+    int newInstanceCount;       /* was "int i, newInstanceCount"; i unused */
+
+    /*
+     * If there aren't any instructions, make sure that's expected, then
+     * exit successfully. Note: meth->insns gets set to a native function
+     * pointer on first call.
+     */
+    if (dvmGetMethodInsnsSize(meth) == 0) {
+        if (!dvmIsNativeMethod(meth) && !dvmIsAbstractMethod(meth)) {
+            LOG_VFY_METH(meth,
+                "VFY: zero-length code in concrete non-native method\n");
+            goto bail;
+        }
+
+        goto success;
+    }
+
+    /*
+     * Allocate and populate an array to hold instruction data.
+     *
+     * TODO: Consider keeping a reusable pre-allocated array sitting
+     * around for smaller methods.
+     */
+    insnFlags = (InsnFlags*)
+        calloc(dvmGetMethodInsnsSize(meth), sizeof(InsnFlags));
+    if (insnFlags == NULL)
+        goto bail;
+
+    /*
+     * Compute the width of each instruction and store the result in insnFlags.
+     * Count up the #of occurrences of new-instance instructions while we're
+     * at it.
+     */
+    if (!computeCodeWidths(meth, insnFlags, &newInstanceCount))
+        goto bail;
+
+    /*
+     * Allocate a map to hold the classes of uninitialized instances.
+     */
+    uninitMap = dvmCreateUninitInstanceMap(meth, insnFlags, newInstanceCount);
+    if (uninitMap == NULL)
+        goto bail;
+
+    /*
+     * Set the "in try" flags for all instructions guarded by a "try" block.
+     */
+    if (!setTryFlags(meth, insnFlags))
+        goto bail;
+
+    /*
+     * Perform static instruction verification.
+     */
+    if (!verifyInstructions(meth, insnFlags, verifyFlags))
+        goto bail;
+
+    /*
+     * Do code-flow analysis.  Do this after verifying the branch targets
+     * so we don't need to worry about it here.
+     *
+     * If there are no registers, we don't need to do much in the way of
+     * analysis, but we still need to verify that nothing actually tries
+     * to use a register.
+     */
+    if (!dvmVerifyCodeFlow(meth, insnFlags, uninitMap)) {
+        //LOGD("+++ %s failed code flow\n", meth->name);
+        goto bail;
+    }
+
+success:
+    result = true;
+
+bail:
+    dvmFreeUninitInstanceMap(uninitMap);
+    free(insnFlags);
+    return result;
+}
+
+
+/*
+ * Compute the width of the instruction at each address in the instruction
+ * stream.  Addresses that are in the middle of an instruction, or that
+ * are part of switch table data, are not set (so the caller should probably
+ * initialize "insnFlags" to zero).
+ *
+ * Also counts OP_NEW_INSTANCE occurrences into "*pNewInstanceCount" for
+ * later sizing of the uninit-instance map.
+ *
+ * Logs an error and returns "false" on failure.
+ */
+static bool computeCodeWidths(const Method* meth, InsnFlags* insnFlags,
+    int* pNewInstanceCount)
+{
+    const int insnCount = dvmGetMethodInsnsSize(meth);
+    const u2* insns = meth->insns;
+    bool result = false;
+    int i;
+
+    *pNewInstanceCount = 0;
+
+    for (i = 0; i < insnCount; /**/) {
+        int width;
+
+        /*
+         * Switch tables are identified with "extended NOP" opcodes.  They
+         * contain no executable code, so we can just skip past them.
+         */
+        if (*insns == kPackedSwitchSignature) {
+            /* ident + size + first_key(2) + size targets(2 each) */
+            width = 4 + insns[1] * 2;
+        } else if (*insns == kSparseSwitchSignature) {
+            /* ident + size + size keys(2 each) + size targets(2 each) */
+            width = 2 + insns[1] * 4;
+        /*
+         * Array data table is identified with similar extended NOP opcode.
+         */
+        } else if (*insns == kArrayDataSignature) {
+            u4 size = insns[2] | (((u4)insns[3]) << 16);
+            /* insns[1] is the element width in bytes; round up to u2 units */
+            width = 4 + (insns[1] * size + 1) / 2;
+        } else {
+            int instr = *insns & 0xff;
+            width = dexGetInstrWidthAbs(gDvm.instrWidth, instr);
+            if (width == 0) {
+                LOG_VFY_METH(meth,
+                    "VFY: invalid post-opt instruction (0x%x)\n", instr);
+                goto bail;
+            }
+            if (width < 0 || width > 5) {
+                LOGE("VFY: bizarre width value %d\n", width);
+                dvmAbort();
+            }
+
+            if (instr == OP_NEW_INSTANCE)
+                (*pNewInstanceCount)++;
+        }
+
+        /* width shares insnFlags storage with flag bits; keep it in 16 bits */
+        if (width > 65535) {
+            LOG_VFY_METH(meth, "VFY: insane width %d\n", width);
+            goto bail;
+        }
+
+        insnFlags[i] |= width;
+        i += width;
+        insns += width;
+    }
+    /* the last instruction must end exactly at the code boundary */
+    if (i != (int) dvmGetMethodInsnsSize(meth)) {
+        LOG_VFY_METH(meth, "VFY: code did not end where expected (%d vs. %d)\n",
+            i, dvmGetMethodInsnsSize(meth));
+        goto bail;
+    }
+
+    result = true;
+
+bail:
+    return result;
+}
+
+/*
+ * Set the "in try" flags for all instructions protected by "try" statements.
+ * Also sets the "branch target" flags for exception handlers.
+ *
+ * Call this after widths have been set in "insnFlags".
+ *
+ * Returns "false" if something in the exception table looks fishy, but
+ * we're expecting the exception table to be somewhat sane.
+ */
+static bool setTryFlags(const Method* meth, InsnFlags* insnFlags)
+{
+    u4 insnsSize = dvmGetMethodInsnsSize(meth);
+    /* (removed an unused "DexFile* pDexFile" local) */
+    const DexCode* pCode = dvmGetMethodCode(meth);
+    u4 triesSize = pCode->triesSize;
+    const DexTry* pTries;
+    u4 handlersSize;
+    u4 offset;
+    u4 i;
+
+    if (triesSize == 0) {
+        return true;
+    }
+
+    pTries = dexGetTries(pCode);
+    handlersSize = dexGetHandlersSize(pCode);
+
+    for (i = 0; i < triesSize; i++) {
+        const DexTry* pTry = &pTries[i];
+        u4 start = pTry->startAddr;
+        u4 end = start + pTry->insnCount;
+        u4 addr;
+
+        /* the try region must be non-empty and inside the code area */
+        if ((start >= end) || (start >= insnsSize) || (end > insnsSize)) {
+            LOG_VFY_METH(meth,
+                "VFY: bad exception entry: startAddr=%d endAddr=%d (size=%d)\n",
+                start, end, insnsSize);
+            return false;
+        }
+
+        /* width == 0 means "start" is mid-instruction or table data */
+        if (dvmInsnGetWidth(insnFlags, start) == 0) {
+            LOG_VFY_METH(meth,
+                "VFY: 'try' block starts inside an instruction (%d)\n",
+                start);
+            return false;
+        }
+
+        for (addr = start; addr < end;
+            addr += dvmInsnGetWidth(insnFlags, addr))
+        {
+            assert(dvmInsnGetWidth(insnFlags, addr) != 0);
+            dvmInsnSetInTry(insnFlags, addr, true);
+        }
+    }
+
+    /* Iterate over each of the handlers to verify target addresses. */
+    offset = dexGetFirstHandlerOffset(pCode);
+    for (i = 0; i < handlersSize; i++) {
+        DexCatchIterator iterator;
+        dexCatchIteratorInit(&iterator, pCode, offset);
+
+        for (;;) {
+            DexCatchHandler* handler = dexCatchIteratorNext(&iterator);
+            u4 addr;
+
+            if (handler == NULL) {
+                break;
+            }
+
+            /* the handler must begin on an instruction boundary */
+            addr = handler->address;
+            if (dvmInsnGetWidth(insnFlags, addr) == 0) {
+                LOG_VFY_METH(meth,
+                    "VFY: exception handler starts at bad address (%d)\n",
+                    addr);
+                return false;
+            }
+
+            dvmInsnSetBranchTarget(insnFlags, addr, true);
+        }
+
+        offset = dexCatchIteratorGetEndOffset(&iterator, pCode);
+    }
+
+    return true;
+}
+
+/*
+ * Verify a switch table.  "curOffset" is the offset of the switch
+ * instruction.
+ *
+ * Validates the table's location, alignment, bounds, sparse-key ordering,
+ * and every branch target; marks targets in "insnFlags".
+ */
+static bool checkSwitchTargets(const Method* meth, InsnFlags* insnFlags,
+    int curOffset)
+{
+    const int insnCount = dvmGetMethodInsnsSize(meth);
+    const u2* insns = meth->insns + curOffset;
+    const u2* switchInsns;
+    int switchCount, tableSize;
+    int offsetToSwitch, offsetToKeys, offsetToTargets, targ;
+    int offset, absOffset;
+
+    assert(curOffset >= 0 && curOffset < insnCount);
+
+    /* make sure the start of the switch is in range */
+    offsetToSwitch = (s2) insns[1];
+    if (curOffset + offsetToSwitch < 0 ||
+        curOffset + offsetToSwitch + 2 >= insnCount)
+    {
+        LOG_VFY_METH(meth,
+            "VFY: invalid switch start: at %d, switch offset %d, count %d\n",
+            curOffset, offsetToSwitch, insnCount);
+        return false;
+    }
+
+    /* offset to switch table is a relative branch-style offset */
+    switchInsns = insns + offsetToSwitch;
+
+    /* make sure the table is 32-bit aligned */
+    if ((((u4) switchInsns) & 0x03) != 0) {
+        LOG_VFY_METH(meth,
+            "VFY: unaligned switch table: at %d, switch offset %d\n",
+            curOffset, offsetToSwitch);
+        return false;
+    }
+
+    switchCount = switchInsns[1];
+
+    if ((*insns & 0xff) == OP_PACKED_SWITCH) {
+        /* 0=sig, 1=count, 2/3=firstKey */
+        offsetToTargets = 4;
+        offsetToKeys = -1;
+    } else {
+        /* 0=sig, 1=count, 2..count*2 = keys */
+        offsetToKeys = 2;
+        offsetToTargets = 2 + 2*switchCount;
+    }
+    tableSize = offsetToTargets + switchCount*2;
+
+    /* make sure the end of the switch is in range */
+    if (curOffset + offsetToSwitch + tableSize > insnCount) {
+        LOG_VFY_METH(meth,
+            "VFY: invalid switch end: at %d, switch offset %d, end %d, count %d\n",
+            curOffset, offsetToSwitch, curOffset + offsetToSwitch + tableSize,
+            insnCount);
+        return false;
+    }
+
+    /* for a sparse switch, verify the keys are in ascending order */
+    if (offsetToKeys > 0 && switchCount > 1) {
+        s4 lastKey;
+
+        lastKey = switchInsns[offsetToKeys] |
+                  (switchInsns[offsetToKeys+1] << 16);
+        for (targ = 1; targ < switchCount; targ++) {
+            s4 key = (s4) switchInsns[offsetToKeys + targ*2] |
+                    (s4) (switchInsns[offsetToKeys + targ*2 +1] << 16);
+            if (key <= lastKey) {
+                /* (fixed message: this is the sparse-switch check) */
+                LOG_VFY_METH(meth,
+                    "VFY: invalid sparse switch: last key=%d, this=%d\n",
+                    lastKey, key);
+                return false;
+            }
+
+            lastKey = key;
+        }
+    }
+
+    /* verify each switch target */
+    for (targ = 0; targ < switchCount; targ++) {
+        offset = (s4) switchInsns[offsetToTargets + targ*2] |
+                (s4) (switchInsns[offsetToTargets + targ*2 +1] << 16);
+        absOffset = curOffset + offset;
+
+        if (absOffset < 0 || absOffset >= insnCount ||
+            !dvmInsnIsOpcode(insnFlags, absOffset))
+        {
+            LOG_VFY_METH(meth,
+                "VFY: invalid switch target %d (-> 0x%x) at 0x%x[%d]\n",
+                offset, absOffset, curOffset, targ);
+            return false;
+        }
+        dvmInsnSetBranchTarget(insnFlags, absOffset, true);
+    }
+
+    return true;
+}
+
+/*
+ * Verify an array data table.  "curOffset" is the offset of the fill-array-data
+ * instruction.
+ *
+ * Checks that the table is in bounds and 32-bit aligned; does not inspect
+ * the element payload itself.
+ */
+static bool checkArrayData(const Method* meth, int curOffset)
+{
+    const int insnCount = dvmGetMethodInsnsSize(meth);
+    const u2* insns = meth->insns + curOffset;
+    const u2* arrayData;
+    int valueCount, valueWidth, tableSize;
+    int offsetToArrayData;
+
+    assert(curOffset >= 0 && curOffset < insnCount);
+
+    /* make sure the start of the array data table is in range */
+    offsetToArrayData = insns[1] | (((s4)insns[2]) << 16);
+    if (curOffset + offsetToArrayData < 0 ||
+        curOffset + offsetToArrayData + 2 >= insnCount)
+    {
+        LOG_VFY_METH(meth,
+            "VFY: invalid array data start: at %d, data offset %d, count %d\n",
+            curOffset, offsetToArrayData, insnCount);
+        return false;
+    }
+
+    /* offset to array data table is a relative branch-style offset */
+    arrayData = insns + offsetToArrayData;
+
+    /* make sure the table is 32-bit aligned */
+    if ((((u4) arrayData) & 0x03) != 0) {
+        LOG_VFY_METH(meth,
+            "VFY: unaligned array data table: at %d, data offset %d\n",
+            curOffset, offsetToArrayData);
+        return false;
+    }
+
+    /* element width in bytes, element count; count read is 32-bit aligned
+     * because the table itself is */
+    valueWidth = arrayData[1];
+    valueCount = *(u4*)(&arrayData[2]);
+
+    /* ident + width + count(2) + payload rounded up to u2 units */
+    tableSize = 4 + (valueWidth * valueCount + 1) / 2;
+
+    /* make sure the end of the switch is in range */
+    if (curOffset + offsetToArrayData + tableSize > insnCount) {
+        LOG_VFY_METH(meth,
+            "VFY: invalid array data end: at %d, data offset %d, end %d, "
+            "count %d\n",
+            curOffset, offsetToArrayData,
+            curOffset + offsetToArrayData + tableSize,
+            insnCount);
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Verify that the target of a branch instruction is valid.
+ *
+ * We don't expect code to jump directly into an exception handler, but
+ * it's valid to do so as long as the target isn't a "move-exception"
+ * instruction.  We verify that in a later stage.
+ *
+ * The VM spec doesn't forbid an instruction from branching to itself,
+ * but the Dalvik spec declares that only certain instructions can do so.
+ * Pass "selfOkay" true for those (e.g. goto/32).
+ *
+ * On success, marks the target as a branch target in "insnFlags".
+ */
+static bool checkBranchTarget(const Method* meth, InsnFlags* insnFlags,
+    int curOffset, bool selfOkay)
+{
+    const int insnCount = dvmGetMethodInsnsSize(meth);
+    /* (removed an unused "const u2* insns" local) */
+    int offset, absOffset;
+    bool isConditional;
+
+    if (!dvmGetBranchTarget(meth, insnFlags, curOffset, &offset,
+            &isConditional))
+        return false;
+
+    if (!selfOkay && offset == 0) {
+        LOG_VFY_METH(meth, "VFY: branch offset of zero not allowed at 0x%x\n",
+            curOffset);
+        return false;
+    }
+
+    /*
+     * Check for 32-bit overflow.  This isn't strictly necessary if we can
+     * depend on the VM to have identical "wrap-around" behavior, but
+     * it's unwise to depend on that.
+     */
+    if (((s8) curOffset + (s8) offset) != (s8)(curOffset + offset)) {
+        LOG_VFY_METH(meth, "VFY: branch target overflow 0x%x +%d\n",
+            curOffset, offset);
+        return false;
+    }
+    absOffset = curOffset + offset;
+    if (absOffset < 0 || absOffset >= insnCount ||
+        !dvmInsnIsOpcode(insnFlags, absOffset))
+    {
+        LOG_VFY_METH(meth,
+            "VFY: invalid branch target %d (-> 0x%x) at 0x%x\n",
+            offset, absOffset, curOffset);
+        return false;
+    }
+    dvmInsnSetBranchTarget(insnFlags, absOffset, true);
+
+    return true;
+}
+
+/*
+ * Perform static verification on instructions.
+ *
+ * As a side effect, this sets the "branch target" flags in InsnFlags.
+ *
+ * "(CF)" items are handled during code-flow analysis.
+ *
+ * v3 4.10.1
+ * - target of each jump and branch instruction must be valid
+ * - targets of switch statements must be valid
+ * - (CF) operands referencing constant pool entries must be valid
+ * - (CF) operands of getfield, putfield, getstatic, putstatic must be valid
+ * - (new) verify operands of "quick" field ops
+ * - (CF) operands of method invocation instructions must be valid
+ * - (new) verify operands of "quick" method invoke ops
+ * - (CF) only invoke-direct can call a method starting with '<'
+ * - (CF) <clinit> must never be called explicitly
+ * - (CF) operands of instanceof, checkcast, new (and variants) must be valid
+ * - new-array[-type] limited to 255 dimensions
+ * - can't use "new" on an array class
+ * - (?) limit dimensions in multi-array creation
+ * - (CF) local variable load/store register values must be in valid range
+ *
+ * v3 4.11.1.2
+ * - branches must be within the bounds of the code array
+ * - targets of all control-flow instructions are the start of an instruction
+ * - (CF) register accesses fall within range of allocated registers
+ * - (N/A) access to constant pool must be of appropriate type
+ * - (CF) code does not end in the middle of an instruction
+ * - (CF) execution cannot fall off the end of the code
+ * - (earlier) for each exception handler, the "try" area must begin and
+ *   end at the start of an instruction (end can be at the end of the code)
+ * - (earlier) for each exception handler, the handler must start at a valid
+ *   instruction
+ */
+static bool verifyInstructions(const Method* meth, InsnFlags* insnFlags,
+    int verifyFlags)
+{
+    const int insnCount = dvmGetMethodInsnsSize(meth);
+    const u2* insns = meth->insns;
+    int i, width;       /* removed unused "offset, absOffset" locals */
+
+    /* the start of the method is a "branch target" */
+    dvmInsnSetBranchTarget(insnFlags, 0, true);
+
+    for (i = 0; i < insnCount; /**/) {
+        width = dvmInsnGetWidth(insnFlags, i);
+
+        switch (*insns & 0xff) {
+        case OP_NOP:
+            /* plain no-op or switch table data; nothing to do here */
+            break;
+
+        case OP_PACKED_SWITCH:
+        case OP_SPARSE_SWITCH:
+            /* verify the associated table */
+            if (!checkSwitchTargets(meth, insnFlags, i))
+                return false;
+            break;
+
+        case OP_FILL_ARRAY_DATA:
+            /* verify the associated table */
+            if (!checkArrayData(meth, i))
+                return false;
+            break;
+
+        case OP_GOTO:
+        case OP_GOTO_16:
+        case OP_IF_EQ:
+        case OP_IF_NE:
+        case OP_IF_LT:
+        case OP_IF_GE:
+        case OP_IF_GT:
+        case OP_IF_LE:
+        case OP_IF_EQZ:
+        case OP_IF_NEZ:
+        case OP_IF_LTZ:
+        case OP_IF_GEZ:
+        case OP_IF_GTZ:
+        case OP_IF_LEZ:
+            /* check the destination */
+            if (!checkBranchTarget(meth, insnFlags, i, false))
+                return false;
+            break;
+        case OP_GOTO_32:
+            /* check the destination; self-branch is okay */
+            if (!checkBranchTarget(meth, insnFlags, i, true))
+                return false;
+            break;
+
+        case OP_NEW_INSTANCE:
+            if (!checkNewInstance(meth, i))
+                return false;
+            break;
+
+        case OP_NEW_ARRAY:
+            if (!checkNewArray(meth, i))
+                return false;
+            break;
+
+        case OP_EXECUTE_INLINE:
+        case OP_INVOKE_DIRECT_EMPTY:
+        case OP_IGET_QUICK:
+        case OP_IGET_WIDE_QUICK:
+        case OP_IGET_OBJECT_QUICK:
+        case OP_IPUT_QUICK:
+        case OP_IPUT_WIDE_QUICK:
+        case OP_IPUT_OBJECT_QUICK:
+        case OP_INVOKE_VIRTUAL_QUICK:
+        case OP_INVOKE_VIRTUAL_QUICK_RANGE:
+        case OP_INVOKE_SUPER_QUICK:
+        case OP_INVOKE_SUPER_QUICK_RANGE:
+            /* optimized instructions are only legal post-dexopt */
+            if ((verifyFlags & VERIFY_ALLOW_OPT_INSTRS) == 0) {
+                LOG_VFY("VFY: not expecting optimized instructions\n");
+                return false;
+            }
+            break;
+
+        default:
+            /* nothing to do */
+            break;
+        }
+
+        assert(width > 0);
+        i += width;
+        insns += width;
+    }
+
+    /* make sure the last instruction ends at the end of the insn area */
+    if (i != insnCount) {
+        LOG_VFY_METH(meth,
+            "VFY: code did not end when expected (end at %d, count %d)\n",
+            i, insnCount);
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Perform static checks on a "new-instance" instruction.  Specifically,
+ * make sure the class reference isn't for an array class.
+ *
+ * We don't need the actual class, just a pointer to the class name.
+ */
+/*
+ * Static check for a "new-instance" instruction: the referenced type
+ * must be a class (descriptor starts with 'L'), never an array.
+ *
+ * Only the descriptor string is needed; we never resolve the class.
+ */
+static bool checkNewInstance(const Method* meth, int insnIdx)
+{
+    DexFile* pDexFile = meth->clazz->pDvmDex->pDexFile;
+    DecodedInstruction decInsn;
+
+    /* decode so we can pull the type index out of vB */
+    dexDecodeInstruction(gDvm.instrFormat, meth->insns + insnIdx, &decInsn);
+
+    const char* descriptor = dexStringByTypeIdx(pDexFile, decInsn.vB);
+    if (descriptor[0] == 'L')
+        return true;
+
+    LOG_VFY_METH(meth, "VFY: can't call new-instance on type '%s'\n",
+        descriptor);
+    return false;
+}
+
+/*
+ * Perform static checks on a "new-array" instruction.  Specifically, make
+ * sure they aren't creating an array of arrays that causes the number of
+ * dimensions to exceed 255.
+ */
+/*
+ * Static check for a "new-array" instruction: the referenced type must
+ * actually be an array, and its dimension count (number of leading '['
+ * characters in the descriptor) may not exceed 255.
+ */
+static bool checkNewArray(const Method* meth, int insnIdx)
+{
+    DexFile* pDexFile = meth->clazz->pDvmDex->pDexFile;
+    DecodedInstruction decInsn;
+
+    /* decode so we can pull the type index out of vC */
+    dexDecodeInstruction(gDvm.instrFormat, meth->insns + insnIdx, &decInsn);
+    const char* descriptor = dexStringByTypeIdx(pDexFile, decInsn.vC);
+
+    /* each leading '[' is one array dimension */
+    int dimensions = 0;
+    const char* p = descriptor;
+    while (*p == '[') {
+        dimensions++;
+        p++;
+    }
+
+    if (dimensions == 0) {
+        /* The given class must be an array type. */
+        LOG_VFY_METH(meth, "VFY: can't new-array class '%s' (not an array)\n",
+            descriptor);
+        return false;
+    } else if (dimensions > 255) {
+        /* It is illegal to create an array of more than 255 dimensions. */
+        LOG_VFY_METH(meth, "VFY: can't new-array class '%s' (exceeds limit)\n",
+            descriptor);
+        return false;
+    }
+
+    return true;
+}
diff --git a/vm/analysis/DexVerify.h b/vm/analysis/DexVerify.h
new file mode 100644
index 0000000..ceaf6e0
--- /dev/null
+++ b/vm/analysis/DexVerify.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik classfile verification.
+ */
+#ifndef _DALVIK_DEXVERIFY
+#define _DALVIK_DEXVERIFY
+
+/*
+ * Global verification mode.  These must be in order from least verification
+ * to most.  If we're using "exact GC", we may need to perform some of
+ * the verification steps anyway.
+ */
+typedef enum {
+    VERIFY_MODE_UNKNOWN = 0,    /* mode not yet established */
+    VERIFY_MODE_NONE,           /* skip verification entirely */
+    VERIFY_MODE_REMOTE,         /* presumably: verify only non-boot-path
+                                 * classes -- confirm against usage */
+    VERIFY_MODE_ALL             /* verify every class */
+} DexClassVerifyMode;
+
+/*
+ * Bit values for dvmVerifyClass() "verifyFlags" arg.
+ *
+ * (Verification is currently a prerequisite for optimization, not an
+ * after-effect, so we don't currently use VERIFY_ALLOW_OPT_INSTRS.)
+ */
+enum {
+    VERIFY_DEFAULT              = 0,    // standard checks only
+    VERIFY_ALLOW_OPT_INSTRS     = 1,    // allow instrs emitted by optimizer
+};
+
+bool dvmVerificationStartup(void);
+void dvmVerificationShutdown(void);
+
+/*
+ * Perform verification on all classes loaded from this DEX file.  This
+ * should be done before optimization.
+ */
+bool dvmVerifyAllClasses(DexFile* pDexFile);
+
+/*
+ * Verify a single class.
+ */
+bool dvmVerifyClass(ClassObject* clazz, int verifyFlags);
+
+#endif /*_DALVIK_DEXVERIFY*/
diff --git a/vm/arch/arm/CallEABI.S b/vm/arch/arm/CallEABI.S
new file mode 100644
index 0000000..e3d6c6f
--- /dev/null
+++ b/vm/arch/arm/CallEABI.S
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * JNI method invocation.  This is used to call a C/C++ JNI method.  The
+ * argument list has to be pushed onto the native stack according to
+ * local calling conventions.
+ *
+ * This version supports the "new" ARM EABI.
+ */
+#ifdef __ARM_EABI__
+
+#ifdef EXTENDED_EABI_DEBUG
+# define DBG
+#else
+# define DBG @
+#endif
+
+
+/*
+Function prototype:
+
+void dvmPlatformInvoke(void* pEnv, ClassObject* clazz, int argInfo, int argc,
+    const u4* argv, const char* signature, void* func, JValue* pReturn) 
+
+The method we are calling has the form:
+
+  return_type func(JNIEnv* pEnv, ClassObject* clazz, ...)
+    -or-
+  return_type func(JNIEnv* pEnv, Object* this, ...)
+
+We receive a collection of 32-bit values which correspond to arguments from
+the interpreter (e.g. float occupies one, double occupies two).  It's up to
+us to convert these into local calling conventions.
+*/
+
+/*
+ARM EABI notes:
+
+r0-r3 hold first 4 args to a method
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them.
+
+Stack is "full descending".  Only the arguments that don't fit in the first 4
+registers are placed on the stack.  "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.  This means
+we have to scan the method signature, identify arguments that must be
+padded, and fix them up appropriately.
+*/
+
+    .text
+    .align  2
+    .global dvmPlatformInvoke
+    .type   dvmPlatformInvoke, %function
+
+/*
+ * On entry:
+ *   r0  JNIEnv (can be left alone)
+ *   r1  clazz (NULL for virtual method calls, non-NULL for static)
+ *   r2  arg info
+ *   r3  argc (number of 32-bit values in argv)
+ *   [sp]     argv
+ *   [sp,#4]  short signature
+ *   [sp,#8]  func
+ *   [sp,#12] pReturn
+ *
+ * For a virtual method call, the "this" reference is in argv[0].
+ *
+ * argInfo (32-bit int) layout:
+ *   SRRRLLLL FFFFFFFF FFFFFFFF FFFFFFFF
+ *
+ *   S - if set, do things the hard way (scan the signature)
+ *   R - return type enumeration, really only important for hardware FP
+ *   L - number of double-words of storage required on stack (0-30 words)
+ *   F - pad flag -- if set, write a pad word to the stack
+ *
+ * With this arrangement we can efficiently push up to 24 words of arguments
+ * onto the stack.  Anything requiring more than that -- which should happen
+ * rarely to never -- can do the slow signature scan.
+ *
+ * (We could pack the Fs more efficiently -- we know we never push two pads
+ * in a row, and the first word can never be a pad -- but there's really
+ * no need for it.)
+ *
+ * TODO: could reduce register-saving overhead for "fast" case, since we
+ * don't use a couple of registers.  Another thought is to rearrange the
+ * arguments such that r0/r1 get passed in on the stack, allowing us to
+ * use r0/r1 freely here and then load them with a single ldm.  Might be
+ * faster than saving/restoring other registers so that we can leave r0/r1
+ * undisturbed.
+ *
+ * NOTE: if the called function has more than 4 words of arguments, gdb
+ * will not be able to unwind the stack past this method.  The only way
+ * around this is to convince gdb to respect an explicit frame pointer.
+ */
+dvmPlatformInvoke:
+    .fnstart
+    @ Save regs.  Same style as gcc with "-fomit-frame-pointer" -- we don't
+    @ disturb "fp" in case somebody else wants it.  Copy "sp" to r4 and use
+    @ that to access local vars.
+    @
+    @ On entry to a function, "sp" must be 64-bit aligned.  This means
+    @ we have to adjust sp manually if we push an odd number of regs here
+    @ (both here and when exiting).  Easier to just push an even number
+    @ of registers.
+    mov     ip, sp                      @ ip<- original stack pointer
+    .save {r4, r5, r6, r7, r8, r9, ip, lr}
+    stmfd   sp!, {r4, r5, r6, r7, r8, r9, ip, lr}
+
+    mov     r4, ip                      @ r4<- original stack pointer
+
+    @ Ensure 64-bit alignment.  EABI guarantees sp is aligned on entry, make
+    @ sure we're aligned properly now.
+DBG tst     sp, #4                      @ 64-bit aligned?
+DBG bne     dvmAbort
+
+    cmp     r1, #0                      @ Is this a static method?
+    ldr     r9, [r4]                    @ r9<- argv
+
+    @ Not static: set r1 to *argv++ ("this"), and set argc--.
+    @ (The "eq" condition comes from the "cmp r1, #0" above.)
+    @
+    @ Note the "this" pointer is not included in the method signature.
+    ldreq   r1, [r9], #4
+    subeq   r3, r3, #1
+
+    @ Do we have arg padding flags in "argInfo"? (just need to check hi bit)
+    @ "teqs" sets N from the sign bit without modifying r2; if it's set,
+    @ take the slow path that re-scans the text signature.
+    teqs    r2, #0
+    bmi     .Lno_arg_info
+    /*
+     * "Fast" path.
+     *
+     * Make room on the stack for the arguments and copy them over,
+     * inserting pad words when appropriate.
+     *
+     * Currently:
+     *   r0  don't touch
+     *   r1  don't touch
+     *   r2  arg info
+     *   r3  argc
+     *   r4  original stack pointer
+     *   r5-r8 (available)
+     *   r9  argv
+     */
+.Lhave_arg_info:
+    @ Expand the stack by the specified amount.  We want to extract the
+    @ count of double-words from r2, multiply it by 8, and subtract that
+    @ from the stack pointer.
+    and     ip, r2, #0x0f000000         @ ip<- double-words required
+    mov     r5, r2, lsr #28             @ r5<- return type
+    sub     sp, sp, ip, lsr #21         @ shift right 24, then left 3
+    mov     r8, sp                      @ r8<- sp  (arg copy dest)
+
+    @ Stick argv in r7 and advance it past the argv values that will be
+    @ held in r2-r3.  It's possible r3 will hold a pad, so check the
+    @ bit in r2.  We do this by ignoring the first bit (which would
+    @ indicate a pad in r2) and shifting the second into the carry flag.
+    @ If the carry is set, r3 will hold a pad, so we adjust argv less.
+    @
+    @ (This is harmless if argc==0)
+    mov     r7, r9
+    movs    r2, r2, lsr #2
+    addcc   r7, r7, #8                  @ skip past 2 words, for r2 and r3
+    subcc   r3, r3, #2
+    addcs   r7, r7, #4                  @ skip past 1 word, for r2
+    subcs   r3, r3, #1
+
+    @ Copy one argv word per iteration; the remaining pad-flag bits stream
+    @ out of r2 into the carry flag, one bit per word pushed.
+.Lfast_copy_loop:
+    @ if (--argc < 0) goto invoke
+    subs    r3, r3, #1
+    bmi     .Lcopy_done                 @ NOTE: expects original argv in r9
+
+.Lfast_copy_loop2:
+    @ Get pad flag into carry bit.  If it's set, we don't pull a value
+    @ out of argv.
+    movs    r2, r2, lsr #1
+
+    ldrcc   ip, [r7], #4                @ ip = *r7++ (pull from argv)
+    strcc   ip, [r8], #4                @ *r8++ = ip (write to stack)
+    bcc     .Lfast_copy_loop
+
+DBG movcs   ip, #-3                     @ DEBUG DEBUG - make pad word obvious
+DBG strcs   ip, [r8]                    @ DEBUG DEBUG
+    add     r8, r8, #4                  @ if pad, just advance ip without store
+    b       .Lfast_copy_loop2           @ don't adjust argc after writing pad
+
+
+
+.Lcopy_done:
+    /*
+     * Currently:
+     *  r0-r3  args (JNIEnv*, thisOrClass, arg0, arg1)
+     *  r4  original saved sp
+     *  r5  return type (enum DalvikJniReturnType)
+     *  r9  original argv
+     *
+     * The stack copy is complete.  Grab the first two words off of argv
+     * and tuck them into r2/r3.  If the first arg is 32-bit and the second
+     * arg is 64-bit, then r3 "holds" a pad word and the load is unnecessary
+     * but harmless.
+     *
+     * If there are 0 or 1 arg words in argv, we will be loading uninitialized
+     * data into the registers, but since nothing tries to use it it's also
+     * harmless (assuming argv[0] and argv[1] point to valid memory, which
+     * is a reasonable assumption for Dalvik's interpreted stacks).
+     *
+     */
+    ldmia   r9, {r2-r3}                 @ r2/r3<- argv[0]/argv[1]
+
+    @ call the method
+    ldr     ip, [r4, #8]                @ func
+    blx     ip
+
+    @ We're back, result is in r0 or (for long/double) r0-r1.
+    @
+    @ In theory, we need to use the "return type" arg to figure out what
+    @ we have and how to return it.  However, unless we have an FPU,
+    @ all we need to do is copy r0-r1 into the JValue union.
+    @
+    @ Thought: could redefine DalvikJniReturnType such that single-word
+    @ and double-word values occupy different ranges; simple comparison
+    @ allows us to choose between str and stm.  Probably not worthwhile.
+    @
+    cmp     r5, #0                      @ DALVIK_JNI_RETURN_VOID?
+    ldrne   ip, [r4, #12]               @ pReturn
+    stmneia ip, {r0-r1}                 @ pReturn->j <- r0/r1
+
+    @ Restore the registers we saved and return (restores lr into pc, and
+    @ the initial stack pointer into sp).  r4 still points just above the
+    @ register save area, so ldmdb reloads exactly what stmfd stored.
+    ldmdb   r4, {r4, r5, r6, r7, r8, r9, sp, pc}
+    .fnend
+
+
+
+    /*
+     * "Slow" path.
+     * Walk through the argument list, counting up the number of 32-bit words
+     * required to contain it.  Then walk through it a second time, copying
+     * values out to the stack.  (We could pre-compute the size to save
+     * ourselves a trip, but we'd have to store that somewhere -- this is
+     * sufficiently unlikely that it's not worthwhile.)
+     *
+     * Try not to make any assumptions about the number of args -- I think
+     * the class file format allows up to 64K words (need to verify that).
+     *
+     * Currently:
+     *   r0  don't touch
+     *   r1  don't touch
+     *   r2  (available)
+     *   r3  argc
+     *   r4  original stack pointer
+     *   r5-r8 (available)
+     *   r9  argv
+     */
+.Lno_arg_info:
+    mov     r5, r2, lsr #28             @ r5<- return type
+    ldr     r6, [r4, #4]                @ r6<- short signature
+    mov     r2, #0                      @ r2<- word count, init to zero
+
+.Lcount_loop:
+    ldrb    ip, [r6], #1                @ ip<- *signature++
+    cmp     ip, #0                      @ end?
+    beq     .Lcount_done                @ all done, bail
+    add     r2, r2, #1                  @ count++
+    cmp     ip, #'D'                    @ look for 'D' or 'J', which are 64-bit
+    cmpne   ip, #'J'
+    bne     .Lcount_loop
+
+    @ 64-bit value, insert padding if we're not aligned.  The word just
+    @ counted is the value's first half; its (0-based) index is even --
+    @ i.e. 64-bit aligned -- exactly when the count is now odd.
+    tst     r2, #1                      @ count odd after the incr above?
+    addne   r2, #1                      @ odd: value starts aligned, just
+                                        @ count its second word
+    addeq   r2, #2                      @ even: slot misaligned -- it becomes
+                                        @ a pad word, value takes 2 more
+    b       .Lcount_loop
+.Lcount_done:
+
+    @ We have the padded-out word count in r2.  We subtract 2 from it
+    @ because we don't push the first two arg words on the stack (they're
+    @ destined for r2/r3).  Pushing them on and popping them off would be
+    @ simpler but slower.
+    subs    r2, r2, #2                  @ subtract 2 (for contents of r2/r3)
+    movmis  r2, #0                      @ if negative, peg at zero, set Z-flag
+    beq     .Lcopy_done                 @ zero args, skip stack copy
+
+DBG tst     sp, #7                      @ DEBUG - make sure sp is aligned now
+DBG bne     dvmAbort                    @ DEBUG
+
+    @ Set up to copy from r7 to r8.  We copy from the second arg to the
+    @ last arg, which means reading and writing to ascending addresses.
+    sub     sp, sp, r2, asl #2          @ sp<- sp - r2*4
+    bic     sp, #4                      @ keep sp 64-bit aligned (EABI)
+    mov     r7, r9                      @ r7<- argv
+    mov     r8, sp                      @ r8<- sp
+
+    @ We need to copy words from [r7] to [r8].  We walk forward through
+    @ the signature again, "copying" pad words when appropriate, storing
+    @ upward into the stack.
+    ldr     r6, [r4, #4]                @ r6<- signature
+    add     r7, r7, #8                  @ r7<- r7+8 (assume argv 0/1 in r2/r3)
+
+    @ Eat first arg or two, for the stuff that goes into r2/r3.
+    ldrb    ip, [r6], #1                @ ip<- *signature++
+    cmp     ip, #'D'
+    cmpne   ip, #'J'
+    beq     .Lstack_copy_loop           @ 64-bit arg fills r2+r3
+
+    @ First arg was 32-bit, check the next.
+    @ BUGFIX: compare the character just loaded (ip), not the signature
+    @ pointer (r6).  Comparing r6 made this test always fail, so a 32-bit
+    @ arg followed by a 64-bit arg was copied to the stack misaligned.
+    ldrb    ip, [r6], #1                @ ip<- *signature++
+    cmp     ip, #'D'
+    cmpne   ip, #'J'
+    subeq   r7, #4                      @ r7<- r7-4 (take it back - pad word)
+    beq     .Lstack_copy_loop2          @ start with char we already have
+
+    @ Two 32-bit args, fall through and start with next arg
+
+.Lstack_copy_loop:
+    ldrb    ip, [r6], #1                @ ip<- *signature++
+.Lstack_copy_loop2:
+    cmp     ip, #0                      @ end of shorty?
+    beq     .Lcopy_done                 @ yes
+
+    cmp     ip, #'D'
+    cmpne   ip, #'J'
+    beq     .Lcopy64
+
+    @ Copy a 32-bit value.  [r8] is initially at the end of the stack.  We
+    @ use "full descending" stacks, so we store into [r8] and incr as we
+    @ move toward the end of the arg list.
+.Lcopy32:
+    ldr     ip, [r7], #4
+    str     ip, [r8], #4
+    b       .Lstack_copy_loop
+
+.Lcopy64:
+    @ Copy a 64-bit value.  If necessary, leave a hole in the stack to
+    @ ensure alignment.  We know the [r8] output area is 64-bit aligned,
+    @ so we can just mask the address.
+    add     r8, r8, #7          @ r8<- (r8+7) & ~7
+    ldr     ip, [r7], #4
+    bic     r8, r8, #7
+    ldr     r2, [r7], #4
+    str     ip, [r8], #4
+    str     r2, [r8], #4
+    b       .Lstack_copy_loop
+
+
+
+#if 0
+
+/*
+ * Spit out a "we were here", preserving all registers.  (The attempt
+ * to save ip won't work, but we need to save an even number of
+ * registers for EABI 64-bit stack alignment.)
+ */
+     .macro SQUEAK num
+common_squeak\num:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    ldr     r0, strSqueak
+    mov     r1, #\num
+    bl      printf
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+    .endm
+
+    SQUEAK  0
+    SQUEAK  1
+    SQUEAK  2
+    SQUEAK  3
+    SQUEAK  4
+    SQUEAK  5
+
+strSqueak:
+    .word   .LstrSqueak
+.LstrSqueak:
+    .asciz  "<%d>"
+
+    .align  2
+
+#endif
+
+#endif /*__ARM_EABI__*/
diff --git a/vm/arch/arm/CallOldABI.S b/vm/arch/arm/CallOldABI.S
new file mode 100644
index 0000000..bdc14eb
--- /dev/null
+++ b/vm/arch/arm/CallOldABI.S
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * JNI method invocation.  This is used to call a C/C++ JNI method.  The
+ * argument list has to be pushed onto the native stack according to
+ * local calling conventions.
+ *
+ * This version supports the "old" ARM ABI.
+ */
+
+#ifndef __ARM_EABI__
+
+/*
+Function prototype:
+
+void dvmPlatformInvoke(void* pEnv, ClassObject* clazz, int argInfo, int argc,
+    const u4* argv, const char* signature, void* func, JValue* pReturn) 
+
+The method we are calling has the form:
+
+  return_type func(JNIEnv* pEnv, ClassObject* clazz, ...)
+    -or-
+  return_type func(JNIEnv* pEnv, Object* this, ...)
+
+We receive a collection of 32-bit values which correspond to arguments from
+the interpreter (e.g. float occupies one, double occupies two).  It's up to
+us to convert these into local calling conventions.
+ */
+
+/*
+ARM ABI notes:
+
+r0-r3 hold first 4 args to a method
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns <= 4 bytes
+r0-r1 hold returns of 5-8 bytes, low word in r0
+
+Stack is "full descending".  Only the arguments that don't fit in the first 4
+registers are placed on the stack.  "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+Happily we don't have to do anything special here -- the args from the
+interpreter work directly as C/C++ args on ARM (with the "classic" ABI).
+*/
+
+	.text
+	.align	2
+	.global	dvmPlatformInvoke
+	.type	dvmPlatformInvoke, %function
+
+/*
+On entry:
+  r0  JNIEnv
+  r1  clazz (NULL for virtual method calls, non-NULL for static)
+  r2  arg info (ignored)
+  r3  argc
+  [sp]     argv
+  [sp,#4]  signature (ignored)
+  [sp,#8]  func
+  [sp,#12] pReturn
+*/
+dvmPlatformInvoke:
+	@ Standard gcc stack frame setup.  We don't need to push the original
+	@ sp or the current pc if "-fomit-frame-pointer" is in use for the
+	@ rest of the code.  If we don't plan to use a debugger we can speed
+	@ this up a little.
+	mov		ip, sp
+	stmfd	sp!, {r4, r5, r6, fp, ip, lr, pc}
+	sub		fp, ip, #4			@ set up fp, same way gdb does
+
+	@ We need to push a variable number of arguments onto the stack.
+	@ Rather than keep a count and pop them off after, we just hold on to
+	@ the stack pointers.
+	@
+	@ In theory we don't need to keep sp -- we can do an ldmdb instead of
+	@ an ldmia -- but we're doing the gcc frame trick where we push the
+	@ pc on with stmfd and don't pop it off.
+	mov		r4, ip
+	mov		r5, sp
+
+	@ argc is already in a scratch register (r3).  Put argv into one.  Note
+	@ argv can't go into r0-r3 because we need to use it to load those.
+	ldr		ip, [r4, #0]		@ ip <-- argv
+
+	@ Is this a static method?
+	cmp		r1, #0
+
+	@ No: set r1 to *argv++, and set argc--.
+	@ (r0=pEnv, r1=this)
+	ldreq	r1, [ip], #4
+	subeq   r3, r3, #1
+
+	@ While we still have the use of r2/r3, copy excess args from argv
+	@ to the stack.  We need to push the last item in argv first, and we
+	@ want the first two items in argv to end up in r2/r3.
+	@ (If argc <= 2, everything fits in r2/r3 and the copy is skipped.)
+	subs	r3, r3, #2
+	ble		.Lno_copy
+
+	@ If there are N args, we want to skip 0 and 1, and push (N-1)..2.  We
+	@ have N-2 in r3.  If we set argv=argv+1, we can count from N-2 to 1
+	@ inclusive and get the right set of args.
+	add		r6, ip, #4
+
+.Lcopy:
+	@ *--sp = argv[count]
+	ldr		r2, [r6, r3, lsl #2]
+	str		r2, [sp, #-4]!
+	subs	r3, r3, #1
+	bne		.Lcopy
+
+.Lno_copy:
+	@ Load the last two args.  These are coming out of the interpreted stack,
+	@ and the VM preserves an overflow region at the bottom, so it should be
+	@ safe to load two items out of argv even if we're at the end.
+	ldr		r2, [ip]
+	ldr		r3, [ip, #4]
+
+	@ Show time.  Tuck the pc into lr and load the pc from the method
+	@ address supplied by the caller.  The value for "pc" is offset by 8
+	@ due to instruction prefetching.
+	@
+	@ This works for the ARM5 architecture.  Earlier versions may require
+	@ a blx here.
+	mov		lr, pc
+	ldr		pc, [r4, #8]
+
+
+	@ We're back, result is in r0 or (for long/double) r0-r1.
+	@
+	@ In theory, we need to use the "return type" arg to figure out what
+	@ we have and how to return it.  However, unless we have an FPU,
+	@ all we need to do is copy r0-r1 into the JValue union.
+	ldr		ip, [r4, #12]
+	stmia	ip, {r0-r1}
+
+	@ Restore the registers we saved and return.  Note this remaps stuff,
+	@ so that "sp" comes from "ip", "pc" comes from "lr", and the "pc"
+	@ we pushed on evaporates when we restore "sp".
+	ldmfd	r5, {r4, r5, r6, fp, sp, pc}
+
+#endif /*__ARM_EABI__*/
diff --git a/vm/arch/generic/Call.c b/vm/arch/generic/Call.c
new file mode 100644
index 0000000..4f084d0
--- /dev/null
+++ b/vm/arch/generic/Call.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * This uses the FFI (Foreign Function Interface) library to abstract away
+ * the system-dependent stuff.  The FFI code is slower than a custom
+ * assembly version, but has the distinct advantage of having been
+ * written already for several platforms.
+ */
+#include "Dalvik.h"
+#include "ffi.h"
+
+#include <assert.h>
+
+/*
+ * Convert a signature type character to an FFI type.
+ */
+/*
+ * Map a shorty signature character onto the matching FFI type descriptor.
+ * Anything not listed explicitly is passed as a 32-bit unsigned word.
+ */
+static ffi_type* getFfiType(char sigType)
+{
+    if (sigType == 'V')
+        return &ffi_type_void;
+    if (sigType == 'F')
+        return &ffi_type_float;
+    if (sigType == 'D')
+        return &ffi_type_double;
+    if (sigType == 'J')
+        return &ffi_type_sint64;
+    if (sigType == '[' || sigType == 'L')
+        return &ffi_type_pointer;
+    return &ffi_type_uint32;
+}
+
+/*
+ * Call "func" with the specified arguments.
+ *
+ * The second argument to JNI native functions is either the object (the
+ * "this" pointer) or, for static functions, a pointer to the class object.
+ * The Dalvik instructions will push "this" into argv[0], but it's up to
+ * us to insert the class object.
+ *
+ * Because there is no such thing as a null "this" pointer, we use
+ * the non-NULL state of "clazz" to determine whether or not it's static.
+ *
+ * For maximum efficiency we should compute the CIF once and save it with
+ * the method.  However, this requires storing the data with every native
+ * method.  Since the goal is to have custom assembly versions of this
+ * on the platforms where performance matters, I'm recomputing the CIF on
+ * every call.
+ */
+void dvmPlatformInvoke(void* pEnv, ClassObject* clazz, int argInfo, int argc,
+    const u4* argv, const char* shorty, void* func, JValue* pReturn)
+{
+    const int kMaxArgs = argc+2;    /* +1 for env, maybe +1 for clazz */
+    ffi_cif cif;
+    ffi_type* types[kMaxArgs];
+    void* values[kMaxArgs];
+    ffi_type* retType;
+    char sigByte;
+    int dstArg;
+
+    /* first argument to every JNI function is the JNIEnv* */
+    types[0] = &ffi_type_pointer;
+    values[0] = &pEnv;
+
+    /* second is the class object (static) or "this" from argv[0] (virtual) */
+    types[1] = &ffi_type_pointer;
+    if (clazz != NULL) {
+        values[1] = &clazz;
+    } else {
+        values[1] = (void*) argv++;
+    }
+    dstArg = 2;
+
+    /*
+     * Scan the types out of the short signature.  Use them to fill out the
+     * "types" array.  Store the start address of the argument in "values".
+     * (shorty[0] is the return type; args start at shorty[1].)
+     */
+    retType = getFfiType(*shorty);
+    while ((sigByte = *++shorty) != '\0') {
+        types[dstArg] = getFfiType(sigByte);
+        values[dstArg++] = (void*) argv++;
+        if (sigByte == 'D' || sigByte == 'J')
+            argv++;     /* 64-bit values occupy two interpreter words */
+    }
+
+    /*
+     * Prep the CIF (Call InterFace object).
+     */
+    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, dstArg, retType, types) != FFI_OK) {
+        LOGE("ffi_prep_cif failed\n");
+        dvmAbort();
+    }
+
+    ffi_call(&cif, FFI_FN(func), pReturn, values);
+}
+
diff --git a/vm/hprof/Hprof.c b/vm/hprof/Hprof.c
new file mode 100644
index 0000000..e195aee
--- /dev/null
+++ b/vm/hprof/Hprof.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Hprof.h"
+#include <errno.h>
+#include <sys/time.h>
+#include <time.h>
+
+#define TWO_FILES 1
+
+/*
+ * Allocate an hprof context and open the output file
+ * "<outputDir>/heap-dump-tm<secs>-pid<pid>.hprof".
+ *
+ * Returns the initialized context, or NULL on allocation/open failure.
+ * The caller finishes the dump and releases everything with
+ * hprofShutdown().
+ */
+hprof_context_t *
+hprofStartup(const char *outputDir)
+{
+    hprof_context_t *ctx;
+    struct timeval tv;
+    char *fileName;
+    FILE *fp;
+    int len;
+
+    ctx = malloc(sizeof(*ctx));
+    if (ctx == NULL) {
+        LOGE("hprof: can't allocate context.\n");
+        return NULL;
+    }
+
+    /* Construct the output file name.
+     * hprofShutdown() later strcat()s "-head" onto this buffer, so the
+     * extra 64 bytes of slack are load-bearing.
+     */
+    len = strlen(outputDir);
+    len += 64;  // hprofShutdown assumes that there's some slack
+    gettimeofday(&tv, NULL);
+    fileName = malloc(len);
+    if (fileName == NULL) {
+        LOGE("hprof: can't malloc %d bytes.\n", len);
+        free(ctx);
+        return NULL;
+    }
+    snprintf(fileName, len, "%s/heap-dump-tm%d-pid%d.hprof",
+            outputDir, (int)tv.tv_sec, getpid());
+    fileName[len-1] = '\0';
+
+    fp = fopen(fileName, "w");
+    if (fp == NULL) {
+        LOGE("hprof: can't open %s: %s.\n", fileName, strerror(errno));
+        free(fileName);     /* FIX: fileName was leaked on this path */
+        free(ctx);
+        return NULL;
+    }
+    LOGI("hprof: dumping VM heap to \"%s\".\n", fileName);
+
+    hprofStartup_String();
+    hprofStartup_Class();
+#if WITH_HPROF_STACK
+    hprofStartup_StackFrame();
+    hprofStartup_Stack();
+#endif
+    /* In TWO_FILES mode the strings/classes header goes to a separate
+     * "-head" file at shutdown, so don't write the file header here.
+     */
+#if TWO_FILES
+    hprofContextInit(ctx, fileName, fp, false);
+#else
+    hprofContextInit(ctx, fileName, fp, true);
+#endif
+
+    return ctx;
+}
+
+void
+hprofShutdown(hprof_context_t *ctx)
+{
+#if TWO_FILES
+    FILE *fp;
+
+    /* hprofStartup allocated some slack, so the strcat() should be ok.
+     * Note this mutates ctx->fileName in place; fileName and
+     * ctx->fileName are the same pointer from here on.
+     */
+    char *fileName = strcat(ctx->fileName, "-head");
+
+    /* Finish the body file: flush the last record, close it, and drop
+     * the record buffer before reusing the context for the head file.
+     */
+    hprofFlushCurrentRecord(ctx);
+    fclose(ctx->fp);
+    free(ctx->curRec.body);
+    ctx->curRec.allocLen = 0;
+
+    LOGI("hprof: dumping heap strings to \"%s\".\n", fileName);
+    fp = fopen(fileName, "w");
+    if (fp == NULL) {
+        LOGE("can't open %s: %s\n", fileName, strerror(errno));
+        free(ctx->fileName);
+        free(ctx);
+        return;
+    }
+    /* Re-initialize the context to write the header/metadata file
+     * (true => emit the hprof file header this time).
+     */
+    hprofContextInit(ctx, ctx->fileName, fp, true);
+#endif
+
+    hprofDumpStrings(ctx);
+    hprofDumpClasses(ctx);
+
+    /* Write a dummy stack trace record so the analysis
+     * tools don't freak out.
+     */
+    hprofStartNewRecord(ctx, HPROF_TAG_STACK_TRACE, HPROF_TIME);
+    hprofAddU4ToRecord(&ctx->curRec, HPROF_NULL_STACK_TRACE);
+    hprofAddU4ToRecord(&ctx->curRec, HPROF_NULL_THREAD);
+    hprofAddU4ToRecord(&ctx->curRec, 0);    // no frames
+
+#if WITH_HPROF_STACK
+    hprofDumpStackFrames(ctx);
+    hprofDumpStacks(ctx);
+#endif
+
+    hprofFlushCurrentRecord(ctx);
+
+    hprofShutdown_Class();
+    hprofShutdown_String();
+#if WITH_HPROF_STACK
+    hprofShutdown_Stack();
+    hprofShutdown_StackFrame();
+#endif
+
+    /* Release everything the context owns. */
+    fclose(ctx->fp);
+    free(ctx->fileName);
+    free(ctx);
+}
diff --git a/vm/hprof/Hprof.h b/vm/hprof/Hprof.h
new file mode 100644
index 0000000..6c120f3
--- /dev/null
+++ b/vm/hprof/Hprof.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _DALVIK_HPROF_HPROF
+#define _DALVIK_HPROF_HPROF
+
+#include "Dalvik.h"
+
+#define HPROF_ID_SIZE (sizeof (u4))
+
+#define UNIQUE_ERROR() \
+    -((((uintptr_t)__func__) << 16 | __LINE__) & (0x7fffffff))
+
+#define HPROF_TIME 0
+#define HPROF_NULL_STACK_TRACE   0
+#define HPROF_NULL_THREAD        0
+
+typedef u4 hprof_id;
+typedef hprof_id hprof_string_id;
+typedef hprof_id hprof_object_id;
+typedef hprof_id hprof_class_object_id;
+#if WITH_HPROF_STACK
+typedef hprof_id hprof_stack_frame_id;
+#endif
+
+typedef enum hprof_basic_type {
+    hprof_basic_object = 2,
+    hprof_basic_boolean = 4,
+    hprof_basic_char = 5,
+    hprof_basic_float = 6,
+    hprof_basic_double = 7,
+    hprof_basic_byte = 8,
+    hprof_basic_short = 9,
+    hprof_basic_int = 10,
+    hprof_basic_long = 11,
+} hprof_basic_type;
+
+typedef enum hprof_tag_t {
+    HPROF_TAG_STRING = 0x01,
+    HPROF_TAG_LOAD_CLASS = 0x02,
+    HPROF_TAG_UNLOAD_CLASS = 0x03,
+    HPROF_TAG_STACK_FRAME = 0x04,
+    HPROF_TAG_STACK_TRACE = 0x05,
+    HPROF_TAG_ALLOC_SITES = 0x06,
+    HPROF_TAG_HEAP_SUMMARY = 0x07,
+    HPROF_TAG_START_THREAD = 0x0A,
+    HPROF_TAG_END_THREAD = 0x0B,
+    HPROF_TAG_HEAP_DUMP = 0x0C,
+    HPROF_TAG_HEAP_DUMP_SEGMENT = 0x1C,
+    HPROF_TAG_HEAP_DUMP_END = 0x2C,
+    HPROF_TAG_CPU_SAMPLES = 0x0D,
+    HPROF_TAG_CONTROL_SETTINGS = 0x0E,
+} hprof_tag_t;
+
+/* Values for the first byte of
+ * HEAP_DUMP and HEAP_DUMP_SEGMENT
+ * records:
+ */
+typedef enum hprof_heap_tag_t {
+    /* standard */
+    HPROF_ROOT_UNKNOWN = 0xFF,
+    HPROF_ROOT_JNI_GLOBAL = 0x01,
+    HPROF_ROOT_JNI_LOCAL = 0x02,
+    HPROF_ROOT_JAVA_FRAME = 0x03,
+    HPROF_ROOT_NATIVE_STACK = 0x04,
+    HPROF_ROOT_STICKY_CLASS = 0x05,
+    HPROF_ROOT_THREAD_BLOCK = 0x06,
+    HPROF_ROOT_MONITOR_USED = 0x07,
+    HPROF_ROOT_THREAD_OBJECT = 0x08,
+    HPROF_CLASS_DUMP = 0x20,
+    HPROF_INSTANCE_DUMP = 0x21,
+    HPROF_OBJECT_ARRAY_DUMP = 0x22,
+    HPROF_PRIMITIVE_ARRAY_DUMP = 0x23,
+
+    /* Android */
+    HPROF_HEAP_DUMP_INFO = 0xfe,
+    HPROF_ROOT_INTERNED_STRING = 0x89,
+    HPROF_ROOT_FINALIZING = 0x8a,
+    HPROF_ROOT_DEBUGGER = 0x8b,
+    HPROF_ROOT_REFERENCE_CLEANUP = 0x8c,
+    HPROF_ROOT_VM_INTERNAL = 0x8d,
+    HPROF_ROOT_JNI_MONITOR = 0x8e,
+    HPROF_UNREACHABLE = 0x90,
+    HPROF_PRIMITIVE_ARRAY_NODATA_DUMP = 0xc3,
+} hprof_heap_tag_t;
+
+/* Represents a top-level hprof record, whose serialized
+ * format is:
+ *
+ *     u1     TAG: denoting the type of the record
+ *     u4     TIME: number of microseconds since the time stamp in the header
+ *     u4     LENGTH: number of bytes that follow this u4 field
+ *                    and belong to this record
+ *     [u1]*  BODY: as many bytes as specified in the above u4 field
+ */
+typedef struct hprof_record_t {
+    unsigned char *body;
+    u4 time;
+    u4 length;
+    size_t allocLen;
+    u1 tag;
+    bool dirty;
+} hprof_record_t;
+
+typedef enum {
+    HPROF_HEAP_DEFAULT = 0,
+    HPROF_HEAP_ZYGOTE = 'Z',
+    HPROF_HEAP_APP = 'A'
+} HprofHeapId;
+
+typedef struct hprof_context_t {
+    /* curRec *must* be first so that we
+     * can cast from a context to a record.
+     */
+    hprof_record_t curRec;
+    char *fileName;             // heap-owned output path (see hprofStartup)
+    FILE *fp;                   // current output stream
+    u4 gcThreadSerialNumber;    // thread owning the root being marked
+    u1 gcScanState;             // hprof_heap_tag_t for root marking; 0 = off
+    HprofHeapId currentHeap;    // which heap we're currently emitting
+    u4 stackTraceSerialNumber;
+    size_t objectsInSegment;    // objects written to the open segment record
+} hprof_context_t;
+
+
+/*
+ * HprofString.c functions
+ */
+
+hprof_string_id hprofLookupStringId(const char *str);
+
+int hprofDumpStrings(hprof_context_t *ctx);
+
+int hprofStartup_String(void);
+int hprofShutdown_String(void);
+
+
+/*
+ * HprofClass.c functions
+ */
+
+hprof_class_object_id hprofLookupClassId(const ClassObject *clazz);
+
+int hprofDumpClasses(hprof_context_t *ctx);
+
+int hprofStartup_Class(void);
+int hprofShutdown_Class(void);
+
+
+/*
+ * HprofHeap.c functions
+ */
+
+int hprofStartHeapDump(hprof_context_t *ctx);
+int hprofFinishHeapDump(hprof_context_t *ctx);
+
+int hprofSetGcScanState(hprof_context_t *ctx,
+                        hprof_heap_tag_t state, u4 threadSerialNumber);
+int hprofMarkRootObject(hprof_context_t *ctx,
+                        const Object *obj, jobject jniObj);
+
+int hprofDumpHeapObject(hprof_context_t *ctx, const Object *obj);
+
+/*
+ * HprofOutput.c functions
+ */
+
+void hprofContextInit(hprof_context_t *ctx, char *fileName, FILE *fp,
+                      bool newFile);
+
+int hprofFlushRecord(hprof_record_t *rec, FILE *fp);
+int hprofFlushCurrentRecord(hprof_context_t *ctx);
+int hprofStartNewRecord(hprof_context_t *ctx, u1 tag, u4 time);
+
+int hprofAddU1ToRecord(hprof_record_t *rec, u1 value);
+int hprofAddU1ListToRecord(hprof_record_t *rec,
+                           const u1 *values, size_t numValues);
+
+int hprofAddUtf8StringToRecord(hprof_record_t *rec, const char *str);
+
+int hprofAddU2ToRecord(hprof_record_t *rec, u2 value);
+int hprofAddU2ListToRecord(hprof_record_t *rec,
+                           const u2 *values, size_t numValues);
+
+int hprofAddU4ToRecord(hprof_record_t *rec, u4 value);
+int hprofAddU4ListToRecord(hprof_record_t *rec,
+                           const u4 *values, size_t numValues);
+
+int hprofAddU8ToRecord(hprof_record_t *rec, u8 value);
+int hprofAddU8ListToRecord(hprof_record_t *rec,
+                           const u8 *values, size_t numValues);
+
+#define hprofAddIdToRecord(rec, id) hprofAddU4ToRecord((rec), (u4)(id))
+#define hprofAddIdListToRecord(rec, values, numValues) \
+            hprofAddU4ListToRecord((rec), (const u4 *)(values), (numValues))
+
+#if WITH_HPROF_STACK
+
+/*
+ * HprofStack.c functions
+ */
+
+void hprofFillInStackTrace(void *objectPtr);
+
+int hprofDumpStacks(hprof_context_t *ctx);
+
+int hprofStartup_Stack(void);
+int hprofShutdown_Stack(void);
+
+/*
+ * HprofStackFrame.c functions
+ */
+
+int hprofDumpStackFrames(hprof_context_t *ctx);
+
+int hprofStartup_StackFrame(void);
+int hprofShutdown_StackFrame(void);
+
+#endif
+
+/*
+ * Hprof.c functions
+ */
+
+hprof_context_t *hprofStartup(const char *outputDir);
+void hprofShutdown(hprof_context_t *ctx);
+
+/*
+ * Heap.c functions
+ *
+ * The contents of the hprof directory have no knowledge of
+ * the heap implementation; these functions require heap knowledge,
+ * so they are implemented in Heap.c.
+ */
+void hprofDumpHeap();
+void dvmHeapSetHprofGcScanState(hprof_heap_tag_t state, u4 threadSerialNumber);
+
+#endif  // _DALVIK_HPROF_HPROF
diff --git a/vm/hprof/HprofClass.c b/vm/hprof/HprofClass.c
new file mode 100644
index 0000000..821ca9d
--- /dev/null
+++ b/vm/hprof/HprofClass.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Class object pool
+ */
+
+#include "Hprof.h"
+
+static HashTable *gClassHashTable;
+
+/*
+ * Create the global class hash table used to register dumped classes.
+ * Returns 0 on success, or a unique negative error code on failure.
+ */
+int
+hprofStartup_Class()
+{
+    gClassHashTable = dvmHashTableCreate(128, NULL);
+    return (gClassHashTable == NULL) ? UNIQUE_ERROR() : 0;
+}
+
+/*
+ * Free the global class hash table.  Always returns 0.
+ */
+int
+hprofShutdown_Class()
+{
+    dvmHashTableFree(gClassHashTable);
+    gClassHashTable = NULL;    /* FIX: don't leave a dangling pointer if
+                                * the dumper is started again later */
+
+    return 0;
+}
+
+/*
+ * Hash a class by its loader pointer and descriptor string
+ * (classic multiply-by-31 string hash, seeded with the loader).
+ */
+static u4
+computeClassHash(const ClassObject *clazz)
+{
+    const char *p;
+    u4 h = (u4)clazz->classLoader;
+
+    for (p = clazz->descriptor; *p != '\0'; p++) {
+        h = h * 31 + *p;
+    }
+
+    return h;
+}
+
+/*
+ * Hash-table comparison callback: order classes first by class loader,
+ * then by descriptor.  Returns <0, 0, or >0.
+ *
+ * FIX: the original returned the raw intptr_t pointer difference
+ * truncated to int; on 64-bit builds that can truncate to 0 (a false
+ * "equal") or flip sign.  Compare explicitly instead.
+ */
+static int
+classCmp(const void *v1, const void *v2)
+{
+    const ClassObject *c1 = (const ClassObject *)v1;
+    const ClassObject *c2 = (const ClassObject *)v2;
+
+    if (c1->classLoader != c2->classLoader) {
+        return ((uintptr_t)c1->classLoader < (uintptr_t)c2->classLoader)
+                ? -1 : 1;
+    }
+    return strcmp(c1->descriptor, c2->descriptor);
+}
+
+/*
+ * Intern the human-readable ("pretty") name of "descriptor" in the
+ * string table and return its string ID, or -1 if a temporary buffer
+ * could not be allocated.
+ * NOTE(review): the return type is int while callers use it as an
+ * hprof_string_id; the -1 failure value is not distinguishable from a
+ * valid ID after the cast -- confirm callers tolerate this.
+ */
+static int
+getPrettyClassNameId(const char *descriptor)
+{
+    hprof_string_id classNameId;
+    char *dotName = dvmDescriptorToDot(descriptor);
+
+    /* Hprof suggests that array class names be converted from, e.g.,
+     * "[[[I" to "int[][][]" and "[Lorg.blort.Spaz;" to
+     * "org.blort.Spaz[]".
+     */
+    if (dotName[0] == '[') {
+        const char *c;
+        char *newName;
+        char *nc;
+        size_t dim;
+        size_t newLen;
+
+        /* Count the array dimensions ('[' prefixes). */
+        c = dotName;
+        dim = 0;
+        while (*c == '[') {
+            dim++;
+            c++;
+        }
+        if (*c == 'L') {
+            c++;
+        } else {
+            /* It's a primitive type;  we should use a pretty name.
+             * Add semicolons to make all strings have the format
+             * of object class names.
+             */
+            switch (*c) {
+            case 'Z': c = "boolean;";    break;
+            case 'C': c = "char;";       break;
+            case 'F': c = "float;";      break;
+            case 'D': c = "double;";     break;
+            case 'B': c = "byte;";       break;
+            case 'S': c = "short;";      break;
+            case 'I': c = "int;";        break;
+            case 'J': c = "long;";       break;
+            default: assert(false); c = "UNKNOWN;"; break;
+            }
+        }
+
+        /* We have a string of the form "name;" and
+         * we want to replace the semicolon with as many
+         * "[]" pairs as is in dim.
+         */
+        newLen = strlen(c)-1 + dim*2;
+        newName = malloc(newLen + 1);
+        if (newName == NULL) {
+            return -1;
+        }
+        strcpy(newName, c);
+        newName[newLen] = '\0';
+
+        /* Point nc to the semicolon.
+         */
+        nc = newName + newLen - dim*2;
+        assert(*nc == ';');
+
+        /* Overwrite ";" (and the slack after it) with dim "[]" pairs. */
+        while (dim--) {
+            *nc++ = '[';
+            *nc++ = ']';
+        }
+        assert(*nc == '\0');
+
+        classNameId = hprofLookupStringId(newName);
+        free(newName);
+    } else {
+        classNameId = hprofLookupStringId(dotName);
+    }
+
+    free(dotName);
+    return classNameId;
+}
+
+static u4 gSerialNumber = 0x50000000;
+
+/*
+ * Return the ID used to refer to "clazz" in the dump (the address of
+ * the class object), registering the class in the global table and
+ * interning its pretty name as a side effect.  NULL maps to ID 0.
+ */
+hprof_class_object_id
+hprofLookupClassId(const ClassObject *clazz)
+{
+    void *val;
+
+    if (clazz == NULL) {
+        /* Someone's probably looking up the superclass
+         * of java.lang.Object or of a primitive class.
+         */
+        return (hprof_class_object_id)0;
+    }
+
+    dvmHashTableLock(gClassHashTable);
+
+    /* We're using the hash table as a list.
+     * TODO: replace the hash table with a more suitable structure
+     */
+    val = dvmHashTableLookup(gClassHashTable, computeClassHash(clazz),
+            (void *)clazz, classCmp, true);
+    assert(val != NULL);
+#if WITH_HPROF_STACK
+    /* Assign a serial number to the class */
+    if (clazz->hprofSerialNumber == 0) {
+        ((ClassObject *) clazz)->hprofSerialNumber = ++gSerialNumber;
+    }
+#endif
+
+    dvmHashTableUnlock(gClassHashTable);
+
+    /* Make sure that the class's name is in the string table.
+     * This is a bunch of extra work that we only have to do
+     * because of the order of tables in the output file
+     * (strings need to be dumped before classes).
+     */
+    getPrettyClassNameId(clazz->descriptor);
+
+    return (hprof_class_object_id)clazz;
+}
+
+/*
+ * Emit a LOAD_CLASS record for every class registered through
+ * hprofLookupClassId().  Returns 0 on success, or the first error
+ * from hprofStartNewRecord() (which also aborts the loop).
+ */
+int
+hprofDumpClasses(hprof_context_t *ctx)
+{
+    HashIter iter;
+    hprof_record_t *rec = &ctx->curRec;
+    int err;
+
+    dvmHashTableLock(gClassHashTable);
+
+    for (err = 0, dvmHashIterBegin(gClassHashTable, &iter);
+         err == 0 && !dvmHashIterDone(&iter);
+         dvmHashIterNext(&iter))
+    {
+        err = hprofStartNewRecord(ctx, HPROF_TAG_LOAD_CLASS, HPROF_TIME);
+        if (err == 0) {
+            const ClassObject *clazz;
+
+            clazz = (const ClassObject *)dvmHashIterData(&iter);
+            assert(clazz != NULL);
+
+            /* LOAD CLASS format:
+             *
+             * u4:     class serial number (always > 0)
+             * ID:     class object ID
+             * u4:     stack trace serial number
+             * ID:     class name string ID
+             *
+             * We use the address of the class object structure as its ID.
+             */
+#if WITH_HPROF_STACK
+            hprofAddU4ToRecord(rec, clazz->hprofSerialNumber);
+#else
+            hprofAddU4ToRecord(rec, ++gSerialNumber);
+#endif
+            hprofAddIdToRecord(rec, (hprof_class_object_id)clazz);
+            hprofAddU4ToRecord(rec, HPROF_NULL_STACK_TRACE);
+            hprofAddIdToRecord(rec, getPrettyClassNameId(clazz->descriptor));
+        }
+    }
+
+    dvmHashTableUnlock(gClassHashTable);
+
+    return err;
+}
diff --git a/vm/hprof/HprofHeap.c b/vm/hprof/HprofHeap.c
new file mode 100644
index 0000000..a69e3c6
--- /dev/null
+++ b/vm/hprof/HprofHeap.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Heap object dump
+ */
+#include "Hprof.h"
+
+#include "alloc/HeapInternal.h"
+#include "alloc/HeapSource.h"
+
+/* Set DUMP_PRIM_DATA to 1 if you want to include the contents
+ * of primitive arrays (byte arrays, character arrays, etc.)
+ * in heap dumps.  This can be a large amount of data.
+ */
+#define DUMP_PRIM_DATA 1
+
+#define OBJECTS_PER_SEGMENT     ((size_t)128)
+#define BYTES_PER_SEGMENT       ((size_t)4096)
+
+/*
+ * Reset per-dump state before walking the heap.  Seeding
+ * objectsInSegment at the limit forces the first object to open a
+ * fresh HEAP_DUMP_SEGMENT record.  Always returns 0.
+ *
+ * FIX: removed UNUSED_PARAMETER(ctx) -- ctx is very much used.
+ */
+int
+hprofStartHeapDump(hprof_context_t *ctx)
+{
+    ctx->objectsInSegment = OBJECTS_PER_SEGMENT;
+    ctx->currentHeap = HPROF_HEAP_DEFAULT;
+    return 0;
+}
+
+/*
+ * Terminate the heap dump by emitting a HEAP_DUMP_END record.
+ * Returns the hprofStartNewRecord() result.
+ */
+int
+hprofFinishHeapDump(hprof_context_t *ctx)
+{
+    return hprofStartNewRecord(ctx, HPROF_TAG_HEAP_DUMP_END, HPROF_TIME);
+}
+
+/*
+ * Record which root category (heap tag) and thread the GC is about to
+ * scan; hprofMarkRootObject() reads both.  A state of 0 disables root
+ * emission.  Always returns 0.
+ */
+int
+hprofSetGcScanState(hprof_context_t *ctx,
+                    hprof_heap_tag_t state, u4 threadSerialNumber)
+{
+    /* Used by hprofMarkRootObject()
+     */
+    ctx->gcScanState = state;
+    ctx->gcThreadSerialNumber = threadSerialNumber;
+    return 0;
+}
+
+/*
+ * Map the first character of a field signature to the hprof basic type
+ * and its serialized size in bytes.  An unrecognized character asserts
+ * in debug builds and is treated as 'I' in release builds.
+ */
+static hprof_basic_type
+signatureToBasicTypeAndSize(const char *sig, size_t *sizeOut)
+{
+    char c = sig[0];
+    hprof_basic_type ret;
+    size_t size;
+
+    switch (c) {
+    case '[':
+    case 'L': ret = hprof_basic_object;  size = 4; break;
+    case 'Z': ret = hprof_basic_boolean; size = 1; break;
+    case 'C': ret = hprof_basic_char;    size = 2; break;
+    case 'F': ret = hprof_basic_float;   size = 4; break;
+    case 'D': ret = hprof_basic_double;  size = 8; break;
+    case 'B': ret = hprof_basic_byte;    size = 1; break;
+    case 'S': ret = hprof_basic_short;   size = 2; break;
+    default: assert(false);  /* intentional fallthrough to 'I' */
+    case 'I': ret = hprof_basic_int;     size = 4; break;
+    case 'J': ret = hprof_basic_long;    size = 8; break;
+    }
+
+    /* Size is optional; pass NULL when only the type is needed. */
+    if (sizeOut != NULL) {
+        *sizeOut = size;
+    }
+
+    return ret;
+}
+
+/*
+ * Map a PrimitiveType enum value to the hprof basic type and its
+ * serialized size in bytes.  An unexpected value asserts in debug
+ * builds and is treated as PRIM_INT in release builds.
+ */
+static hprof_basic_type
+primitiveToBasicTypeAndSize(PrimitiveType prim, size_t *sizeOut)
+{
+    hprof_basic_type ret;
+    size_t size;
+
+    switch (prim) {
+    case PRIM_BOOLEAN: ret = hprof_basic_boolean; size = 1; break;
+    case PRIM_CHAR:    ret = hprof_basic_char;    size = 2; break;
+    case PRIM_FLOAT:   ret = hprof_basic_float;   size = 4; break;
+    case PRIM_DOUBLE:  ret = hprof_basic_double;  size = 8; break;
+    case PRIM_BYTE:    ret = hprof_basic_byte;    size = 1; break;
+    case PRIM_SHORT:   ret = hprof_basic_short;   size = 2; break;
+    default: assert(false);  /* intentional fallthrough to PRIM_INT */
+    case PRIM_INT:     ret = hprof_basic_int;     size = 4; break;
+    case PRIM_LONG:    ret = hprof_basic_long;    size = 8; break;
+    }
+
+    /* Size is optional; pass NULL when only the type is needed. */
+    if (sizeOut != NULL) {
+        *sizeOut = size;
+    }
+
+    return ret;
+}
+
+/* Always called when marking objects, but only does
+ * something when ctx->gcScanState is non-zero, which is usually
+ * only true when marking the root set or unreachable
+ * objects.  Used to add rootset references to obj.
+ */
+/* Always called when marking objects, but only does
+ * something when ctx->gcScanState is non-zero, which is usually
+ * only true when marking the root set or unreachable
+ * objects.  Used to add rootset references to obj.
+ *
+ * Returns 0 (the record-append helpers' results are not checked).
+ */
+int
+hprofMarkRootObject(hprof_context_t *ctx, const Object *obj, jobject jniObj)
+{
+    hprof_record_t *rec = &ctx->curRec;
+    int err = 0;    /* FIX: was uninitialized; only the default case set
+                     * it, so every real tag returned an indeterminate
+                     * value (undefined behavior) */
+    hprof_heap_tag_t heapTag = ctx->gcScanState;
+
+    if (heapTag == 0) {
+        /* Not currently marking roots; nothing to emit. */
+        return 0;
+    }
+
+    if (ctx->objectsInSegment >= OBJECTS_PER_SEGMENT ||
+        rec->length >= BYTES_PER_SEGMENT)
+    {
+        /* This flushes the old segment and starts a new one.
+         */
+        hprofStartNewRecord(ctx, HPROF_TAG_HEAP_DUMP_SEGMENT, HPROF_TIME);
+        ctx->objectsInSegment = 0;
+    }
+
+    switch (heapTag) {
+    /* ID: object ID
+     */
+    case HPROF_ROOT_UNKNOWN:
+    case HPROF_ROOT_STICKY_CLASS:
+    case HPROF_ROOT_MONITOR_USED:
+    case HPROF_ROOT_INTERNED_STRING:
+    case HPROF_ROOT_FINALIZING:
+    case HPROF_ROOT_DEBUGGER:
+    case HPROF_ROOT_REFERENCE_CLEANUP:
+    case HPROF_ROOT_VM_INTERNAL:
+    case HPROF_UNREACHABLE:
+        hprofAddU1ToRecord(rec, heapTag);
+        hprofAddIdToRecord(rec, (hprof_object_id)obj);
+        break;
+
+    /* ID: object ID
+     * ID: JNI global ref ID
+     */
+    case HPROF_ROOT_JNI_GLOBAL:
+        hprofAddU1ToRecord(rec, heapTag);
+        hprofAddIdToRecord(rec, (hprof_object_id)obj);
+        hprofAddIdToRecord(rec, (hprof_id)jniObj);
+        break;
+
+    /* ID: object ID
+     * u4: thread serial number
+     * u4: frame number in stack trace (-1 for empty)
+     */
+    case HPROF_ROOT_JNI_LOCAL:
+    case HPROF_ROOT_JNI_MONITOR:
+    case HPROF_ROOT_JAVA_FRAME:
+        hprofAddU1ToRecord(rec, heapTag);
+        hprofAddIdToRecord(rec, (hprof_object_id)obj);
+        hprofAddU4ToRecord(rec, ctx->gcThreadSerialNumber);
+        hprofAddU4ToRecord(rec, (u4)-1);
+        break;
+
+    /* ID: object ID
+     * u4: thread serial number
+     */
+    case HPROF_ROOT_NATIVE_STACK:
+    case HPROF_ROOT_THREAD_BLOCK:
+        hprofAddU1ToRecord(rec, heapTag);
+        hprofAddIdToRecord(rec, (hprof_object_id)obj);
+        hprofAddU4ToRecord(rec, ctx->gcThreadSerialNumber);
+        break;
+
+    /* ID: thread object ID
+     * u4: thread serial number
+     * u4: stack trace serial number
+     */
+    case HPROF_ROOT_THREAD_OBJECT:
+        hprofAddU1ToRecord(rec, heapTag);
+        hprofAddIdToRecord(rec, (hprof_object_id)obj);
+        hprofAddU4ToRecord(rec, ctx->gcThreadSerialNumber);
+        hprofAddU4ToRecord(rec, (u4)-1);    //xxx
+        break;
+
+    default:
+        /* Unrecognized tag: emit nothing. */
+        break;
+    }
+
+    ctx->objectsInSegment++;
+
+    return err;
+}
+
+/*
+ * Return the stack trace serial number recorded for "obj" at
+ * allocation time, or HPROF_NULL_STACK_TRACE when stack tracking is
+ * compiled out.
+ */
+static int
+stackTraceSerialNumber(const void *obj)
+{
+#if WITH_HPROF_STACK
+    return ptr2chunk(obj)->stackTraceSerialNumber;
+#else
+    return HPROF_NULL_STACK_TRACE;
+#endif
+}
+
+/*
+ * Emit a heap-dump sub-record describing "obj" into the current
+ * HEAP_DUMP_SEGMENT, switching between the zygote and app heaps with a
+ * HEAP_DUMP_INFO tag when needed.  Handles four shapes of object:
+ * uninitialized (clazz == NULL), unlinked class, class object, array
+ * (object or primitive), and plain instance.  Always returns 0.
+ */
+int
+hprofDumpHeapObject(hprof_context_t *ctx, const Object *obj)
+{
+    const ClassObject *clazz;
+    hprof_record_t *rec = &ctx->curRec;
+    HprofHeapId desiredHeap;
+
+    /* Decide which heap this object belongs to based on whether it was
+     * allocated before or after the zygote fork.
+     */
+    desiredHeap =
+            dvmHeapSourceGetPtrFlag(ptr2chunk(obj), HS_ALLOCATED_IN_ZYGOTE) ?
+            HPROF_HEAP_ZYGOTE : HPROF_HEAP_APP;
+
+    if (ctx->objectsInSegment >= OBJECTS_PER_SEGMENT ||
+        rec->length >= BYTES_PER_SEGMENT)
+    {
+        /* This flushes the old segment and starts a new one.
+         */
+        hprofStartNewRecord(ctx, HPROF_TAG_HEAP_DUMP_SEGMENT, HPROF_TIME);
+        ctx->objectsInSegment = 0;
+
+        /* Starting a new HEAP_DUMP resets the heap to default.
+         */
+        ctx->currentHeap = HPROF_HEAP_DEFAULT;
+    }
+
+    if (desiredHeap != ctx->currentHeap) {
+        hprof_string_id nameId;
+
+        /* This object is in a different heap than the current one.
+         * Emit a HEAP_DUMP_INFO tag to change heaps.
+         */
+        hprofAddU1ToRecord(rec, HPROF_HEAP_DUMP_INFO);
+        hprofAddU4ToRecord(rec, (u4)desiredHeap);   // u4: heap id
+        switch (desiredHeap) {
+        case HPROF_HEAP_APP:
+            nameId = hprofLookupStringId("app");
+            break;
+        case HPROF_HEAP_ZYGOTE:
+            nameId = hprofLookupStringId("zygote");
+            break;
+        default:
+            /* Internal error. */
+            assert(!"Unexpected desiredHeap");
+            nameId = hprofLookupStringId("<ILLEGAL>");
+            break;
+        }
+        hprofAddIdToRecord(rec, nameId);
+        ctx->currentHeap = desiredHeap;
+    }
+
+    clazz = obj->clazz;
+
+    if (clazz == NULL) {
+        /* This object was probably just allocated and hasn't been
+         * initialized yet.  Add an instance entry to make a note of
+         * it;  there's not much else that we can do.
+         */
+        hprofAddU1ToRecord(rec, HPROF_INSTANCE_DUMP);
+
+        hprofAddIdToRecord(rec, (hprof_object_id)obj);
+        hprofAddU4ToRecord(rec, stackTraceSerialNumber(obj));
+        hprofAddIdToRecord(rec, (hprof_class_object_id)clazz);  // NULL
+        hprofAddIdToRecord(rec, 0);    // no instance data
+    } else if (clazz == gDvm.unlinkedJavaLangClass) {
+        /* obj is a ClassObject that hasn't been linked yet.
+         * Emit a minimal CLASS_DUMP with all-empty fields.
+         */
+        hprofAddU1ToRecord(rec, HPROF_CLASS_DUMP);
+
+    //TODO: use hprofLookupClassId() for this:
+        hprofAddIdToRecord(rec, (hprof_class_object_id)obj);
+        hprofAddU4ToRecord(rec, stackTraceSerialNumber(obj));
+        hprofAddIdToRecord(rec, (hprof_class_object_id)0); // no super class
+        hprofAddIdToRecord(rec, (hprof_object_id)0);       // no class loader
+        hprofAddIdToRecord(rec, (hprof_object_id)0);       // no signer
+        hprofAddIdToRecord(rec, (hprof_object_id)0);       // no prot domain
+        hprofAddIdToRecord(rec, (hprof_id)0);              // reserved
+        hprofAddIdToRecord(rec, (hprof_id)0);              // reserved
+        hprofAddU4ToRecord(rec, 0);                        // zero instance size
+        hprofAddU2ToRecord(rec, 0);                        // empty const pool
+        hprofAddU2ToRecord(rec, 0);                        // no statics
+        hprofAddU2ToRecord(rec, 0);                        // no instance fields
+    } else {
+        hprof_class_object_id clazzId;
+
+        clazzId = hprofLookupClassId(clazz);
+
+        if (clazz == gDvm.classJavaLangClass) {
+            const ClassObject *thisClass = (const ClassObject *)obj;
+            int i, n;
+            /* obj is a ClassObject.
+             */
+            hprofAddU1ToRecord(rec, HPROF_CLASS_DUMP);
+
+            hprofAddIdToRecord(rec, hprofLookupClassId(thisClass));
+            hprofAddU4ToRecord(rec, stackTraceSerialNumber(thisClass));
+            hprofAddIdToRecord(rec, hprofLookupClassId(thisClass->super));
+            hprofAddIdToRecord(rec, (hprof_object_id)thisClass->classLoader);
+            hprofAddIdToRecord(rec, (hprof_object_id)0);    // no signer
+            hprofAddIdToRecord(rec, (hprof_object_id)0);    // no prot domain
+            hprofAddIdToRecord(rec, (hprof_id)0);           // reserved
+            hprofAddIdToRecord(rec, (hprof_id)0);           // reserved
+            if (obj == (Object *)gDvm.classJavaLangClass) {
+                hprofAddU4ToRecord(rec, sizeof(ClassObject)); // instance size
+            } else {
+                hprofAddU4ToRecord(rec, thisClass->objectSize); // instance size
+            }
+
+            hprofAddU2ToRecord(rec, 0);                     // empty const pool
+
+            /* Static fields
+             */
+            n = thisClass->sfieldCount;
+            hprofAddU2ToRecord(rec, (u2)n);
+            for (i = 0; i < n; i++) {
+                const StaticField *f = &thisClass->sfields[i];
+                hprof_basic_type t;
+                size_t size;
+
+                t = signatureToBasicTypeAndSize(f->field.signature, &size);
+                hprofAddIdToRecord(rec, hprofLookupStringId(f->field.name));
+                hprofAddU1ToRecord(rec, t);
+                if (size == 1) {
+                    hprofAddU1ToRecord(rec, (u1)f->value.b);
+                } else if (size == 2) {
+                    hprofAddU2ToRecord(rec, (u2)f->value.c);
+                } else if (size == 4) {
+                    hprofAddU4ToRecord(rec, (u4)f->value.i);
+                } else if (size == 8) {
+                    hprofAddU8ToRecord(rec, (u8)f->value.j);
+                } else {
+                    assert(false);
+                }
+            }
+
+            /* Instance fields for this class (no superclass fields)
+             */
+            n = thisClass->ifieldCount;
+            hprofAddU2ToRecord(rec, (u2)n);
+            for (i = 0; i < n; i++) {
+                const InstField *f = &thisClass->ifields[i];
+                hprof_basic_type t;
+
+                t = signatureToBasicTypeAndSize(f->field.signature, NULL);
+                hprofAddIdToRecord(rec, hprofLookupStringId(f->field.name));
+                hprofAddU1ToRecord(rec, t);
+            }
+        } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
+            const ArrayObject *aobj = (const ArrayObject *)obj;
+            u4 length = aobj->length;
+
+            if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
+                /* obj is an object array.
+                 */
+                hprofAddU1ToRecord(rec, HPROF_OBJECT_ARRAY_DUMP);
+
+                hprofAddIdToRecord(rec, (hprof_object_id)obj);
+                hprofAddU4ToRecord(rec, stackTraceSerialNumber(obj));
+                hprofAddU4ToRecord(rec, length);
+                hprofAddIdToRecord(rec, hprofLookupClassId(clazz));
+
+                /* Dump the elements, which are always objects or NULL.
+                 */
+                hprofAddIdListToRecord(rec,
+                        (const hprof_object_id *)aobj->contents, length);
+            } else {
+                hprof_basic_type t;
+                size_t size;
+
+                t = primitiveToBasicTypeAndSize(clazz->elementClass->
+                                                primitiveType, &size);
+
+                /* obj is a primitive array.
+                 */
+#if DUMP_PRIM_DATA
+                hprofAddU1ToRecord(rec, HPROF_PRIMITIVE_ARRAY_DUMP);
+#else
+                hprofAddU1ToRecord(rec, HPROF_PRIMITIVE_ARRAY_NODATA_DUMP);
+#endif
+
+                hprofAddIdToRecord(rec, (hprof_object_id)obj);
+                hprofAddU4ToRecord(rec, stackTraceSerialNumber(obj));
+                hprofAddU4ToRecord(rec, length);
+                hprofAddU1ToRecord(rec, t);
+
+#if DUMP_PRIM_DATA
+                /* Dump the raw, packed element values.
+                 */
+                if (size == 1) {
+                    hprofAddU1ListToRecord(rec, (const u1 *)aobj->contents,
+                            length);
+                } else if (size == 2) {
+                    hprofAddU2ListToRecord(rec, (const u2 *)aobj->contents,
+                            length);
+                } else if (size == 4) {
+                    hprofAddU4ListToRecord(rec, (const u4 *)aobj->contents,
+                            length);
+                } else if (size == 8) {
+                    hprofAddU8ListToRecord(rec, (const u8 *)aobj->contents,
+                            length);
+                }
+#endif
+            }
+        } else {
+            const ClassObject *sclass;
+            size_t sizePatchOffset, savedLen;
+
+            /* obj is an instance object.
+             */
+            hprofAddU1ToRecord(rec, HPROF_INSTANCE_DUMP);
+            hprofAddIdToRecord(rec, (hprof_object_id)obj);
+            hprofAddU4ToRecord(rec, stackTraceSerialNumber(obj));
+            hprofAddIdToRecord(rec, hprofLookupClassId(clazz));
+
+            /* Reserve some space for the length of the instance
+             * data, which we won't know until we're done writing
+             * it.
+             */
+            sizePatchOffset = rec->length;
+            hprofAddU4ToRecord(rec, 0x77777777);
+
+            /* Write the instance data;  fields for this
+             * class, followed by super class fields, and so on.
+             */
+            sclass = clazz;
+            while (sclass != NULL) {
+                int i, ifieldCount;
+
+                ifieldCount = sclass->ifieldCount;
+                for (i = 0; i < ifieldCount; i++) {
+                    const InstField *f = &sclass->ifields[i];
+                    hprof_basic_type t;
+                    size_t size;
+
+                    t = signatureToBasicTypeAndSize(f->field.signature, &size);
+                    if (size == 1) {
+                        hprofAddU1ToRecord(rec,
+                                (u1)dvmGetFieldByte(obj, f->byteOffset));
+                    } else if (size == 2) {
+                        hprofAddU2ToRecord(rec,
+                                (u2)dvmGetFieldChar(obj, f->byteOffset));
+                    } else if (size == 4) {
+                        hprofAddU4ToRecord(rec,
+                                (u4)dvmGetFieldInt(obj, f->byteOffset));
+                    } else if (size == 8) {
+                        hprofAddU8ToRecord(rec,
+                                (u8)dvmGetFieldLong(obj, f->byteOffset));
+                    } else {
+                        assert(false);
+                    }
+                }
+
+                sclass = sclass->super;
+            }
+
+            /* Patch the instance field length by temporarily rewinding
+             * the record's write cursor to the reserved slot.
+             */
+            savedLen = rec->length;
+            rec->length = sizePatchOffset;
+            hprofAddU4ToRecord(rec, savedLen - (sizePatchOffset + 4));
+            rec->length = savedLen;
+        }
+    }
+
+    ctx->objectsInSegment++;
+
+    return 0;
+}
diff --git a/vm/hprof/HprofOutput.c b/vm/hprof/HprofOutput.c
new file mode 100644
index 0000000..8571dac
--- /dev/null
+++ b/vm/hprof/HprofOutput.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <sys/time.h>
+#include <time.h>
+#include "Hprof.h"
+
+#define HPROF_MAGIC_STRING  "JAVA PROFILE 1.0.3"
+
+#define U2_TO_BUF_BE(buf, offset, value) \
+    do { \
+        unsigned char *buf_ = (unsigned char *)(buf); \
+        int offset_ = (int)(offset); \
+        u2 value_ = (u2)(value); \
+        buf_[offset_ + 0] = (unsigned char)(value_ >>  8); \
+        buf_[offset_ + 1] = (unsigned char)(value_      ); \
+    } while (0)
+
+#define U4_TO_BUF_BE(buf, offset, value) \
+    do { \
+        unsigned char *buf_ = (unsigned char *)(buf); \
+        int offset_ = (int)(offset); \
+        u4 value_ = (u4)(value); \
+        buf_[offset_ + 0] = (unsigned char)(value_ >> 24); \
+        buf_[offset_ + 1] = (unsigned char)(value_ >> 16); \
+        buf_[offset_ + 2] = (unsigned char)(value_ >>  8); \
+        buf_[offset_ + 3] = (unsigned char)(value_      ); \
+    } while (0)
+
+#define U8_TO_BUF_BE(buf, offset, value) \
+    do { \
+        unsigned char *buf_ = (unsigned char *)(buf); \
+        int offset_ = (int)(offset); \
+        u8 value_ = (u8)(value); \
+        buf_[offset_ + 0] = (unsigned char)(value_ >> 56); \
+        buf_[offset_ + 1] = (unsigned char)(value_ >> 48); \
+        buf_[offset_ + 2] = (unsigned char)(value_ >> 40); \
+        buf_[offset_ + 3] = (unsigned char)(value_ >> 32); \
+        buf_[offset_ + 4] = (unsigned char)(value_ >> 24); \
+        buf_[offset_ + 5] = (unsigned char)(value_ >> 16); \
+        buf_[offset_ + 6] = (unsigned char)(value_ >>  8); \
+        buf_[offset_ + 7] = (unsigned char)(value_      ); \
+    } while (0)
+
+/*
+ * Initialize an hprof output context and, for a brand-new file, write the
+ * hprof file header (magic string, identifier size, and timestamp).
+ *
+ * "ctx" is zeroed first, then takes ownership of "fileName" and "fp"
+ * (neither is copied or closed here).  A 128-byte record body buffer is
+ * allocated up front and grown later by guaranteeRecordAppend().
+ */
+void
+hprofContextInit(hprof_context_t *ctx, char *fileName, FILE *fp, bool newFile)
+{
+    memset(ctx, 0, sizeof (*ctx));
+    ctx->fileName = fileName;
+    ctx->fp = fp;
+
+    ctx->curRec.allocLen = 128;
+    ctx->curRec.body = malloc(ctx->curRec.allocLen);
+//xxx check for/return an error
+
+    if (newFile) {
+        char magic[] = HPROF_MAGIC_STRING;
+        unsigned char buf[4];
+        struct timeval now;
+        u8 nowMs;
+
+        /* Write the file header.
+         *
+         * [u1]*: NUL-terminated magic string.
+         */
+        fwrite(magic, 1, sizeof(magic), fp);
+
+        /* u4: size of identifiers.  We're using addresses
+         *     as IDs, so make sure a pointer fits.
+         */
+        U4_TO_BUF_BE(buf, 0, sizeof(void *));
+        fwrite(buf, 1, sizeof(u4), fp);
+
+        /* The current time, in milliseconds since 0:00 GMT, 1/1/70.
+         */
+        if (gettimeofday(&now, NULL) < 0) {
+            /* Fall back to time zero rather than failing the dump. */
+            nowMs = 0;
+        } else {
+            nowMs = (u8)now.tv_sec * 1000 + now.tv_usec / 1000;
+        }
+
+        /* u4: high word of the 64-bit time.
+         */
+        U4_TO_BUF_BE(buf, 0, (u4)(nowMs >> 32));
+        fwrite(buf, 1, sizeof(u4), fp);
+
+        /* u4: low word of the 64-bit time.
+         */
+        U4_TO_BUF_BE(buf, 0, (u4)(nowMs & 0xffffffffULL));
+        fwrite(buf, 1, sizeof(u4), fp); //xxx fix the time
+        /* NOTE(review): fwrite return values are unchecked here -- a short
+         * write would silently corrupt the header.  Confirm acceptable. */
+    }
+}
+
+/*
+ * Write a pending (dirty) record to "fp": a 9-byte big-endian header
+ * (u1 tag, u4 timestamp, u4 body length) followed by the body bytes.
+ *
+ * Clears the dirty flag on success.  Returns 0 if the record was clean
+ * or fully written, or a unique nonzero error code on a short write.
+ */
+int
+hprofFlushRecord(hprof_record_t *rec, FILE *fp)
+{
+    if (rec->dirty) {
+        unsigned char headBuf[sizeof (u1) + 2 * sizeof (u4)];
+        int nb;
+
+        headBuf[0] = rec->tag;
+        U4_TO_BUF_BE(headBuf, 1, rec->time);
+        U4_TO_BUF_BE(headBuf, 5, rec->length);
+
+        nb = fwrite(headBuf, 1, sizeof(headBuf), fp);
+        if (nb != sizeof(headBuf)) {
+            return UNIQUE_ERROR();
+        }
+        nb = fwrite(rec->body, 1, rec->length, fp);
+        if (nb != (int)rec->length) {
+            return UNIQUE_ERROR();
+        }
+
+        rec->dirty = false;
+    }
+//xxx if we used less than half (or whatever) of allocLen, shrink the buffer.
+
+    return 0;
+}
+
+/*
+ * Flush the context's in-progress record to the context's output file.
+ */
+int
+hprofFlushCurrentRecord(hprof_context_t *ctx)
+{
+    hprof_record_t *rec = &ctx->curRec;
+
+    return hprofFlushRecord(rec, ctx->fp);
+}
+
+/*
+ * Flush any pending record, then reset the context's current record to
+ * start a new one with the given tag and timestamp.
+ *
+ * Returns 0 on success, the flush error if flushing failed, or a unique
+ * error if the record is somehow still dirty after a successful flush.
+ */
+int
+hprofStartNewRecord(hprof_context_t *ctx, u1 tag, u4 time)
+{
+    hprof_record_t *rec = &ctx->curRec;
+    int err;
+
+    err = hprofFlushRecord(rec, ctx->fp);
+    if (err != 0) {
+        return err;
+    } else if (rec->dirty) {
+        /* Defensive check: a clean flush must leave the record clean. */
+        return UNIQUE_ERROR();
+    }
+
+    rec->dirty = true;
+    rec->tag = tag;
+    rec->time = time;
+    rec->length = 0;
+
+    return 0;
+}
+
+/*
+ * Ensure the record body can hold "nmore" additional bytes, growing it
+ * with realloc if necessary (doubling, or length + 1.5*nmore if doubling
+ * is not enough).
+ *
+ * On realloc failure the original buffer is left intact (the result is
+ * checked before rec->body is overwritten) and a unique error is returned.
+ */
+static inline int
+guaranteeRecordAppend(hprof_record_t *rec, size_t nmore)
+{
+    size_t minSize;
+
+    minSize = rec->length + nmore;
+    if (minSize > rec->allocLen) {
+        unsigned char *newBody;
+        size_t newAllocLen;
+
+        newAllocLen = rec->allocLen * 2;
+        if (newAllocLen < minSize) {
+            newAllocLen = rec->allocLen + nmore + nmore/2;
+        }
+        newBody = realloc(rec->body, newAllocLen);
+        if (newBody != NULL) {
+            rec->body = newBody;
+            rec->allocLen = newAllocLen;
+        } else {
+//TODO: set an error flag so future ops will fail
+            return UNIQUE_ERROR();
+        }
+    }
+
+    assert(rec->length + nmore <= rec->allocLen);
+    return 0;
+}
+
+/*
+ * Append "numValues" raw bytes to the record body, growing it as needed.
+ * Returns 0 on success, or the growth error code on failure.
+ */
+int
+hprofAddU1ListToRecord(hprof_record_t *rec, const u1 *values, size_t numValues)
+{
+    int err = guaranteeRecordAppend(rec, numValues);
+
+    if (err == 0) {
+        memcpy(rec->body + rec->length, values, numValues);
+        rec->length += numValues;
+    }
+
+    return err;
+}
+
+/*
+ * Append a single byte to the record body.
+ * Returns 0 on success, or the growth error code on failure.
+ */
+int
+hprofAddU1ToRecord(hprof_record_t *rec, u1 value)
+{
+    int err = guaranteeRecordAppend(rec, 1);
+
+    if (err == 0) {
+        rec->body[rec->length++] = value;
+    }
+
+    return err;
+}
+
+/*
+ * Append the bytes of a NUL-terminated UTF-8 string to the record.
+ * The terminating NUL character is NOT written; the record format
+ * encodes the length separately.
+ */
+int
+hprofAddUtf8StringToRecord(hprof_record_t *rec, const char *str)
+{
+    /* The terminating NUL character is NOT written.
+     */
+//xxx don't do a strlen;  add and grow as necessary, until NUL
+    return hprofAddU1ListToRecord(rec, (const u1 *)str, strlen(str));
+}
+
+/*
+ * Append "numValues" u2 values to the record in big-endian byte order.
+ * Returns 0 on success, or the growth error code on failure.
+ */
+int
+hprofAddU2ListToRecord(hprof_record_t *rec, const u2 *values, size_t numValues)
+{
+    unsigned char *out;
+    size_t idx;
+    int err = guaranteeRecordAppend(rec, numValues * 2);
+
+    if (err != 0) {
+        return err;
+    }
+
+    /* Byte-at-a-time conversion; could use bulk copies on a
+     * big-endian target with aligned data.
+     */
+    out = rec->body + rec->length;
+    for (idx = 0; idx < numValues; idx++) {
+        U2_TO_BUF_BE(out, 0, values[idx]);
+        out += sizeof(u2);
+    }
+    rec->length += numValues * 2;
+
+    return 0;
+}
+
+/*
+ * Append one u2 value to the record in big-endian byte order.
+ */
+int
+hprofAddU2ToRecord(hprof_record_t *rec, u2 value)
+{
+    u2 tmp = value;
+
+    return hprofAddU2ListToRecord(rec, &tmp, 1);
+}
+
+/*
+ * Append "numValues" u4 values to the record in big-endian byte order.
+ * Returns 0 on success, or the growth error code on failure.
+ */
+int
+hprofAddU4ListToRecord(hprof_record_t *rec, const u4 *values, size_t numValues)
+{
+    unsigned char *out;
+    size_t idx;
+    int err = guaranteeRecordAppend(rec, numValues * 4);
+
+    if (err != 0) {
+        return err;
+    }
+
+    /* Byte-at-a-time conversion; could use bulk copies on a
+     * big-endian target with aligned data.
+     */
+    out = rec->body + rec->length;
+    for (idx = 0; idx < numValues; idx++) {
+        U4_TO_BUF_BE(out, 0, values[idx]);
+        out += sizeof(u4);
+    }
+    rec->length += numValues * 4;
+
+    return 0;
+}
+
+/*
+ * Append one u4 value to the record in big-endian byte order.
+ */
+int
+hprofAddU4ToRecord(hprof_record_t *rec, u4 value)
+{
+    u4 tmp = value;
+
+    return hprofAddU4ListToRecord(rec, &tmp, 1);
+}
+
+/*
+ * Append "numValues" u8 values to the record in big-endian byte order.
+ * Returns 0 on success, or the growth error code on failure.
+ */
+int
+hprofAddU8ListToRecord(hprof_record_t *rec, const u8 *values, size_t numValues)
+{
+    unsigned char *out;
+    size_t idx;
+    int err = guaranteeRecordAppend(rec, numValues * 8);
+
+    if (err != 0) {
+        return err;
+    }
+
+    /* Byte-at-a-time conversion; could use bulk copies on a
+     * big-endian target with aligned data.
+     */
+    out = rec->body + rec->length;
+    for (idx = 0; idx < numValues; idx++) {
+        U8_TO_BUF_BE(out, 0, values[idx]);
+        out += sizeof(u8);
+    }
+    rec->length += numValues * 8;
+
+    return 0;
+}
+
+/*
+ * Append one u8 value to the record in big-endian byte order.
+ */
+int
+hprofAddU8ToRecord(hprof_record_t *rec, u8 value)
+{
+    u8 tmp = value;
+
+    return hprofAddU8ListToRecord(rec, &tmp, 1);
+}
diff --git a/vm/hprof/HprofStack.c b/vm/hprof/HprofStack.c
new file mode 100644
index 0000000..241e01e
--- /dev/null
+++ b/vm/hprof/HprofStack.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Hprof.h"
+#include "HprofStack.h"
+#include "alloc/HeapInternal.h"
+
+static HashTable *gStackTraceHashTable = NULL;
+static int gSerialNumber = 0;
+
+/* Number of stack frames to cache */
+#define STACK_DEPTH 8
+
+typedef struct {
+    int serialNumber;
+    int threadSerialNumber;
+    int frameIds[STACK_DEPTH];
+} StackTrace;
+
+typedef struct {
+    StackTrace trace;
+    u1 live;
+} StackTraceEntry;
+
+static u4 computeStackTraceHash(const StackTraceEntry *stackTraceEntry);
+
+/*
+ * Called at the start of a GC pass: clear the 'live' mark on every cached
+ * stack trace so that only traces referenced during this pass survive the
+ * matching hprofShutdown_Stack() sweep.
+ *
+ * NOTE(review): gStackTraceHashTable is created lazily by
+ * hprofLookupStackSerialNumber(); if no allocation has been tracked yet it
+ * is still NULL here -- confirm dvmHashIterBegin tolerates a NULL table.
+ */
+int
+hprofStartup_Stack()
+{
+    HashIter iter;
+
+    /* This will be called when a GC begins. */
+    for (dvmHashIterBegin(gStackTraceHashTable, &iter);
+         !dvmHashIterDone(&iter);
+         dvmHashIterNext(&iter)) {
+        StackTraceEntry *stackTraceEntry;
+
+        /* Clear the 'live' bit at the start of the GC pass. */
+        stackTraceEntry = (StackTraceEntry *) dvmHashIterData(&iter);
+        stackTraceEntry->live = 0;
+    }
+
+    return 0;
+}
+
+/*
+ * Called after a GC completes: sweep the stack-trace cache, discarding any
+ * trace whose 'live' bit was not set during the pass (i.e. no surviving
+ * heap object references it).
+ *
+ * NOTE(review): entries are removed while iterating -- confirm the
+ * dvmHashIter API permits removal mid-iteration.  Also, the table was
+ * created with free() as its free function; entries are freed explicitly
+ * here, so dvmHashTableRemove presumably does not free them -- verify.
+ */
+int
+hprofShutdown_Stack()
+{
+    HashIter iter;
+
+    /* This will be called when a GC has completed. */
+    for (dvmHashIterBegin(gStackTraceHashTable, &iter);
+         !dvmHashIterDone(&iter);
+         dvmHashIterNext(&iter)) {
+        StackTraceEntry *stackTraceEntry;
+
+        /*
+         * If the 'live' bit is 0, the trace is not in use by any current
+         * heap object and may be destroyed.
+         */
+        stackTraceEntry = (StackTraceEntry *) dvmHashIterData(&iter);
+        if (!stackTraceEntry->live) {
+            dvmHashTableRemove(gStackTraceHashTable,
+                    computeStackTraceHash(stackTraceEntry), stackTraceEntry);
+            free(stackTraceEntry);
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * Hash only the 'trace' portion of a StackTraceEntry, byte by byte,
+ * using the classic multiply-by-31 accumulator.
+ */
+static u4
+computeStackTraceHash(const StackTraceEntry *stackTraceEntry)
+{
+    const char *bytes = (const char *) &stackTraceEntry->trace;
+    u4 hash = 0;
+    int idx;
+
+    for (idx = 0; idx < (int) sizeof(StackTrace); idx++) {
+        hash = 31 * hash + bytes[idx];
+    }
+
+    return hash;
+}
+
+/* Compare only the 'trace' portion of two StackTraceEntry items;
+ * the 'live' flag is GC bookkeeping and must not affect equality.
+ */
+static int
+stackCmp(const void *tableItem, const void *looseItem)
+{
+    const StackTraceEntry *a = (const StackTraceEntry *) tableItem;
+    const StackTraceEntry *b = (const StackTraceEntry *) looseItem;
+
+    return memcmp(&a->trace, &b->trace, sizeof(StackTrace));
+}
+
+/*
+ * Heap-allocate a copy of a StackTraceEntry.
+ * NOTE(review): the malloc result is not checked; on OOM the memcpy
+ * dereferences NULL.  Caller also assumes a non-NULL return.
+ */
+static StackTraceEntry *
+stackDup(const StackTraceEntry *stackTrace)
+{
+    StackTraceEntry *newStackTrace = malloc(sizeof(StackTraceEntry));
+    memcpy(newStackTrace, stackTrace, sizeof(StackTraceEntry));
+    return newStackTrace;
+}
+
+/*
+ * Intern a stack trace and return its serial number.  If an identical
+ * trace (ignoring the 'live' flag) is already cached, its serial is
+ * reused; otherwise the trace is copied into the table and assigned the
+ * next serial from gSerialNumber.  The entry is marked live either way.
+ *
+ * NOTE(review): serial is declared int but the function returns u4; the
+ * values come from a monotonically increasing counter, so the conversion
+ * is benign in practice.  The lazy table creation below happens before
+ * the table lock is taken -- confirm callers cannot race here.
+ */
+static u4
+hprofLookupStackSerialNumber(const StackTraceEntry *stackTrace)
+{
+    StackTraceEntry *val;
+    u4 hashValue;
+    int serial;
+
+    /*
+     * Create the hash table on first contact.  We can't do this in
+     * hprofStartupStack, because we have to compute stack trace
+     * serial numbers and place them into object headers before the
+     * rest of hprof is triggered by a GC event.
+     */
+    if (gStackTraceHashTable == NULL) {
+        gStackTraceHashTable = dvmHashTableCreate(512, free);
+    }
+    dvmHashTableLock(gStackTraceHashTable);
+
+    hashValue = computeStackTraceHash(stackTrace);
+    val = dvmHashTableLookup(gStackTraceHashTable, hashValue, (void *)stackTrace,
+            (HashCompareFunc)stackCmp, false);
+    if (val == NULL) {
+        StackTraceEntry *newStackTrace;
+
+        newStackTrace = stackDup(stackTrace);
+        newStackTrace->trace.serialNumber = ++gSerialNumber;
+        val = dvmHashTableLookup(gStackTraceHashTable, hashValue,
+                (void *)newStackTrace, (HashCompareFunc)stackCmp, true);
+        assert(val != NULL);
+    }
+
+    /* Mark the trace as live (in use by an object in the current heap). */
+    val->live = 1;
+
+    /* Grab the serial number before unlocking the table. */
+    serial = val->trace.serialNumber;
+
+    dvmHashTableUnlock(gStackTraceHashTable);
+
+    return serial;
+}
+
+/*
+ * Emit one HPROF_TAG_STACK_TRACE record per cached stack trace.
+ * Frame IDs of 0 mark the end of a (possibly shorter than STACK_DEPTH)
+ * trace, so the frame count is found by scanning for the first zero.
+ *
+ * NOTE(review): errors from hprofStartNewRecord/hprofAddU4ToRecord are
+ * ignored here (unlike hprofDumpStrings) -- confirm that is intentional.
+ */
+int
+hprofDumpStacks(hprof_context_t *ctx)
+{
+    HashIter iter;
+    hprof_record_t *rec = &ctx->curRec;
+
+    dvmHashTableLock(gStackTraceHashTable);
+
+    for (dvmHashIterBegin(gStackTraceHashTable, &iter);
+         !dvmHashIterDone(&iter);
+         dvmHashIterNext(&iter))
+    {
+        const StackTraceEntry *stackTraceEntry;
+        int count;
+        int i;
+
+        hprofStartNewRecord(ctx, HPROF_TAG_STACK_TRACE, HPROF_TIME);
+
+        stackTraceEntry = (const StackTraceEntry *) dvmHashIterData(&iter);
+        assert(stackTraceEntry != NULL);
+
+        /* STACK TRACE format:
+         *
+         * u4:     serial number for this stack
+         * u4:     serial number for the running thread
+         * u4:     number of frames
+         * [ID]*:  ID for the stack frame
+         */
+        hprofAddU4ToRecord(rec, stackTraceEntry->trace.serialNumber);
+        hprofAddU4ToRecord(rec, stackTraceEntry->trace.threadSerialNumber);
+
+        /* Count frames up to the first empty (0) slot. */
+        count = 0;
+        while ((count < STACK_DEPTH) &&
+               (stackTraceEntry->trace.frameIds[count] != 0)) {
+            count++;
+        }
+        hprofAddU4ToRecord(rec, count);
+        for (i = 0; i < count; i++) {
+            hprofAddU4ToRecord(rec, stackTraceEntry->trace.frameIds[i]);
+        }
+    }
+
+    dvmHashTableUnlock(gStackTraceHashTable);
+
+    return 0;
+}
+
+/*
+ * Capture the current thread's stack (up to STACK_DEPTH non-break frames)
+ * at allocation time, intern the trace, and store the resulting serial
+ * number in the object's heap-chunk header for later hprof dumping.
+ *
+ * Silently does nothing if objectPtr is NULL or no Thread is attached.
+ */
+void
+hprofFillInStackTrace(void *objectPtr)
+
+{
+    DvmHeapChunk *chunk;
+    StackTraceEntry stackTraceEntry;
+    Thread* self;
+    void* fp;
+    int i;
+
+    if (objectPtr == NULL) {
+        return;
+    }
+    self = dvmThreadSelf();
+    if (self == NULL) {
+        return;
+    }
+    fp = self->curFrame;
+
+    /* Serial number to be filled in later. */
+    stackTraceEntry.trace.serialNumber = -1;
+
+    /*
+     * TODO - The HAT tool doesn't care about thread data, so we can defer
+     * actually emitting thread records and assigning thread serial numbers.
+     */
+    /* NOTE(review): casting the Thread pointer to int as a stand-in serial
+     * truncates on 64-bit builds -- acceptable only on 32-bit targets. */
+    stackTraceEntry.trace.threadSerialNumber = (int) self;
+
+    /* Zero-fill so unused slots read as 0 (end-of-trace marker). */
+    memset(&stackTraceEntry.trace.frameIds, 0,
+            sizeof(stackTraceEntry.trace.frameIds));
+
+    /* Walk frames from the current one outward, skipping break frames. */
+    i = 0;
+    while ((fp != NULL) && (i < STACK_DEPTH)) {
+        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+        const Method* method = saveArea->method;
+        StackFrameEntry frame;
+
+        if (!dvmIsBreakFrame(fp)) {
+            frame.frame.method = method;
+            if (dvmIsNativeMethod(method)) {
+                frame.frame.pc = 0; /* no saved PC for native methods */
+            } else {
+                assert(saveArea->xtra.currentPc >= method->insns &&
+                        saveArea->xtra.currentPc <
+                        method->insns + dvmGetMethodInsnsSize(method));
+                frame.frame.pc = (int) (saveArea->xtra.currentPc -
+                        method->insns);
+            }
+
+            // Canonicalize the frame and cache it in the hprof context
+            stackTraceEntry.trace.frameIds[i++] =
+                hprofLookupStackFrameId(&frame);
+        }
+
+        assert(fp != saveArea->prevFrame);
+        fp = saveArea->prevFrame;
+    }
+
+    /* Store the stack trace serial number in the object header */
+    chunk = ptr2chunk(objectPtr);
+    chunk->stackTraceSerialNumber =
+            hprofLookupStackSerialNumber(&stackTraceEntry);
+}
diff --git a/vm/hprof/HprofStack.h b/vm/hprof/HprofStack.h
new file mode 100644
index 0000000..1f16c1e
--- /dev/null
+++ b/vm/hprof/HprofStack.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _DALVIK_HPROF_STACK
+#define _DALVIK_HPROF_STACK
+
+#include "../alloc/HeapInternal.h"
+
+/* One captured stack frame: the method plus the instruction offset
+ * within it (0 for native methods).
+ */
+typedef struct {
+    const Method *method;
+    int pc;
+} StackFrame;
+
+/* Hash-table entry wrapping a StackFrame with a GC-driven 'live' mark. */
+typedef struct {
+    StackFrame frame;
+    unsigned char live;
+} StackFrameEntry;
+
+/* Stack-trace cache lifecycle (GC begin/end) and dumping.
+ * Fix: the previous declaration "hprofStartupStack" did not match the
+ * implementation's "hprofStartup_Stack" in HprofStack.c.
+ */
+int hprofStartup_Stack(void);
+int hprofShutdown_Stack(void);
+int hprofDumpStacks(hprof_context_t *ctx);
+void hprofFillInStackTrace(void *objectPtr);
+
+/* Stack-frame cache lifecycle and dumping. */
+int hprofStartup_StackFrame(void);
+int hprofShutdown_StackFrame(void);
+hprof_stack_frame_id hprofLookupStackFrameId(const StackFrameEntry
+    *stackFrameEntry);
+int hprofDumpStackFrames(hprof_context_t *ctx);
+
+#endif /* _DALVIK_HPROF_STACK */
diff --git a/vm/hprof/HprofStackFrame.c b/vm/hprof/HprofStackFrame.c
new file mode 100644
index 0000000..9828934
--- /dev/null
+++ b/vm/hprof/HprofStackFrame.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Hprof.h"
+#include "HprofStack.h"
+
+#include "alloc/HeapInternal.h"
+
+static HashTable *gStackFrameHashTable;
+
+static u4 computeStackFrameHash(const StackFrameEntry *stackFrameEntry);
+
+/*
+ * Called at the start of a GC pass: clear every cached frame's 'live' bit
+ * and pre-intern the strings/classes each frame's record will reference
+ * (method name, descriptor, source file, class), since string and class
+ * records are dumped before stack-frame records.
+ *
+ * NOTE(review): gStackFrameHashTable is created lazily by
+ * hprofLookupStackFrameId() and may still be NULL here -- confirm
+ * dvmHashIterBegin tolerates a NULL table.
+ */
+int
+hprofStartup_StackFrame()
+{
+    HashIter iter;
+
+    /* Cache the string "<unknown>" for use when the source file is
+     * unknown.
+     */
+    hprofLookupStringId("<unknown>");
+
+    /* This will be called when a GC begins. */
+    for (dvmHashIterBegin(gStackFrameHashTable, &iter);
+         !dvmHashIterDone(&iter);
+         dvmHashIterNext(&iter)) {
+        StackFrameEntry *stackFrameEntry;
+        const Method *method;
+
+        /* Clear the 'live' bit at the start of the GC pass. */
+        stackFrameEntry = (StackFrameEntry *) dvmHashIterData(&iter);
+        stackFrameEntry->live = 0;
+
+        method = stackFrameEntry->frame.method;
+        if (method == NULL) {
+            continue;
+        }
+
+        /* Make sure the method name, descriptor, and source file are in
+         * the string table, and that the method class is in the class
+         * table. This is needed because strings and classes will be dumped
+         * before stack frames.
+         */
+
+        if (method->name) {
+            hprofLookupStringId(method->name);
+        }
+
+        DexStringCache cache;
+        const char* descriptor;
+
+        dexStringCacheInit(&cache);
+        descriptor = dexProtoGetMethodDescriptor(&method->prototype, &cache);
+        hprofLookupStringId(descriptor);
+        dexStringCacheRelease(&cache);
+
+        const char* sourceFile = dvmGetMethodSourceFile(method);
+        if (sourceFile) {
+            hprofLookupStringId(sourceFile);
+        }
+
+        if (method->clazz) {
+            hprofLookupClassId(method->clazz);
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * Called after a GC completes: sweep the stack-frame cache, discarding
+ * frames whose 'live' bit was not set during the pass.
+ *
+ * NOTE(review): as with hprofShutdown_Stack, entries are removed while
+ * iterating -- confirm the dvmHashIter API permits this.
+ */
+int
+hprofShutdown_StackFrame()
+{
+    HashIter iter;
+
+    /* This will be called when a GC has completed. */
+    for (dvmHashIterBegin(gStackFrameHashTable, &iter);
+         !dvmHashIterDone(&iter);
+         dvmHashIterNext(&iter)) {
+        const StackFrameEntry *stackFrameEntry;
+
+        /*
+         * If the 'live' bit is 0, the frame is not in use by any current
+         * heap object and may be destroyed.
+         */
+        stackFrameEntry = (const StackFrameEntry *) dvmHashIterData(&iter);
+        if (!stackFrameEntry->live) {
+            dvmHashTableRemove(gStackFrameHashTable,
+                    computeStackFrameHash(stackFrameEntry),
+                    (void*) stackFrameEntry);
+            free((void*) stackFrameEntry);
+        }
+    }
+
+    return 0;
+}
+
+/* Hash only the 'frame' portion of the StackFrameEntry, byte by byte,
+ * with the usual multiply-by-31 accumulator; the 'live' flag is excluded
+ * so it cannot perturb lookups.
+ */
+static u4
+computeStackFrameHash(const StackFrameEntry *stackFrameEntry)
+{
+    const char *bytes = (const char *) &stackFrameEntry->frame;
+    u4 hash = 0;
+    int idx;
+
+    for (idx = 0; idx < (int) sizeof(StackFrame); idx++) {
+        hash = 31 * hash + bytes[idx];
+    }
+
+    return hash;
+}
+
+/* Compare only the 'frame' portion of two StackFrameEntry items;
+ * the 'live' flag is GC bookkeeping and must not affect equality.
+ */
+static int
+stackFrameCmp(const void *tableItem, const void *looseItem)
+{
+    const StackFrameEntry *a = (const StackFrameEntry *) tableItem;
+    const StackFrameEntry *b = (const StackFrameEntry *) looseItem;
+
+    return memcmp(&a->frame, &b->frame, sizeof(StackFrame));
+}
+
+/*
+ * Heap-allocate a copy of a StackFrameEntry.
+ * NOTE(review): the malloc result is not checked; on OOM the memcpy
+ * dereferences NULL.  Caller also assumes a non-NULL return.
+ */
+static StackFrameEntry *
+stackFrameDup(const StackFrameEntry *stackFrameEntry)
+{
+    StackFrameEntry *newStackFrameEntry = malloc(sizeof(StackFrameEntry));
+    memcpy(newStackFrameEntry, stackFrameEntry, sizeof(StackFrameEntry));
+    return newStackFrameEntry;
+}
+
+/*
+ * Intern a stack frame and return its ID.  The ID is the address of the
+ * canonical table entry, so identical frames always map to the same ID.
+ * The entry is marked live for the current GC pass.
+ *
+ * NOTE(review): the lazy table creation below happens before the table
+ * lock is taken -- confirm callers cannot race here.
+ */
+hprof_stack_frame_id
+hprofLookupStackFrameId(const StackFrameEntry *stackFrameEntry)
+{
+    StackFrameEntry *val;
+    u4 hashValue;
+
+    /*
+     * Create the hash table on first contact.  We can't do this in
+     * hprofStartupStackFrame, because we have to compute stack trace
+     * serial numbers and place them into object headers before the
+     * rest of hprof is triggered by a GC event.
+     */
+    if (gStackFrameHashTable == NULL) {
+        gStackFrameHashTable = dvmHashTableCreate(512, free);
+    }
+    dvmHashTableLock(gStackFrameHashTable);
+
+    hashValue = computeStackFrameHash(stackFrameEntry);
+    val = dvmHashTableLookup(gStackFrameHashTable, hashValue,
+        (void *)stackFrameEntry, (HashCompareFunc)stackFrameCmp, false);
+    if (val == NULL) {
+        const StackFrameEntry *newStackFrameEntry;
+
+        newStackFrameEntry = stackFrameDup(stackFrameEntry);
+        val = dvmHashTableLookup(gStackFrameHashTable, hashValue,
+            (void *)newStackFrameEntry, (HashCompareFunc)stackFrameCmp, true);
+        assert(val != NULL);
+    }
+
+    /* Mark the frame as live (in use by an object in the current heap). */
+    val->live = 1;
+
+    dvmHashTableUnlock(gStackFrameHashTable);
+
+    return (hprof_stack_frame_id) val;
+}
+
+/*
+ * Emit one HPROF_TAG_STACK_FRAME record per cached frame.  Each record
+ * references string IDs interned earlier (see hprofStartup_StackFrame)
+ * and uses the table entry's address as the frame ID, matching the IDs
+ * written by hprofDumpStacks.
+ *
+ * NOTE(review): the (u4) casts of pointers below truncate on 64-bit
+ * builds -- acceptable only on 32-bit targets.
+ */
+int
+hprofDumpStackFrames(hprof_context_t *ctx)
+{
+    HashIter iter;
+    hprof_record_t *rec = &ctx->curRec;
+
+    dvmHashTableLock(gStackFrameHashTable);
+
+    for (dvmHashIterBegin(gStackFrameHashTable, &iter);
+         !dvmHashIterDone(&iter);
+         dvmHashIterNext(&iter))
+    {
+        const StackFrameEntry *stackFrameEntry;
+        const Method *method;
+        int pc;
+        const char *sourceFile;
+        ClassObject *clazz;
+        int lineNum;
+
+        hprofStartNewRecord(ctx, HPROF_TAG_STACK_FRAME, HPROF_TIME);
+
+        stackFrameEntry = (const StackFrameEntry *) dvmHashIterData(&iter);
+        assert(stackFrameEntry != NULL);
+
+        method = stackFrameEntry->frame.method;
+        pc = stackFrameEntry->frame.pc;
+        sourceFile = dvmGetMethodSourceFile(method);
+        if (sourceFile == NULL) {
+            sourceFile = "<unknown>";
+            lineNum = 0;
+        } else {
+            lineNum = dvmLineNumFromPC(method, pc);
+        }
+        clazz = (ClassObject *) hprofLookupClassId(method->clazz);
+
+        /* STACK FRAME format:
+         *
+         * ID:     ID for this stack frame
+         * ID:     ID for the method name
+         * ID:     ID for the method descriptor
+         * ID:     ID for the source file name
+         * u4:     class serial number
+         * u4:     line number, 0 = no line information
+         *
+         * We use the address of the stack frame as its ID.
+         */
+
+        DexStringCache cache;
+        const char* descriptor;
+
+        dexStringCacheInit(&cache);
+        descriptor = dexProtoGetMethodDescriptor(&method->prototype, &cache);
+
+        hprofAddIdToRecord(rec, (u4) stackFrameEntry);
+        hprofAddIdToRecord(rec, hprofLookupStringId(method->name));
+        hprofAddIdToRecord(rec, hprofLookupStringId(descriptor));
+        hprofAddIdToRecord(rec, hprofLookupStringId(sourceFile));
+        hprofAddU4ToRecord(rec, (u4) clazz->hprofSerialNumber);
+        hprofAddU4ToRecord(rec, (u4) lineNum);
+
+        /* Release after the descriptor's string ID has been recorded. */
+        dexStringCacheRelease(&cache);
+    }
+
+    dvmHashTableUnlock(gStackFrameHashTable);
+    return 0;
+}
diff --git a/vm/hprof/HprofString.c b/vm/hprof/HprofString.c
new file mode 100644
index 0000000..4e14efb
--- /dev/null
+++ b/vm/hprof/HprofString.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Common string pool for the profiler
+ */
+#include "Hprof.h"
+
+static HashTable *gStringHashTable;
+
+/*
+ * Create the shared string pool.  Entries are freed with free() when the
+ * table is destroyed.  Returns 0 on success, a unique error on failure.
+ */
+int
+hprofStartup_String()
+{
+    gStringHashTable = dvmHashTableCreate(512, free);
+
+    return (gStringHashTable != NULL) ? 0 : UNIQUE_ERROR();
+}
+
+/*
+ * Destroy the shared string pool, freeing every interned string.
+ * NOTE(review): gStringHashTable is not reset to NULL afterward, so a
+ * later hprofLookupStringId would use a dangling pointer -- confirm the
+ * shutdown ordering prevents that.
+ */
+int
+hprofShutdown_String()
+{
+    dvmHashTableFree(gStringHashTable);
+    return 0;
+}
+
+/*
+ * Hash a NUL-terminated string with the classic multiply-by-31
+ * accumulator (same scheme as java.lang.String.hashCode).
+ */
+static u4
+computeUtf8Hash(const char *str)
+{
+    u4 hash = 0;
+    const char *cp = str;
+
+    while (*cp != '\0') {
+        hash = hash * 31 + *cp++;
+    }
+
+    return hash;
+}
+
+/*
+ * Intern a string in the shared pool and return its ID (the address of
+ * the pooled copy, so equal strings always yield the same ID).  The
+ * input is strdup'd on first insertion; the pool owns the copy.
+ *
+ * NOTE(review): the strdup result is not checked for NULL before being
+ * inserted into the table.
+ */
+hprof_string_id
+hprofLookupStringId(const char *str)
+{
+    void *val;
+    u4 hashValue;
+
+    dvmHashTableLock(gStringHashTable);
+
+    hashValue = computeUtf8Hash(str);
+    val = dvmHashTableLookup(gStringHashTable, hashValue, (void *)str,
+            (HashCompareFunc)strcmp, false);
+    if (val == NULL) {
+        const char *newStr;
+
+        newStr = strdup(str);
+        val = dvmHashTableLookup(gStringHashTable, hashValue, (void *)newStr,
+                (HashCompareFunc)strcmp, true);
+        assert(val != NULL);
+    }
+
+    dvmHashTableUnlock(gStringHashTable);
+
+    return (hprof_string_id)val;
+}
+
+/*
+ * Emit one HPROF_TAG_STRING record per interned string.  Stops at the
+ * first error and returns it; returns 0 if all strings were written.
+ *
+ * NOTE(review): the (u4) cast of the string address truncates on 64-bit
+ * builds -- acceptable only on 32-bit targets.
+ */
+int
+hprofDumpStrings(hprof_context_t *ctx)
+{
+    HashIter iter;
+    hprof_record_t *rec = &ctx->curRec;
+    int err;
+
+    dvmHashTableLock(gStringHashTable);
+
+    for (err = 0, dvmHashIterBegin(gStringHashTable, &iter);
+         err == 0 && !dvmHashIterDone(&iter);
+         dvmHashIterNext(&iter))
+    {
+        err = hprofStartNewRecord(ctx, HPROF_TAG_STRING, HPROF_TIME);
+        if (err == 0) {
+            const char *str;
+
+            str = (const char *)dvmHashIterData(&iter);
+            assert(str != NULL);
+
+            /* STRING format:
+             *
+             * ID:     ID for this string
+             * [u1]*:  UTF8 characters for string (NOT NULL terminated)
+             *         (the record format encodes the length)
+             *
+             * We use the address of the string data as its ID.
+             */
+            err = hprofAddU4ToRecord(rec, (u4)str);
+            if (err == 0) {
+                err = hprofAddUtf8StringToRecord(rec, str);
+            }
+        }
+    }
+
+    dvmHashTableUnlock(gStringHashTable);
+
+    return err;
+}
diff --git a/vm/interp/Interp.c b/vm/interp/Interp.c
new file mode 100644
index 0000000..41c5cf5
--- /dev/null
+++ b/vm/interp/Interp.c
@@ -0,0 +1,715 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Main interpreter entry point and support functions.
+ *
+ * The entry point selects the "standard" or "debug" interpreter and
+ * facilitates switching between them.  The standard interpreter may
+ * use the "fast" or "portable" implementation.
+ *
+ * Some debugger support functions are included here.  Ideally their
+ * entire existence would be "#ifdef WITH_DEBUGGER", but we're not that
+ * aggressive in other parts of the code yet.
+ */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+
+
+/*
+ * ===========================================================================
+ *      Debugger support
+ * ===========================================================================
+ */
+
+/*
+ * Initialize the breakpoint address lookup table when the debugger attaches.
+ *
+ * This shouldn't be necessary -- the global area is initially zeroed out,
+ * and the events should be cleaning up after themselves.
+ */
+void dvmInitBreakpoints(void)
+{
+#ifdef WITH_DEBUGGER
+    /* explicitly clear every slot in the breakpoint address table */
+    int i;
+
+    for (i = 0; i < MAX_BREAKPOINTS; i++)
+        gDvm.debugBreakAddr[i] = NULL;
+#else
+    assert(false);
+#endif
+}
+
+/*
+ * Add an address to the list, putting it in the first empty slot.
+ *
+ * Sometimes the debugger likes to add two entries for one breakpoint.
+ * We add two entries here, so that we get the right behavior when it's
+ * removed twice.
+ *
+ * This will only be run from the JDWP thread, and it will happen while
+ * we are updating the event list, which is synchronized.  We're guaranteed
+ * to be the only one adding entries, and the lock ensures that nobody
+ * will be trying to remove them while we're in here.
+ *
+ * "addr" is the absolute address of the breakpoint bytecode.
+ */
+void dvmAddBreakAddr(Method* method, int instrOffset)
+{
+#ifdef WITH_DEBUGGER
+    const u2* bkpAddr = method->insns + instrOffset;
+    int slot;
+
+    LOGV("BKP: add %p %s.%s (%s:%d)\n",
+        bkpAddr, method->clazz->descriptor, method->name,
+        dvmGetMethodSourceFile(method), dvmLineNumFromPC(method, instrOffset));
+
+    method->debugBreakpointCount++;
+
+    /* claim the first empty slot in the table */
+    for (slot = 0; slot < MAX_BREAKPOINTS; slot++) {
+        if (gDvm.debugBreakAddr[slot] == NULL) {
+            gDvm.debugBreakAddr[slot] = bkpAddr;
+            break;
+        }
+    }
+    if (slot == MAX_BREAKPOINTS) {
+        /* no room; size is too small or we're not cleaning up properly */
+        LOGE("ERROR: max breakpoints exceeded\n");
+        assert(false);
+    }
+#else
+    assert(false);
+#endif
+}
+
+/*
+ * Remove an address from the list by setting the entry to NULL.
+ *
+ * This can be called from the JDWP thread (because the debugger has
+ * cancelled the breakpoint) or from an event thread (because it's a
+ * single-shot breakpoint, e.g. "run to line").  We only get here as
+ * the result of removing an entry from the event list, which is
+ * synchronized, so it should not be possible for two threads to be
+ * updating breakpoints at the same time.
+ */
+void dvmClearBreakAddr(Method* method, int instrOffset)
+{
+#ifdef WITH_DEBUGGER
+    const u2* bkpAddr = method->insns + instrOffset;
+    int slot;
+
+    LOGV("BKP: clear %p %s.%s (%s:%d)\n",
+        bkpAddr, method->clazz->descriptor, method->name,
+        dvmGetMethodSourceFile(method), dvmLineNumFromPC(method, instrOffset));
+
+    method->debugBreakpointCount--;
+    assert(method->debugBreakpointCount >= 0);
+
+    /* release the first slot holding this address */
+    for (slot = 0; slot < MAX_BREAKPOINTS; slot++) {
+        if (gDvm.debugBreakAddr[slot] == bkpAddr) {
+            gDvm.debugBreakAddr[slot] = NULL;
+            break;
+        }
+    }
+    if (slot == MAX_BREAKPOINTS) {
+        /* didn't find it */
+        LOGE("ERROR: breakpoint on %p not found\n", bkpAddr);
+        assert(false);
+    }
+#else
+    assert(false);
+#endif
+}
+
+/*
+ * Add a single step event.  Currently this is a global item.
+ *
+ * We set up some initial values based on the thread's current state.  This
+ * won't work well if the thread is running, so it's up to the caller to
+ * verify that it's suspended.
+ *
+ * This is only called from the JDWP thread.
+ */
+bool dvmAddSingleStep(Thread* thread, int size, int depth)
+{
+#ifdef WITH_DEBUGGER
+    StepControl* pCtrl = &gDvm.stepControl;
+
+    if (pCtrl->active && thread != pCtrl->thread) {
+        LOGW("WARNING: single-step active for %p; adding %p\n",
+            pCtrl->thread, thread);
+
+        /*
+         * Keep going, overwriting previous.  This can happen if you
+         * suspend a thread in Object.wait, hit the single-step key, then
+         * switch to another thread and do the same thing again.
+         * The first thread's step is still pending.
+         *
+         * TODO: consider making single-step per-thread.  Adds to the
+         * overhead, but could be useful in rare situations.
+         */
+    }
+
+    pCtrl->size = size;
+    pCtrl->depth = depth;
+    pCtrl->thread = thread;
+
+    /*
+     * We may be stepping into or over method calls, or running until we
+     * return from the current method.  To make this work we need to track
+     * the current line, current method, and current stack depth.  We need
+     * to be checking these after most instructions, notably those that
+     * call methods, return from methods, or are on a different line from the
+     * previous instruction.
+     *
+     * We have to start with a snapshot of the current state.  If we're in
+     * an interpreted method, everything we need is in the current frame.  If
+     * we're in a native method, possibly with some extra JNI frames pushed
+     * on by PushLocalFrame, we want to use the topmost native method.
+     */
+    const StackSaveArea* saveArea;
+    void* fp;
+    void* prevFp = NULL;
+    
+    /*
+     * Walk from the top of the stack toward the bottom, stopping at the
+     * first frame that is neither a break frame nor a native method.
+     */
+    for (fp = thread->curFrame; fp != NULL; fp = saveArea->prevFrame) {
+        const Method* method;
+
+        saveArea = SAVEAREA_FROM_FP(fp);
+        method = saveArea->method;
+
+        if (!dvmIsBreakFrame(fp) && !dvmIsNativeMethod(method))
+            break;
+        prevFp = fp;    /* remember the frame just above the one we stop at */
+    }
+    if (fp == NULL) {
+        /* no interpreted frames at all; nothing to step in */
+        LOGW("Unexpected: step req in native-only threadid=%d\n",
+            thread->threadId);
+        return false;
+    }
+    if (prevFp != NULL) {
+        /*
+         * First interpreted frame wasn't the one at the bottom.  Break
+         * frames are only inserted when calling from native->interp, so we
+         * don't need to worry about one being here.
+         */
+        LOGV("##### init step while in native method\n");
+        fp = prevFp;
+        assert(!dvmIsBreakFrame(fp));
+        assert(dvmIsNativeMethod(SAVEAREA_FROM_FP(fp)->method));
+        saveArea = SAVEAREA_FROM_FP(fp);
+    }
+
+    /*
+     * Pull the goodies out.  "xtra.currentPc" should be accurate since
+     * we update it on every instruction while the debugger is connected.
+     */
+    pCtrl->method = saveArea->method;
+    // Clear out any old address set
+    if (pCtrl->pAddressSet != NULL) {
+        // (discard const)
+        free((void *)pCtrl->pAddressSet);
+        pCtrl->pAddressSet = NULL;
+    }
+    if (dvmIsNativeMethod(pCtrl->method)) {
+        /* native frame selected above; there is no line table to consult */
+        pCtrl->line = -1;
+    } else {
+        pCtrl->line = dvmLineNumFromPC(saveArea->method,
+                        saveArea->xtra.currentPc - saveArea->method->insns);
+        pCtrl->pAddressSet 
+                = dvmAddressSetForLine(saveArea->method, pCtrl->line);
+    }
+    /*
+     * NOTE(review): depth is snapshotted from the thread's original
+     * curFrame, not the frame selected above -- confirm this is intended
+     * when the step request arrives while in a native method.
+     */
+    pCtrl->frameDepth = dvmComputeVagueFrameDepth(thread, thread->curFrame);
+    pCtrl->active = true;
+
+    LOGV("##### step init: thread=%p meth=%p '%s' line=%d frameDepth=%d depth=%s size=%s\n",
+        pCtrl->thread, pCtrl->method, pCtrl->method->name,
+        pCtrl->line, pCtrl->frameDepth,
+        dvmJdwpStepDepthStr(pCtrl->depth),
+        dvmJdwpStepSizeStr(pCtrl->size));
+
+    return true;
+#else
+    assert(false);
+    return false;
+#endif
+}
+
+/*
+ * Disable a single step event.
+ */
+void dvmClearSingleStep(Thread* thread)
+{
+#ifdef WITH_DEBUGGER
+    /* single-step state is a single global, so the thread arg is unused */
+    UNUSED_PARAMETER(thread);
+
+    gDvm.stepControl.active = false;
+#else
+    assert(false);
+#endif
+}
+
+
+/*
+ * Recover the "this" pointer from the current interpreted method.  "this"
+ * is always in "in0" for non-static methods.
+ *
+ * The "ins" start at (#of registers - #of ins).  Note in0 != v0.
+ *
+ * This works because "dx" guarantees that it will work.  It's probably
+ * fairly common to have a virtual method that doesn't use its "this"
+ * pointer, in which case we're potentially wasting a register.  However,
+ * the debugger doesn't treat "this" as just another argument.  For
+ * example, events (such as breakpoints) can be enabled for specific
+ * values of "this".  There is also a separate StackFrame.ThisObject call
+ * in JDWP that is expected to work for any non-native non-static method.
+ *
+ * Because we need it when setting up debugger event filters, we want to
+ * be able to do this quickly.
+ */
+Object* dvmGetThisPtr(const Method* method, const u4* fp)
+{
+    /* static methods have no "this"; otherwise it's always in0 */
+    return dvmIsStaticMethod(method)
+        ? NULL
+        : (Object*)fp[method->registersSize - method->insSize];
+}
+
+
+#if defined(WITH_TRACKREF_CHECKS)
+/*
+ * Verify that all internally-tracked references have been released.  If
+ * they haven't, print them and abort the VM.
+ *
+ * "debugTrackedRefStart" indicates how many refs were on the list when
+ * we were first invoked.
+ */
+void dvmInterpCheckTrackedRefs(Thread* self, const Method* method,
+    int debugTrackedRefStart)
+{
+    /* a count mismatch means somebody added a ref and never released it */
+    if (dvmReferenceTableEntries(&self->internalLocalRefTable)
+        != (size_t) debugTrackedRefStart)
+    {
+        char* desc;
+        Object** top;
+        int count;
+
+        count = dvmReferenceTableEntries(&self->internalLocalRefTable);
+
+        LOGE("TRACK: unreleased internal reference (prev=%d total=%d)\n",
+            debugTrackedRefStart, count);
+        desc = dexProtoCopyMethodDescriptor(&method->prototype);
+        LOGE("       current method is %s.%s %s\n", method->clazz->descriptor,
+            method->name, desc);
+        free(desc);
+        /* dump every entry added since the caller took its snapshot */
+        top = self->internalLocalRefTable.table + debugTrackedRefStart;
+        while (top < self->internalLocalRefTable.nextEntry) {
+            LOGE("  %p (%s)\n",
+                 *top,
+                 ((*top)->clazz != NULL) ? (*top)->clazz->descriptor : "");
+            top++;
+        }
+        dvmDumpThread(self, false);
+
+        dvmAbort();
+    }
+    //LOGI("TRACK OK\n");
+}
+#endif
+
+
+#ifdef LOG_INSTR
+/*
+ * Dump the v-registers.  Sent to the ILOG log tag.
+ */
+/*
+ * Dump the v-registers for "method"'s frame.  Sent to the ILOG log tag.
+ *
+ * Registers are laid out as [locals][ins], so the ins occupy the highest
+ * register numbers.  If "inOnly" is set, only the incoming arguments are
+ * printed; the locals are elided.
+ */
+void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly)
+{
+    int i, localCount;
+
+    localCount = method->registersSize - method->insSize;
+
+    LOG(LOG_VERBOSE, LOG_TAG"i", "Registers (fp=%p):\n", framePtr);
+    for (i = method->registersSize-1; i >= 0; i--) {
+        if (i >= localCount) {
+            LOG(LOG_VERBOSE, LOG_TAG"i", "  v%-2d in%-2d : 0x%08x\n",
+                i, i-localCount, framePtr[i]);
+        } else {
+            if (inOnly) {
+                LOG(LOG_VERBOSE, LOG_TAG"i", "  [...]\n");
+                break;
+            }
+            const char* name = "";
+#if 0   // "locals" structure has changed -- need to rewrite this
+            int j;
+            DexFile* pDexFile = method->clazz->pDexFile;
+            const DexCode* pDexCode = dvmGetMethodCode(method);
+            int localsSize = dexGetLocalsSize(pDexFile, pDexCode);
+            const DexLocal* locals = dvmDexGetLocals(pDexFile, pDexCode);
+            /* was "j < localsSize, j++" -- comma typo would not compile */
+            for (j = 0; j < localsSize; j++) {
+                if (locals[j].registerNum == (u4) i) {
+                    name = dvmDexStringStr(locals[j].pName);
+                    break;
+                }
+            }
+#endif
+            LOG(LOG_VERBOSE, LOG_TAG"i", "  v%-2d      : 0x%08x %s\n",
+                i, framePtr[i], name);
+        }
+    }
+}
+#endif
+
+
+/*
+ * ===========================================================================
+ *      Entry point and general support functions
+ * ===========================================================================
+ */
+
+/* 
+ * Construct an s4 from two consecutive half-words of switch data.
+ * This needs to check endianness because the DEX optimizer only swaps
+ * half-words in instruction stream.
+ *
+ * "switchData" must be 32-bit aligned.
+ */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+static inline s4 s4FromSwitchData(const void* switchData) {
+    /* half-words are already in memory order; read directly */
+    return *(s4*) switchData;
+}
+#else
+static inline s4 s4FromSwitchData(const void* switchData) {
+    /* the optimizer swapped within each half-word but not the pair */
+    const u2* data = switchData;
+    return data[0] | (((s4) data[1]) << 16);
+}
+#endif
+
+/*
+ * Find the matching case.  Returns the offset to the handler instructions.
+ *
+ * Returns 3 if we don't find a match (it's the size of the packed-switch
+ * instruction).
+ */
+s4 dvmInterpHandlePackedSwitch(const u2* switchData, s4 testVal)
+{
+    const int kInstrLen = 3;    /* size of the packed-switch instruction */
+    const s4* targets;
+    s4 firstKey;
+    u2 size;
+
+    /*
+     * Packed switch data format:
+     *  ushort ident = 0x0100   magic value
+     *  ushort size             number of entries in the table
+     *  int first_key           first (and lowest) switch case value
+     *  int targets[size]       branch targets, relative to switch opcode
+     *
+     * Total size is (4+size*2) 16-bit code units.
+     */
+    if (*switchData++ != kPackedSwitchSignature) {
+        /* should have been caught by verifier */
+        dvmThrowException("Ljava/lang/InternalError;",
+            "bad packed switch magic");
+        return kInstrLen;
+    }
+
+    size = *switchData++;
+    assert(size > 0);
+
+    /* first_key is stored as two half-words, low half first */
+    firstKey = *switchData++;
+    firstKey |= (*switchData++) << 16;
+
+    if (testVal < firstKey || testVal >= firstKey + size) {
+        LOGVV("Value %d not found in switch (%d-%d)\n",
+            testVal, firstKey, firstKey+size-1);
+        return kInstrLen;
+    }
+
+    /*
+     * The target table is 32-bit aligned, so it can be indexed directly
+     * as a native int array.
+     */
+    targets = (const s4*) switchData;
+    assert(((u4)targets & 0x3) == 0);
+
+    s4 slot = testVal - firstKey;
+    assert(slot >= 0 && slot < size);
+    LOGVV("Value %d found in slot %d (goto 0x%02x)\n",
+        testVal, slot,
+        s4FromSwitchData(&targets[slot]));
+    return s4FromSwitchData(&targets[slot]);
+}
+
+/*
+ * Find the matching case.  Returns the offset to the handler instructions.
+ *
+ * Returns 3 if we don't find a match (it's the size of the sparse-switch
+ * instruction).
+ */
+s4 dvmInterpHandleSparseSwitch(const u2* switchData, s4 testVal)
+{
+    const int kInstrLen = 3;    /* size of the sparse-switch instruction */
+    u2 size;                    /* "ident" was declared but never used */
+    const s4* keys;
+    const s4* entries;
+    int i;
+
+    /*
+     * Sparse switch data format:
+     *  ushort ident = 0x0200   magic value
+     *  ushort size             number of entries in the table; > 0
+     *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
+     *  int targets[size]       branch targets, relative to switch opcode
+     *
+     * Total size is (2+size*4) 16-bit code units.
+     */
+
+    if (*switchData++ != kSparseSwitchSignature) {
+        /* should have been caught by verifier */
+        dvmThrowException("Ljava/lang/InternalError;",
+            "bad sparse switch magic");
+        return kInstrLen;
+    }
+
+    size = *switchData++;
+    assert(size > 0);
+    
+    /* The keys are guaranteed to be aligned on a 32-bit boundary;
+     * we can treat them as a native int array.
+     */
+    keys = (const s4*) switchData;
+    assert(((u4)keys & 0x3) == 0);
+
+    /* The entries start right after the keys and share their alignment;
+     * we can treat them as a native int array.
+     */
+    entries = keys + size;
+    assert(((u4)entries & 0x3) == 0);
+
+    /*
+     * Run through the list of keys, which are guaranteed to
+     * be sorted low-to-high.
+     *
+     * Most tables have 3-4 entries.  Few have more than 10.  A binary
+     * search here is probably not useful.
+     */
+    for (i = 0; i < size; i++) {
+        s4 k = s4FromSwitchData(&keys[i]);
+        if (k == testVal) {
+            LOGVV("Value %d found in entry %d (goto 0x%02x)\n",
+                testVal, i, s4FromSwitchData(&entries[i]));
+            return s4FromSwitchData(&entries[i]);
+        } else if (k > testVal) {
+            /* keys are sorted; no match possible beyond this point */
+            break;
+        }
+    }
+
+    LOGVV("Value %d not found in switch\n", testVal);
+    return kInstrLen;
+}
+
+/*
+ * Fill the array with predefined constant values.
+ *
+ * Returns true if job is completed, otherwise false to indicate that
+ * an exception has been thrown.
+ */
+bool dvmInterpHandleFillArrayData(ArrayObject* arrayObj, 
+                                  const u2* arrayData)
+{
+    u2 elemWidth;
+    u4 elemCount;
+
+    /* a null array reference raises NullPointerException for us */
+    if (!checkForNull((Object*) arrayObj))
+        return false;
+
+    /*
+     * Array data table format:
+     *  ushort ident = 0x0300   magic value
+     *  ushort width            width of each element in the table
+     *  uint   size             number of elements in the table
+     *  ubyte  data[size*width] table of data values (may contain a single-byte
+     *                          padding at the end)
+     *
+     * Total size is 4+(width * size + 1)/2 16-bit code units.
+     */
+    if (arrayData[0] != kArrayDataSignature) {
+        dvmThrowException("Ljava/lang/InternalError;", "bad array data magic");
+        return false;
+    }
+
+    elemWidth = arrayData[1];
+    elemCount = arrayData[2] | (((u4)arrayData[3]) << 16);
+
+    /* the table must describe exactly as many elements as the array holds */
+    if (elemCount != arrayObj->length) {
+        dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", NULL);
+        return false;
+    }
+
+    /* bulk-copy the raw element data into the array's storage */
+    memcpy(arrayObj->contents, &arrayData[4], elemCount * elemWidth);
+    return true;
+}
+
+/*
+ * Find the concrete method that corresponds to "methodIdx".  The code in
+ * "method" is executing invoke-method with "thisClass" as its first argument.
+ *
+ * Returns NULL with an exception raised on failure.
+ */
+Method* dvmInterpFindInterfaceMethod(ClassObject* thisClass, u4 methodIdx,
+    const Method* method, DvmDex* methodClassDex)
+{
+    Method* absMethod;
+    Method* methodToCall;
+    int i, vtableIndex;
+
+    /*
+     * Resolve the method.  This gives us the abstract method from the
+     * interface class declaration.
+     */
+    /* consult the DEX resolved-method cache first; resolve on a miss */
+    absMethod = dvmDexGetResolvedMethod(methodClassDex, methodIdx);
+    if (absMethod == NULL) {
+        absMethod = dvmResolveInterfaceMethod(method->clazz, methodIdx);
+        if (absMethod == NULL) {
+            /* resolution failed; per contract an exception has been raised */
+            LOGV("+ unknown method\n");
+            return NULL;
+        }
+    }
+
+    /* make sure absMethod->methodIndex means what we think it means */
+    assert(dvmIsAbstractMethod(absMethod));
+
+    /*
+     * Run through the "this" object's iftable.  Find the entry for
+     * absMethod's class, then use absMethod->methodIndex to find
+     * the method's entry.  The value there is the offset into our
+     * vtable of the actual method to execute.
+     *
+     * The verifier does not guarantee that objects stored into
+     * interface references actually implement the interface, so this
+     * check cannot be eliminated.
+     */
+    for (i = 0; i < thisClass->iftableCount; i++) {
+        if (thisClass->iftable[i].clazz == absMethod->clazz)
+            break;
+    }
+    if (i == thisClass->iftableCount) {
+        /* impossible in verified DEX, need to check for it in unverified */
+        dvmThrowException("Ljava/lang/IncompatibleClassChangeError;",
+            "interface not implemented");
+        return NULL;
+    }
+
+    assert(absMethod->methodIndex <
+        thisClass->iftable[i].clazz->virtualMethodCount);
+
+    /* map the interface's method index to a slot in this class's vtable */
+    vtableIndex =
+        thisClass->iftable[i].methodIndexArray[absMethod->methodIndex];
+    assert(vtableIndex >= 0 && vtableIndex < thisClass->vtableCount);
+    methodToCall = thisClass->vtable[vtableIndex];
+
+#if 0
+    /* this can happen when there's a stale class file */
+    if (dvmIsAbstractMethod(methodToCall)) {
+        dvmThrowException("Ljava/lang/AbstractMethodError;",
+            "interface method not implemented");
+        return NULL;
+    }
+#else
+    assert(!dvmIsAbstractMethod(methodToCall) ||
+        methodToCall->nativeFunc != NULL);
+#endif
+
+    LOGVV("+++ interface=%s.%s concrete=%s.%s\n",
+        absMethod->clazz->descriptor, absMethod->name,
+        methodToCall->clazz->descriptor, methodToCall->name);
+    assert(methodToCall != NULL);
+
+    return methodToCall;
+}
+
+
+/*
+ * Main interpreter loop entry point.  Select "standard" or "debug"
+ * interpreter and switch between them as required.
+ *
+ * This begins executing code at the start of "method".  On exit, "pResult"
+ * holds the return value of the method (or, if "method" returns NULL, it
+ * holds an undefined value).
+ *
+ * The interpreted stack frame, which holds the method arguments, has
+ * already been set up.
+ */
+void dvmInterpret(Thread* self, const Method* method, JValue* pResult)
+{
+    InterpState interpState;
+    bool change;
+
+#if defined(WITH_TRACKREF_CHECKS)
+    /* snapshot the tracked-ref count so leaks can be detected on exit */
+    interpState.debugTrackedRefStart =
+        dvmReferenceTableEntries(&self->internalLocalRefTable);
+#endif
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+    interpState.debugIsMethodEntry = true;
+#endif
+
+    /*
+     * Initialize working state.
+     *
+     * No need to initialize "retval".
+     */
+    interpState.method = method;
+    interpState.fp = (u4*) self->curFrame;
+    interpState.pc = method->insns;
+    interpState.entryPoint = kInterpEntryInstr;
+
+    /* start in the debug interpreter if a debugger/profiler is attached */
+    if (dvmDebuggerOrProfilerActive())
+        interpState.nextMode = INTERP_DBG;
+    else
+        interpState.nextMode = INTERP_STD;
+
+    assert(!dvmIsNativeMethod(method));
+
+    /*
+     * Make sure the class is ready to go.  Shouldn't be possible to get
+     * here otherwise.
+     */
+    if (method->clazz->status < CLASS_INITIALIZING ||
+        method->clazz->status == CLASS_ERROR)
+    {
+        LOGE("ERROR: tried to execute code in unprepared class '%s' (%d)\n",
+            method->clazz->descriptor, method->clazz->status);
+        dvmDumpThread(self, false);
+        dvmAbort();
+    }
+
+    /* the "standard" interpreter is either the fast mterp or the portable C */
+    typedef bool (*Interpreter)(Thread*, InterpState*);
+    Interpreter stdInterp;
+    if (gDvm.executionMode == kExecutionModeInterpFast)
+        stdInterp = dvmMterpStd;
+    else
+        stdInterp = dvmInterpretStd;
+
+    /*
+     * Bounce between interpreters until one finishes without requesting
+     * a mode switch; "interpState" carries the state across switches.
+     */
+    change = true;
+    while (change) {
+        switch (interpState.nextMode) {
+        case INTERP_STD:
+            LOGVV("threadid=%d: interp STD\n", self->threadId);
+            change = (*stdInterp)(self, &interpState);
+            break;
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+        case INTERP_DBG:
+            LOGVV("threadid=%d: interp DBG\n", self->threadId);
+            change = dvmInterpretDbg(self, &interpState);
+            break;
+#endif
+        default:
+            dvmAbort();
+        }
+    }
+
+    /* hand back the method's return value (undefined for void methods) */
+    *pResult = interpState.retval;
+}
diff --git a/vm/interp/Interp.h b/vm/interp/Interp.h
new file mode 100644
index 0000000..eb36b9f
--- /dev/null
+++ b/vm/interp/Interp.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik interpreter public definitions.
+ */
+#ifndef _DALVIK_INTERP_INTERP
+#define _DALVIK_INTERP_INTERP
+
+/*
+ * Interpreter entry point.  Call here after setting up the interpreted
+ * stack (most code will want to get here via dvmCallMethod().)
+ */
+void dvmInterpret(Thread* thread, const Method* method, JValue* pResult);
+
+/*
+ * Breakpoint optimization table.
+ */
+/* "(void)" makes this a real prototype, matching the definition */
+void dvmInitBreakpoints(void);
+void dvmAddBreakAddr(Method* method, int instrOffset);
+void dvmClearBreakAddr(Method* method, int instrOffset);
+bool dvmAddSingleStep(Thread* thread, int size, int depth);
+void dvmClearSingleStep(Thread* thread);
+
+#endif /*_DALVIK_INTERP_INTERP*/
diff --git a/vm/interp/InterpCore.h b/vm/interp/InterpCore.h
new file mode 100644
index 0000000..79f21dc
--- /dev/null
+++ b/vm/interp/InterpCore.h
@@ -0,0 +1,2953 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Main interpreter loop.  This is in an include file so that we can
+ * generate multiple versions (debug, nondebug).
+ *
+ * For thread suspension, the plan is to check for thread suspension on
+ * backward branches, exceptions, and method returns, the theory being that
+ * a given code path will do one of these in short order.
+ *
+ * The INTERP_TYPE preprocessing variable will be set to INTERP_STD or
+ * INTERP_DBG.  If it's set to INTERP_DBG, we know that either WITH_DEBUGGER
+ * or WITH_PROFILER is set; if neither is set, the debug version of the
+ * interpreter is simply not built.
+ *
+ * TODO: now that we have "mterp", this should be generated from the mterp
+ * sources, so that we don't have the two copies of the same code.  This
+ * requires concatenating the various pieces together (as we would for the
+ * C-stub-only form of mterp), but combining everything into a single large
+ * function.  We also need to merge some debugger support functions in,
+ * which requires some additional work because the mterp C code doesn't
+ * presently have that.
+ */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+
+#include <stdlib.h>
+#include <math.h>
+
+
+/*
+ * Periodically check for thread suspension.
+ *
+ * While we're at it, see if a debugger has attached or the profiler has
+ * started.  If so, switch to a different "goto" table.
+ */
+#define PERIODIC_CHECKS(_entryPoint, _pcadj) {                              \
+        dvmCheckSuspendQuick(self);                                         \
+        if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
+            ADJUST_PC(_pcadj);                                              \
+            interpState->entryPoint = _entryPoint;                          \
+            LOGVV("threadid=%d: switch to %s ep=%d adj=%d\n",               \
+                self->threadId,                                             \
+                (interpState->nextMode == INTERP_STD) ? "STD" : "DBG",      \
+                (_entryPoint), (_pcadj));                                   \
+            goto bail_switch;                                               \
+        }                                                                   \
+    }
+
+
+
+#if INTERP_TYPE == INTERP_DBG
+/* code in here is only included if profiling or debugging is enabled */
+
+/*
+ * Determine if an address is "interesting" to the debugger.  This allows
+ * us to avoid scanning the entire event list before every instruction.
+ *
+ * The "debugBreakAddr" table is global and not synchronized.
+ */
+static bool isInterestingAddr(const u2* pc)
+{
+    int idx;
+
+    /* linear scan of the (small, fixed-size) global breakpoint table */
+    for (idx = 0; idx < MAX_BREAKPOINTS; idx++) {
+        if (gDvm.debugBreakAddr[idx] == pc) {
+            LOGV("BKP: hit on %p\n", pc);
+            return true;
+        }
+    }
+    return false;
+}
+
+/*
+ * Update the debugger on interesting events, such as hitting a breakpoint
+ * or a single-step point.  This is called from the top of the interpreter
+ * loop, before the current instruction is processed.
+ *
+ * Set "methodEntry" if we've just entered the method.  This detects
+ * method exit by checking to see if the next instruction is "return".
+ *
+ * This can't catch native method entry/exit, so we have to handle that
+ * at the point of invocation.  We also need to catch it in dvmCallMethod
+ * if we want to capture native->native calls made through JNI.
+ *
+ * Notes to self:
+ * - Don't want to switch to VMWAIT while posting events to the debugger.
+ *   Let the debugger code decide if we need to change state.
+ * - We may want to check for debugger-induced thread suspensions on
+ *   every instruction.  That would make a "suspend all" more responsive
+ *   and reduce the chances of multiple simultaneous events occurring.
+ *   However, it could change the behavior some.
+ *
+ * TODO: method entry/exit events are probably less common than location
+ * breakpoints.  We may be able to speed things up a bit if we don't query
+ * the event list unless we know there's at least one lurking within.
+ */
+static void updateDebugger(const Method* method, const u2* pc, const u4* fp,
+    bool methodEntry, Thread* self)
+{
+    int eventFlags = 0;     /* accumulated DBG_* event bits for this pc */
+
+    /*
+     * Update xtra.currentPc on every instruction.  We need to do this if
+     * there's a chance that we could get suspended.  This can happen if
+     * eventFlags != 0 here, or somebody manually requests a suspend
+     * (which gets handled at PERIODIC_CHECKS time).  One place where this
+     * needs to be correct is in dvmAddSingleStep().
+     */
+    EXPORT_PC();
+
+    if (methodEntry)
+        eventFlags |= DBG_METHOD_ENTRY;
+
+    /*
+     * See if we have a breakpoint here.
+     *
+     * Depending on the "mods" associated with event(s) on this address,
+     * we may or may not actually send a message to the debugger.
+     *
+     * Checking method->debugBreakpointCount is slower on the device than
+     * just scanning the table (!).  We could probably work something out
+     * where we just check it on method entry/exit and remember the result,
+     * but that's more fragile and requires passing more stuff around.
+     */
+#ifdef WITH_DEBUGGER
+    if (method->debugBreakpointCount > 0 && isInterestingAddr(pc)) {
+        eventFlags |= DBG_BREAKPOINT;
+    }
+#endif
+
+    /*
+     * If the debugger is single-stepping one of our threads, check to
+     * see if we're that thread and we've reached a step point.
+     */
+    const StepControl* pCtrl = &gDvm.stepControl;
+    if (pCtrl->active && pCtrl->thread == self) {
+        int line, frameDepth;
+        bool doStop = false;    /* set when a step event should fire */
+        const char* msg = NULL; /* human-readable reason, for logging only */
+
+        assert(!dvmIsNativeMethod(method));
+
+        if (pCtrl->depth == SD_INTO) {
+            /*
+             * Step into method calls.  We break when the line number
+             * or method pointer changes.  If we're in SS_MIN mode, we
+             * always stop.
+             */
+            if (pCtrl->method != method) {
+                doStop = true;
+                msg = "new method";
+            } else if (pCtrl->size == SS_MIN) {
+                doStop = true;
+                msg = "new instruction";
+            } else if (!dvmAddressSetGet(
+                    pCtrl->pAddressSet, pc - method->insns)) {
+                doStop = true;
+                msg = "new line";
+            }
+        } else if (pCtrl->depth == SD_OVER) {
+            /*
+             * Step over method calls.  We break when the line number is
+             * different and the frame depth is <= the original frame
+             * depth.  (We can't just compare on the method, because we
+             * might get unrolled past it by an exception, and it's tricky
+             * to identify recursion.)
+             */
+            frameDepth = dvmComputeVagueFrameDepth(self, fp);
+            if (frameDepth < pCtrl->frameDepth) {
+                /* popped up one or more frames, always trigger */
+                doStop = true;
+                msg = "method pop";
+            } else if (frameDepth == pCtrl->frameDepth) {
+                /* same depth, see if we moved */
+                if (pCtrl->size == SS_MIN) {
+                    doStop = true;
+                    msg = "new instruction";
+                } else if (!dvmAddressSetGet(pCtrl->pAddressSet, 
+                            pc - method->insns)) {
+                    doStop = true;
+                    msg = "new line";
+                }
+            }
+        } else {
+            assert(pCtrl->depth == SD_OUT);
+            /*
+             * Return from the current method.  We break when the frame
+             * depth pops up.
+             *
+             * This differs from the "method exit" break in that it stops
+             * with the PC at the next instruction in the returned-to
+             * function, rather than the end of the returning function.
+             */
+            frameDepth = dvmComputeVagueFrameDepth(self, fp);
+            if (frameDepth < pCtrl->frameDepth) {
+                doStop = true;
+                msg = "method pop";
+            }
+        }
+
+        if (doStop) {
+            LOGV("#####S %s\n", msg);
+            eventFlags |= DBG_SINGLE_STEP;
+        }
+    }
+
+    /*
+     * Check to see if this is a "return" instruction.  JDWP says we should
+     * send the event *after* the code has been executed, but it also says
+     * the location we provide is the last instruction.  Since the "return"
+     * instruction has no interesting side effects, we should be safe.
+     * (We can't just move this down to the returnFromMethod label because
+     * we potentially need to combine it with other events.)
+     *
+     * We're also not supposed to generate a method exit event if the method
+     * terminates "with a thrown exception".
+     */
+    u2 inst = INST_INST(FETCH(0));  /* peek at opcode of current instruction */
+    if (inst == OP_RETURN_VOID || inst == OP_RETURN || inst == OP_RETURN_WIDE ||
+        inst == OP_RETURN_OBJECT)
+    {
+        eventFlags |= DBG_METHOD_EXIT;
+    }
+
+    /*
+     * If there's something interesting going on, see if it matches one
+     * of the debugger filters.
+     */
+    if (eventFlags != 0) {
+        Object* thisPtr = dvmGetThisPtr(method, fp);
+        if (thisPtr != NULL && !dvmIsValidObject(thisPtr)) {
+            /*
+             * TODO: remove this check if we're confident that the "this"
+             * pointer is where it should be -- slows us down, especially
+             * during single-step.
+             */
+            char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+            LOGE("HEY: invalid 'this' ptr %p (%s.%s %s)\n", thisPtr,
+                method->clazz->descriptor, method->name, desc);
+            free(desc);
+            dvmAbort();
+        }
+        dvmDbgPostLocationEvent(method, pc - method->insns, thisPtr,
+            eventFlags);
+    }
+}
+
+/*
+ * Perform some operations at the "top" of the interpreter loop.
+ * This stuff is required to support debugging and profiling.
+ *
+ * Using "__attribute__((noinline))" seems to do more harm than good.  This
+ * is best when inlined due to the large number of parameters, most of
+ * which are local vars in the main interp loop.
+ */
+static void checkDebugAndProf(const u2* pc, const u4* fp, Thread* self,
+    const Method* method, bool* pIsMethodEntry)
+{
+    /* check to see if we've run off end of method */
+    assert(pc >= method->insns && pc <
+            method->insns + dvmGetMethodInsnsSize(method));
+
+#if 0
+    /*
+     * When we hit a specific method, enable verbose instruction logging.
+     * Sometimes it's helpful to use the debugger attach as a trigger too.
+     */
+    if (*pIsMethodEntry) {
+        static const char* cd = "Landroid/test/Arithmetic;";
+        static const char* mn = "shiftTest2";
+        static const char* sg = "()V";
+
+        if (/*gDvm.debuggerActive &&*/
+            strcmp(method->clazz->descriptor, cd) == 0 &&
+            strcmp(method->name, mn) == 0 &&
+            strcmp(method->signature, sg) == 0)
+        {
+            LOGW("Reached %s.%s, enabling verbose mode\n",
+                method->clazz->descriptor, method->name);
+            android_setMinPriority(LOG_TAG"i", ANDROID_LOG_VERBOSE);
+            dumpRegs(method, fp, true);
+        }
+
+        if (!gDvm.debuggerActive)
+            *pIsMethodEntry = false;
+    }
+#endif
+
+    /*
+     * If the debugger is attached, check for events.
+     *
+     * This doesn't work quite right for "*pIsMethodEntry" if we're profiling
+     * while the debugger is attached, but that's not very useful anyway.
+     */
+    if (gDvm.debuggerActive) {
+        updateDebugger(method, pc, fp, *pIsMethodEntry, self);
+        *pIsMethodEntry = false;    /* entry event consumed by debugger */
+    }
+#ifdef WITH_PROFILER
+    else {
+        if (*pIsMethodEntry) {
+            TRACE_METHOD_ENTER(self, method);
+            *pIsMethodEntry = false;    /* entry event consumed by profiler */
+        }
+    }
+    if (gDvm.instructionCountEnableCount != 0) {
+        /*
+         * Count up the #of executed instructions.  This isn't synchronized
+         * for thread-safety; if we need that we should make this
+         * thread-local and merge counts into the global area when threads
+         * exit (suspending all threads GC-style).
+         */
+        int inst = *pc & 0xff;  /* low byte of the code unit is the opcode */
+        gDvm.executedInstrCounts[inst]++;
+    }
+#endif
+}
+
+#endif /*INTERP_TYPE == INTERP_DBG*/
+
+
+
+/*
+ * Main interpreter loop.
+ *
+ * This was written with an ARM implementation in mind.
+ */
+bool INTERP_FUNC_NAME(Thread* self, InterpState* interpState)
+{
+#if defined(EASY_GDB)
+    StackSaveArea* debugSaveArea = SAVEAREA_FROM_FP(self->curFrame);
+#endif
+#if INTERP_TYPE == INTERP_DBG
+    bool debugIsMethodEntry = interpState->debugIsMethodEntry;
+#endif
+#if defined(WITH_TRACKREF_CHECKS)
+    int debugTrackedRefStart = interpState->debugTrackedRefStart;
+#endif
+    DvmDex* methodClassDex;     // method->clazz->pDvmDex
+    JValue retval;
+
+    /* core state */
+    const Method* method;       // method we're interpreting
+    const u2* pc;               // program counter
+    u4* fp;                     // frame pointer
+    u2 inst;                    // current instruction
+    /* instruction decoding */
+    u2 ref;                     // 16-bit quantity fetched directly
+    u2 vsrc1, vsrc2, vdst;      // usually used for register indexes
+    /* method call setup */
+    const Method* methodToCall;
+    bool methodCallRange;
+
+#if defined(THREADED_INTERP)
+    /* static computed goto table */
+    DEFINE_GOTO_TABLE(handlerTable);
+#endif
+
+    /* copy state in */
+    method = interpState->method;
+    pc = interpState->pc;
+    fp = interpState->fp;
+    retval = interpState->retval;   /* only need for kInterpEntryReturn? */
+
+    methodClassDex = method->clazz->pDvmDex;
+
+    LOGVV("threadid=%d: entry(%s) %s.%s pc=0x%x fp=%p ep=%d\n",
+        self->threadId, (interpState->nextMode == INTERP_STD) ? "STD" : "DBG",
+        method->clazz->descriptor, method->name, pc - method->insns, fp,
+        interpState->entryPoint);
+
+    /*
+     * DEBUG: scramble this to ensure we're not relying on it.
+     */
+    methodToCall = (const Method*) -1;
+
+#if INTERP_TYPE == INTERP_DBG
+    if (debugIsMethodEntry) {
+        ILOGD("|-- Now interpreting %s.%s", method->clazz->descriptor,
+                method->name);
+        DUMP_REGS(method, interpState->fp, false);
+    }
+#endif
+
+    switch (interpState->entryPoint) {
+    case kInterpEntryInstr:
+        /* just fall through to instruction loop or threaded kickstart */
+        break;
+    case kInterpEntryReturn:
+        goto returnFromMethod;
+    case kInterpEntryThrow:
+        goto exceptionThrown;
+    default:
+        dvmAbort();
+    }
+
+#ifdef THREADED_INTERP
+    FINISH(0);                  /* fetch and execute first instruction */
+#else
+    while (1) {
+        CHECK_DEBUG_AND_PROF(); /* service debugger and profiling */
+        CHECK_TRACKED_REFS();   /* check local reference tracking */
+
+        /* fetch the next 16 bits from the instruction stream */
+        inst = FETCH(0);
+
+        switch (INST_INST(inst)) {
+#endif
+
+
+HANDLE_OPCODE(OP_NOP)
+    FINISH(1);
+
+HANDLE_OPCODE(OP_GOTO /*+AA*/)
+    vdst = INST_AA(inst);
+    if ((s1)vdst < 0)
+        ILOGV("|goto -0x%02x", -((s1)vdst));
+    else
+        ILOGV("|goto +0x%02x", ((s1)vdst));
+    ILOGV("> branch taken");
+    if ((s1)vdst < 0)
+        PERIODIC_CHECKS(kInterpEntryInstr, (s1)vdst);
+    FINISH((s1)vdst);
+
+HANDLE_OPCODE(OP_GOTO_16 /*+AAAA*/)
+    {
+        s4 offset = (s2) FETCH(1);          /* sign-extend next code unit */
+
+        if (offset < 0)
+            ILOGV("|goto/16 -0x%04x", -offset);
+        else
+            ILOGV("|goto/16 +0x%04x", offset);
+        ILOGV("> branch taken");
+        if (offset < 0)
+            PERIODIC_CHECKS(kInterpEntryInstr, offset);
+        FINISH(offset);
+    }
+
+HANDLE_OPCODE(OP_GOTO_32 /*+AAAAAAAA*/)
+    {
+        s4 offset = FETCH(1);               /* low-order 16 bits */
+        offset |= ((s4) FETCH(2)) << 16;    /* high-order 16 bits */
+
+        if (offset < 0)
+            ILOGV("|goto/32 -0x%08x", -offset);
+        else
+            ILOGV("|goto/32 +0x%08x", offset);
+        ILOGV("> branch taken");
+        if (offset <= 0)    /* allowed to branch to self */
+            PERIODIC_CHECKS(kInterpEntryInstr, offset);
+        FINISH(offset);
+    }
+
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype)                \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        SET_REGISTER##_totype(vdst,                                         \
+            GET_REGISTER##_fromtype(vsrc1));                                \
+        FINISH(1);
+HANDLE_NUMCONV(OP_INT_TO_LONG,          "int-to-long", _INT, _WIDE)
+HANDLE_NUMCONV(OP_INT_TO_FLOAT,         "int-to-float", _INT, _FLOAT)
+HANDLE_NUMCONV(OP_INT_TO_DOUBLE,        "int-to-double", _INT, _DOUBLE)
+HANDLE_NUMCONV(OP_LONG_TO_INT,          "long-to-int", _WIDE, _INT)
+HANDLE_NUMCONV(OP_LONG_TO_FLOAT,        "long-to-float", _WIDE, _FLOAT)
+HANDLE_NUMCONV(OP_LONG_TO_DOUBLE,       "long-to-double", _WIDE, _DOUBLE)
+HANDLE_NUMCONV(OP_FLOAT_TO_DOUBLE,      "float-to-double", _FLOAT, _DOUBLE)
+HANDLE_NUMCONV(OP_DOUBLE_TO_FLOAT,      "double-to-float", _DOUBLE, _FLOAT)
+
+
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype,       \
+        _tovtype, _tortype)                                                 \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+    {                                                                       \
+        /* spec defines specific handling for +/- inf and NaN values */     \
+        _fromvtype val;                                                     \
+        _tovtype intMin, intMax, result;                                    \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        val = GET_REGISTER##_fromrtype(vsrc1);                              \
+        intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1);                 \
+        intMax = ~intMin;                                                   \
+        if (val >= intMax)          /* +inf */                              \
+            result = intMax;                                                \
+        else if (val <= intMin)     /* -inf */                              \
+            result = intMin;                                                \
+        else if (val != val)        /* NaN */                               \
+            result = 0;                                                     \
+        else                                                                \
+            result = (_tovtype) val;                                        \
+        SET_REGISTER##_tortype(vdst, result);                               \
+    }                                                                       \
+    FINISH(1);
+HANDLE_FLOAT_TO_INT(OP_FLOAT_TO_INT,    "float-to-int",
+    float, _FLOAT, s4, _INT)
+HANDLE_FLOAT_TO_INT(OP_FLOAT_TO_LONG,   "float-to-long",
+    float, _FLOAT, s8, _WIDE)
+HANDLE_FLOAT_TO_INT(OP_DOUBLE_TO_INT,   "double-to-int",
+    double, _DOUBLE, s4, _INT)
+HANDLE_FLOAT_TO_INT(OP_DOUBLE_TO_LONG,  "double-to-long",
+    double, _DOUBLE, s8, _WIDE)
+
+
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type)                        \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1);                \
+        SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1));                    \
+        FINISH(1);
+HANDLE_INT_TO_SMALL(OP_INT_TO_BYTE,     "byte", s1)
+HANDLE_INT_TO_SMALL(OP_INT_TO_CHAR,     "char", u2)
+HANDLE_INT_TO_SMALL(OP_INT_TO_SHORT,    "short", s2)    /* want sign bit */
+
+
+HANDLE_OPCODE(OP_MOVE_FROM16 /*vAA, vBBBB*/)
+HANDLE_OPCODE(OP_MOVE_OBJECT_FROM16 /*vAA, vBBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|move%s/from16 v%d,v%d %s(v%d=0x%08x)",
+        (INST_INST(inst) == OP_MOVE_FROM16) ? "" : "-object", vdst, vsrc1,
+        kSpacing, vdst, GET_REGISTER(vsrc1));
+    SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+    FINISH(2);
+
+HANDLE_OPCODE(OP_MOVE_16 /*vAAAA, vBBBB*/)
+HANDLE_OPCODE(OP_MOVE_OBJECT_16 /*vAAAA, vBBBB*/)
+    vdst = FETCH(1);
+    vsrc1 = FETCH(2);
+    ILOGV("|move%s/16 v%d,v%d %s(v%d=0x%08x)",
+        (INST_INST(inst) == OP_MOVE_16) ? "" : "-object", vdst, vsrc1,
+        kSpacing, vdst, GET_REGISTER(vsrc1));
+    SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+    FINISH(3);
+
+HANDLE_OPCODE(OP_MOVE_WIDE /*vA, vB*/)
+    /* IMPORTANT: must correctly handle overlapping registers, e.g. both
+     * "move-wide v6, v7" and "move-wide v7, v6" */
+    vdst = INST_A(inst);
+    vsrc1 = INST_B(inst);
+    ILOGV("|move-wide v%d,v%d %s(v%d=0x%08llx)", vdst, vsrc1,
+        kSpacing+5, vdst, GET_REGISTER_WIDE(vsrc1));
+    SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+    FINISH(1);
+
+HANDLE_OPCODE(OP_MOVE_WIDE_FROM16 /*vAA, vBBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|move-wide/from16 v%d,v%d  (v%d=0x%08llx)", vdst, vsrc1,
+        vdst, GET_REGISTER_WIDE(vsrc1));
+    SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+    FINISH(2);
+
+HANDLE_OPCODE(OP_MOVE_WIDE_16 /*vAAAA, vBBBB*/)
+    vdst = FETCH(1);
+    vsrc1 = FETCH(2);
+    ILOGV("|move-wide/16 v%d,v%d %s(v%d=0x%08llx)", vdst, vsrc1,
+        kSpacing+8, vdst, GET_REGISTER_WIDE(vsrc1));
+    SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+    FINISH(3);
+
+HANDLE_OPCODE(OP_MOVE_RESULT /*vAA*/)
+HANDLE_OPCODE(OP_MOVE_RESULT_OBJECT /*vAA*/)
+    vdst = INST_AA(inst);
+    ILOGV("|move-result%s v%d %s(v%d=0x%08x)",
+         (INST_INST(inst) == OP_MOVE_RESULT) ? "" : "-object",
+         vdst, kSpacing+4, vdst,retval.i);
+    SET_REGISTER(vdst, retval.i);
+    FINISH(1);
+
+HANDLE_OPCODE(OP_MOVE_RESULT_WIDE /*vAA*/)
+    vdst = INST_AA(inst);
+    ILOGV("|move-result-wide v%d %s(0x%08llx)", vdst, kSpacing, retval.j);
+    SET_REGISTER_WIDE(vdst, retval.j);
+    FINISH(1);
+
+HANDLE_OPCODE(OP_MOVE_EXCEPTION /*vAA*/)
+    vdst = INST_AA(inst);
+    ILOGV("|move-exception v%d", vdst);
+    assert(self->exception != NULL);
+    SET_REGISTER(vdst, (u4)self->exception);
+    dvmClearException(self);
+    FINISH(1);
+
+HANDLE_OPCODE(OP_MOVE /*vA, vB*/)
+HANDLE_OPCODE(OP_MOVE_OBJECT /*vA, vB*/)
+    vdst = INST_A(inst);
+    vsrc1 = INST_B(inst);
+    ILOGV("|move%s v%d,v%d %s(v%d=0x%08x)",
+        (INST_INST(inst) == OP_MOVE) ? "" : "-object", vdst, vsrc1,
+        kSpacing, vdst, GET_REGISTER(vsrc1));
+    SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+    FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal)          \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        int result;                                                         \
+        u2 regs;                                                            \
+        _varType val1, val2;                                                \
+        vdst = INST_AA(inst);                                               \
+        regs = FETCH(1);                                                    \
+        vsrc1 = regs & 0xff;                                                \
+        vsrc2 = regs >> 8;                                                  \
+        ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);         \
+        val1 = GET_REGISTER##_type(vsrc1);                                  \
+        val2 = GET_REGISTER##_type(vsrc2);                                  \
+        if (val1 == val2)                                                   \
+            result = 0;                                                     \
+        else if (val1 < val2)                                               \
+            result = -1;                                                    \
+        else if (val1 > val2)                                               \
+            result = 1;                                                     \
+        else                                                                \
+            result = (_nanVal);                                             \
+        ILOGV("+ result=%d\n", result);                                     \
+        SET_REGISTER(vdst, result);                                         \
+    }                                                                       \
+    FINISH(2);
+HANDLE_OP_CMPX(OP_CMPL_FLOAT, "l-float", float, _FLOAT, -1)
+HANDLE_OP_CMPX(OP_CMPG_FLOAT, "g-float", float, _FLOAT, 1)
+HANDLE_OP_CMPX(OP_CMPL_DOUBLE, "l-double", double, _DOUBLE, -1)
+HANDLE_OP_CMPX(OP_CMPG_DOUBLE, "g-double", double, _DOUBLE, 1)
+HANDLE_OP_CMPX(OP_CMP_LONG, "-long", s8, _WIDE, 0)
+
+
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp)                             \
+    HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/)                                \
+        vsrc1 = INST_A(inst);                                               \
+        vsrc2 = INST_B(inst);                                               \
+        if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) {       \
+            int branchOffset = (s2)FETCH(1);    /* sign-extended */         \
+            ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2,        \
+                branchOffset);                                              \
+            ILOGV("> branch taken");                                        \
+            if (branchOffset < 0)                                           \
+                PERIODIC_CHECKS(kInterpEntryInstr, branchOffset);           \
+            FINISH(branchOffset);                                           \
+        } else {                                                            \
+            ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2);             \
+            FINISH(2);                                                      \
+        }
+HANDLE_OP_IF_XX(OP_IF_EQ, "eq", ==)
+HANDLE_OP_IF_XX(OP_IF_NE, "ne", !=)
+HANDLE_OP_IF_XX(OP_IF_LT, "lt", <)
+HANDLE_OP_IF_XX(OP_IF_GE, "ge", >=)
+HANDLE_OP_IF_XX(OP_IF_GT, "gt", >)
+HANDLE_OP_IF_XX(OP_IF_LE, "le", <=)
+
+
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp)                            \
+    HANDLE_OPCODE(_opcode /*vAA, +BBBB*/)                                   \
+        vsrc1 = INST_AA(inst);                                              \
+        if ((s4) GET_REGISTER(vsrc1) _cmp 0) {                              \
+            int branchOffset = (s2)FETCH(1);    /* sign-extended */         \
+            ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset);    \
+            ILOGV("> branch taken");                                        \
+            if (branchOffset < 0)                                           \
+                PERIODIC_CHECKS(kInterpEntryInstr, branchOffset);           \
+            FINISH(branchOffset);                                           \
+        } else {                                                            \
+            ILOGV("|if-%s v%d,-", (_opname), vsrc1);                        \
+            FINISH(2);                                                      \
+        }
+HANDLE_OP_IF_XXZ(OP_IF_EQZ, "eqz", ==)
+HANDLE_OP_IF_XXZ(OP_IF_NEZ, "nez", !=)
+HANDLE_OP_IF_XXZ(OP_IF_LTZ, "ltz", <)
+HANDLE_OP_IF_XXZ(OP_IF_GEZ, "gez", >=)
+HANDLE_OP_IF_XXZ(OP_IF_GTZ, "gtz", >)
+HANDLE_OP_IF_XXZ(OP_IF_LEZ, "lez", <=)
+
+
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type)                    \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx);    \
+        FINISH(1);
+HANDLE_UNOP(OP_NEG_INT, "neg-int", -, , )
+HANDLE_UNOP(OP_NOT_INT, "not-int", , ^ 0xffffffff, )
+HANDLE_UNOP(OP_NEG_LONG, "neg-long", -, , _WIDE)
+HANDLE_UNOP(OP_NOT_LONG, "not-long", , ^ 0xffffffffffffffffULL, _WIDE)
+HANDLE_UNOP(OP_NEG_FLOAT, "neg-float", -, , _FLOAT)
+HANDLE_UNOP(OP_NEG_DOUBLE, "neg-double", -, , _DOUBLE)
+
+
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv)                     \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1);                   \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER(vsrc2) == 0) {                                 \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                goto exceptionThrown;                                       \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2));         \
+    }                                                                       \
+    FINISH(2);
+HANDLE_OP_X_INT(OP_ADD_INT, "add", +, false)
+HANDLE_OP_X_INT(OP_SUB_INT, "sub", -, false)
+HANDLE_OP_X_INT(OP_MUL_INT, "mul", *, false)
+HANDLE_OP_X_INT(OP_DIV_INT, "div", /, true)
+HANDLE_OP_X_INT(OP_REM_INT, "rem", %, true)
+HANDLE_OP_X_INT(OP_AND_INT, "and", &, false)
+HANDLE_OP_X_INT(OP_OR_INT,  "or",  |, false)
+HANDLE_OP_X_INT(OP_XOR_INT, "xor", ^, false)
+
+
+/*
+ * 32-bit shift: vAA <- vBB (shl/shr/ushr) vCC.
+ * Only the low 5 bits of the shift-count register are used (& 0x1f),
+ * matching Java semantics and avoiding undefined C shifts of >= 32.
+ * _cast selects arithmetic (s4) vs. logical (u4) right shift.
+ */
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op)                     \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1);                   \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f));    \
+    }                                                                       \
+    FINISH(2);
+HANDLE_OP_SHX_INT(OP_SHL_INT, "shl", (s4), <<)
+HANDLE_OP_SHX_INT(OP_SHR_INT, "shr", (s4), >>)
+HANDLE_OP_SHX_INT(OP_USHR_INT, "ushr", (u4), >>)
+
+
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _cast, _op, _chkdiv)        \
+    HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/)                               \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        vsrc2 = FETCH(1);                                                   \
+        ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x",                             \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        if (_chkdiv) {                                                      \
+            if ((s2) vsrc2 == 0) {                                          \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                goto exceptionThrown;                                       \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (s2) vsrc2);                      \
+        FINISH(2);
+HANDLE_OP_X_INT_LIT16(OP_ADD_INT_LIT16, "add", (s4), +, false)
+HANDLE_OP_X_INT_LIT16(OP_MUL_INT_LIT16, "mul", (s4), *, false)
+HANDLE_OP_X_INT_LIT16(OP_DIV_INT_LIT16, "div", (s4), /, true)
+HANDLE_OP_X_INT_LIT16(OP_REM_INT_LIT16, "rem", (s4), %, true)
+HANDLE_OP_X_INT_LIT16(OP_AND_INT_LIT16, "and", (s4), &, false)
+HANDLE_OP_X_INT_LIT16(OP_OR_INT_LIT16,  "or",  (s4), |, false)
+HANDLE_OP_X_INT_LIT16(OP_XOR_INT_LIT16, "xor", (s4), ^, false)
+
+
+/*
+ * rsub-int: vA <- literal (sign-extended CCCC) minus vB.
+ * The reversed operand order is why this can't reuse HANDLE_OP_X_INT_LIT16.
+ */
+HANDLE_OPCODE(OP_RSUB_INT /*vA, vB, #+CCCC*/)
+    {
+        vdst = INST_A(inst);
+        vsrc1 = INST_B(inst);
+        vsrc2 = FETCH(1);
+        ILOGV("|rsub-int v%d,v%d,#+0x%04x", vdst, vsrc1, vsrc2);
+        SET_REGISTER(vdst, (s2) vsrc2 - (s4) GET_REGISTER(vsrc1));
+    }
+    FINISH(2);
+
+
+/*
+ * Binary op with a signed 8-bit literal: vAA <- vBB op #+CC.
+ *
+ * "_chkdiv" selects divide-by-zero handling: 0 = none, 1 = div, 2 = rem.
+ * When the literal is -1 and vBB holds 0x80000000 the quotient overflows,
+ * which is undefined behavior in C; Java defines it as MIN_VALUE (div)
+ * or 0 (rem), so we special-case it.
+ */
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv)                \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/)                               \
+    {                                                                       \
+        u2 litInfo;                                                         \
+        vdst = INST_AA(inst);                                               \
+        litInfo = FETCH(1);                                                 \
+        vsrc1 = litInfo & 0xff;                                             \
+        vsrc2 = litInfo >> 8;       /* constant */                          \
+        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x",                              \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        if (_chkdiv != 0) {                                                 \
+            s4 firstVal, result;                                            \
+            firstVal = GET_REGISTER(vsrc1);                                 \
+            if ((s1) vsrc2 == 0) {                                          \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                goto exceptionThrown;                                       \
+            }                                                               \
+            if ((u4) firstVal == 0x80000000 && ((s1) vsrc2) == -1) {        \
+                /* MIN_VALUE / -1 overflows; Java defines the result */     \
+                if (_chkdiv == 1)                                           \
+                    result = firstVal;  /* division */                      \
+                else                                                        \
+                    result = 0;         /* remainder */                     \
+            } else {                                                        \
+                result = firstVal _op ((s1) vsrc2);                         \
+            }                                                               \
+            SET_REGISTER(vdst, result);                                     \
+        } else {                                                            \
+            SET_REGISTER(vdst,                                              \
+                (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2);                   \
+        }                                                                   \
+    }                                                                       \
+    FINISH(2);
+HANDLE_OP_X_INT_LIT8(OP_ADD_INT_LIT8,   "add", +, 0)
+HANDLE_OP_X_INT_LIT8(OP_MUL_INT_LIT8,   "mul", *, 0)
+HANDLE_OP_X_INT_LIT8(OP_DIV_INT_LIT8,   "div", /, 1)
+HANDLE_OP_X_INT_LIT8(OP_REM_INT_LIT8,   "rem", %, 2)
+HANDLE_OP_X_INT_LIT8(OP_AND_INT_LIT8,   "and", &, 0)
+HANDLE_OP_X_INT_LIT8(OP_OR_INT_LIT8,    "or",  |, 0)
+HANDLE_OP_X_INT_LIT8(OP_XOR_INT_LIT8,   "xor", ^, 0)
+
+/*
+ * 32-bit shift by an 8-bit literal: vAA <- vBB (shl/shr/ushr) #+CC.
+ * Shift count is masked to the low 5 bits per Java semantics (no C UB).
+ * _cast selects arithmetic (s4) vs. logical (u4) right shift.
+ */
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op)                \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/)                               \
+    {                                                                       \
+        u2 litInfo;                                                         \
+        vdst = INST_AA(inst);                                               \
+        litInfo = FETCH(1);                                                 \
+        vsrc1 = litInfo & 0xff;                                             \
+        vsrc2 = litInfo >> 8;       /* constant */                          \
+        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x",                              \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f));                  \
+    }                                                                       \
+    FINISH(2);
+
+HANDLE_OP_SHX_INT_LIT8(OP_SHL_INT_LIT8,   "shl", (s4), <<)
+HANDLE_OP_SHX_INT_LIT8(OP_SHR_INT_LIT8,   "shr", (s4), >>)
+HANDLE_OP_SHX_INT_LIT8(OP_USHR_INT_LIT8,  "ushr", (u4), >>)
+
+/*
+ * rsub-int/lit8: vAA <- literal (sign-extended CC) minus vBB.
+ * Reversed operand order, so it can't share HANDLE_OP_X_INT_LIT8.
+ */
+HANDLE_OPCODE(OP_RSUB_INT_LIT8 /*vAA, vBB, #+CC*/)
+    {
+        u2 litInfo;
+        vdst = INST_AA(inst);
+        litInfo = FETCH(1);
+        vsrc1 = litInfo & 0xff;
+        vsrc2 = litInfo >> 8;
+        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", "rsub", vdst, vsrc1, vsrc2);
+        SET_REGISTER(vdst, (s1) vsrc2 - (s4) GET_REGISTER(vsrc1));
+    }
+    FINISH(2);
+
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv)               \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1);             \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER(vsrc1) == 0) {                                 \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                goto exceptionThrown;                                       \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1));          \
+        FINISH(1);
+HANDLE_OP_X_INT_2ADDR(OP_ADD_INT_2ADDR, "add", +, false)
+HANDLE_OP_X_INT_2ADDR(OP_SUB_INT_2ADDR, "sub", -, false)
+HANDLE_OP_X_INT_2ADDR(OP_MUL_INT_2ADDR, "mul", *, false)
+HANDLE_OP_X_INT_2ADDR(OP_DIV_INT_2ADDR, "div", /, true)
+HANDLE_OP_X_INT_2ADDR(OP_REM_INT_2ADDR, "rem", %, true)
+HANDLE_OP_X_INT_2ADDR(OP_AND_INT_2ADDR, "and", &, false)
+HANDLE_OP_X_INT_2ADDR(OP_OR_INT_2ADDR,  "or", |, false)
+HANDLE_OP_X_INT_2ADDR(OP_XOR_INT_2ADDR, "xor", ^, false)
+
+
+/*
+ * 32-bit "/2addr" shift: vA <- vA (shl/shr/ushr) vB.
+ * Shift count masked to 5 bits per Java semantics; _cast selects
+ * arithmetic (s4) vs. logical (u4) right shift.
+ */
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op)               \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1);             \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f));     \
+        FINISH(1);
+
+HANDLE_OP_SHX_INT_2ADDR(OP_SHL_INT_2ADDR, "shl", (s4), <<)
+HANDLE_OP_SHX_INT_2ADDR(OP_SHR_INT_2ADDR, "shr", (s4), >>)
+HANDLE_OP_SHX_INT_2ADDR(OP_USHR_INT_2ADDR, "ushr", (u4), >>)
+
+
+/*
+ * Generic 64-bit binary operation: vAA <- vBB op vCC.
+ *
+ * "_chkdiv" selects divide-by-zero handling: 0 = none, 1 = div, 2 = rem.
+ * 0x8000000000000000 / -1 overflows, which is undefined behavior in C;
+ * Java defines it as Long.MIN_VALUE for div and 0 for rem.
+ */
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv)                    \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);       \
+        if (_chkdiv != 0) {                                                 \
+            s8 firstVal, secondVal, result;                                 \
+            firstVal = GET_REGISTER_WIDE(vsrc1);                            \
+            secondVal = GET_REGISTER_WIDE(vsrc2);                           \
+            if (secondVal == 0LL) {                                         \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                goto exceptionThrown;                                       \
+            }                                                               \
+            if ((u8) firstVal == 0x8000000000000000ULL &&                   \
+                secondVal == -1LL)                                          \
+            {                                                               \
+                /* MIN_VALUE / -1 overflows; Java defines the result */     \
+                if (_chkdiv == 1)                                           \
+                    result = firstVal;  /* division */                      \
+                else                                                        \
+                    result = 0;         /* remainder */                     \
+            } else {                                                        \
+                result = firstVal _op secondVal;                            \
+            }                                                               \
+            SET_REGISTER_WIDE(vdst, result);                                \
+        } else {                                                            \
+            SET_REGISTER_WIDE(vdst,                                         \
+                (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+        }                                                                   \
+    }                                                                       \
+    FINISH(2);
+HANDLE_OP_X_LONG(OP_ADD_LONG, "add", +, 0)
+HANDLE_OP_X_LONG(OP_SUB_LONG, "sub", -, 0)
+HANDLE_OP_X_LONG(OP_MUL_LONG, "mul", *, 0)
+HANDLE_OP_X_LONG(OP_DIV_LONG, "div", /, 1)
+HANDLE_OP_X_LONG(OP_REM_LONG, "rem", %, 2)
+HANDLE_OP_X_LONG(OP_AND_LONG, "and", &, 0)
+HANDLE_OP_X_LONG(OP_OR_LONG,  "or", |, 0)
+HANDLE_OP_X_LONG(OP_XOR_LONG, "xor", ^, 0)
+
+/*
+ * 64-bit shift: vAA <- vBB (shl/shr/ushr) vCC.
+ * Shift count comes from a 32-bit register and is masked to the low
+ * 6 bits (& 0x3f) per Java semantics, avoiding undefined shifts >= 64.
+ * _cast selects arithmetic (s8) vs. logical (u8) right shift.
+ */
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op)                    \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);       \
+        SET_REGISTER_WIDE(vdst,                                             \
+            _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+    }                                                                       \
+    FINISH(2);
+HANDLE_OP_SHX_LONG(OP_SHL_LONG, "shl", (s8), <<)
+HANDLE_OP_SHX_LONG(OP_SHR_LONG, "shr", (s8), >>)
+HANDLE_OP_SHX_LONG(OP_USHR_LONG, "ushr", (u8), >>)
+
+
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv)              \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1);            \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER_WIDE(vsrc1) == 0) {                            \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                goto exceptionThrown;                                       \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER_WIDE(vdst,                                             \
+            (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1)); \
+        FINISH(1);
+HANDLE_OP_X_LONG_2ADDR(OP_ADD_LONG_2ADDR, "add", +, false)
+HANDLE_OP_X_LONG_2ADDR(OP_SUB_LONG_2ADDR, "sub", -, false)
+HANDLE_OP_X_LONG_2ADDR(OP_MUL_LONG_2ADDR, "mul", *, false)
+HANDLE_OP_X_LONG_2ADDR(OP_DIV_LONG_2ADDR, "div", /, true)
+HANDLE_OP_X_LONG_2ADDR(OP_REM_LONG_2ADDR, "rem", %, true)
+HANDLE_OP_X_LONG_2ADDR(OP_AND_LONG_2ADDR, "and", &, false)
+HANDLE_OP_X_LONG_2ADDR(OP_OR_LONG_2ADDR,  "or", |, false)
+HANDLE_OP_X_LONG_2ADDR(OP_XOR_LONG_2ADDR, "xor", ^, false)
+
+
+/*
+ * 64-bit "/2addr" shift: vA <- vA (shl/shr/ushr) vB.
+ * Shift count masked to 6 bits per Java semantics; _cast selects
+ * arithmetic (s8) vs. logical (u8) right shift.
+ */
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op)              \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1);            \
+        SET_REGISTER_WIDE(vdst,                                             \
+            _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+        FINISH(1);
+
+HANDLE_OP_SHX_LONG_2ADDR(OP_SHL_LONG_2ADDR, "shl", (s8), <<)
+HANDLE_OP_SHX_LONG_2ADDR(OP_SHR_LONG_2ADDR, "shr", (s8), >>)
+HANDLE_OP_SHX_LONG_2ADDR(OP_USHR_LONG_2ADDR, "ushr", (u8), >>)
+
+
+/*
+ * Single-precision float binary op: vAA <- vBB op vCC.
+ * No explicit zero/NaN checks: IEEE-754 arithmetic defines those results.
+ * rem-float needs fmodf() and gets its own handler below.
+ */
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op)                            \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);      \
+        SET_REGISTER_FLOAT(vdst,                                            \
+            GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2));       \
+    }                                                                       \
+    FINISH(2);
+HANDLE_OP_X_FLOAT(OP_ADD_FLOAT, "add", +)
+HANDLE_OP_X_FLOAT(OP_SUB_FLOAT, "sub", -)
+HANDLE_OP_X_FLOAT(OP_MUL_FLOAT, "mul", *)
+HANDLE_OP_X_FLOAT(OP_DIV_FLOAT, "div", /)
+/*
+ * rem-float: vAA <- vBB % vCC via fmodf() (truncating remainder, which
+ * presumably matches the Java '%' operator for floats — not IEEE
+ * remainder()).  C has no '%' for floating-point, hence the call.
+ */
+HANDLE_OPCODE(OP_REM_FLOAT /*vAA, vBB, vCC*/)
+    {
+        u2 srcRegs;
+        vdst = INST_AA(inst);
+        srcRegs = FETCH(1);
+        vsrc1 = srcRegs & 0xff;
+        vsrc2 = srcRegs >> 8;
+        ILOGV("|%s-float v%d,v%d,v%d", "rem", vdst, vsrc1, vsrc2);
+        SET_REGISTER_FLOAT(vdst,
+            fmodf(GET_REGISTER_FLOAT(vsrc1), GET_REGISTER_FLOAT(vsrc2)));
+    }
+    FINISH(2);
+
+
+/*
+ * Double-precision binary op: vAA <- vBB op vCC.
+ * IEEE-754 arithmetic handles zero/NaN cases; rem-double uses fmod()
+ * in its own handler below.
+ */
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op)                           \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);     \
+        SET_REGISTER_DOUBLE(vdst,                                           \
+            GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2));     \
+    }                                                                       \
+    FINISH(2);
+HANDLE_OP_X_DOUBLE(OP_ADD_DOUBLE, "add", +)
+HANDLE_OP_X_DOUBLE(OP_SUB_DOUBLE, "sub", -)
+HANDLE_OP_X_DOUBLE(OP_MUL_DOUBLE, "mul", *)
+HANDLE_OP_X_DOUBLE(OP_DIV_DOUBLE, "div", /)
+/*
+ * rem-double: vAA <- vBB % vCC via fmod() (truncating remainder; C has
+ * no '%' for floating-point types).
+ */
+HANDLE_OPCODE(OP_REM_DOUBLE /*vAA, vBB, vCC*/)
+    {
+        u2 srcRegs;
+        vdst = INST_AA(inst);
+        srcRegs = FETCH(1);
+        vsrc1 = srcRegs & 0xff;
+        vsrc2 = srcRegs >> 8;
+        ILOGV("|%s-double v%d,v%d,v%d", "rem", vdst, vsrc1, vsrc2);
+        SET_REGISTER_DOUBLE(vdst,
+            fmod(GET_REGISTER_DOUBLE(vsrc1), GET_REGISTER_DOUBLE(vsrc2)));
+    }
+    FINISH(2);
+
+/*
+ * Single-precision "/2addr" binary op: vA <- vA op vB.
+ */
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op)                      \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1);           \
+        SET_REGISTER_FLOAT(vdst,                                            \
+            GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1));        \
+        FINISH(1);
+HANDLE_OP_X_FLOAT_2ADDR(OP_ADD_FLOAT_2ADDR, "add", +)
+HANDLE_OP_X_FLOAT_2ADDR(OP_SUB_FLOAT_2ADDR, "sub", -)
+HANDLE_OP_X_FLOAT_2ADDR(OP_MUL_FLOAT_2ADDR, "mul", *)
+HANDLE_OP_X_FLOAT_2ADDR(OP_DIV_FLOAT_2ADDR, "div", /)
+
+/* rem-float/2addr: vA <- vA % vB via fmodf() (no C '%' for floats). */
+HANDLE_OPCODE(OP_REM_FLOAT_2ADDR /*vA, vB*/)
+    vdst = INST_A(inst);
+    vsrc1 = INST_B(inst);
+    ILOGV("|%s-float-2addr v%d,v%d", "rem", vdst, vsrc1);
+    SET_REGISTER_FLOAT(vdst,
+        fmodf(GET_REGISTER_FLOAT(vdst), GET_REGISTER_FLOAT(vsrc1)));
+    FINISH(1);
+
+
+/*
+ * Double-precision "/2addr" binary op: vA <- vA op vB.
+ */
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op)                     \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1);          \
+        SET_REGISTER_DOUBLE(vdst,                                           \
+            GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1));      \
+        FINISH(1);
+
+HANDLE_OP_X_DOUBLE_2ADDR(OP_ADD_DOUBLE_2ADDR, "add", +)
+HANDLE_OP_X_DOUBLE_2ADDR(OP_SUB_DOUBLE_2ADDR, "sub", -)
+HANDLE_OP_X_DOUBLE_2ADDR(OP_MUL_DOUBLE_2ADDR, "mul", *)
+HANDLE_OP_X_DOUBLE_2ADDR(OP_DIV_DOUBLE_2ADDR, "div", /)
+
+/* rem-double/2addr: vA <- vA % vB via fmod() (no C '%' for doubles). */
+HANDLE_OPCODE(OP_REM_DOUBLE_2ADDR /*vA, vB*/)
+    vdst = INST_A(inst);
+    vsrc1 = INST_B(inst);
+    ILOGV("|%s-double-2addr v%d,v%d", "rem", vdst, vsrc1);
+    SET_REGISTER_DOUBLE(vdst,
+        fmod(GET_REGISTER_DOUBLE(vdst), GET_REGISTER_DOUBLE(vsrc1)));
+    FINISH(1);
+
+
+
+/*
+ * Array get: vAA <- vBB[vCC].
+ *
+ * EXPORT_PC up front because both the null check and the bounds check can
+ * throw.  The bounds test relies on unsigned comparison: a negative index
+ * read as u4 is huge, so a single ">= length" catches both directions.
+ * _type selects the element width for the contents[] access; _regsize
+ * token-pastes into SET_REGISTER (empty for 32-bit, _WIDE for 64-bit).
+ */
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        ArrayObject* arrayObj;                                              \
+        u2 arrayInfo;                                                       \
+        EXPORT_PC();                                                        \
+        vdst = INST_AA(inst);                                               \
+        arrayInfo = FETCH(1);                                               \
+        vsrc1 = arrayInfo & 0xff;    /* array ptr */                        \
+        vsrc2 = arrayInfo >> 8;      /* index */                            \
+        ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);        \
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);                      \
+        if (!checkForNull((Object*) arrayObj))                              \
+            goto exceptionThrown;                                           \
+        if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
+            LOGV("Invalid array access: %p %d (len=%d)\n",                  \
+                arrayObj, vsrc2, arrayObj->length);                         \
+            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+                NULL);                                                      \
+            goto exceptionThrown;                                           \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)]);            \
+        ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));  \
+    }                                                                       \
+    FINISH(2);
+HANDLE_OP_AGET(OP_AGET, "", u4, )
+HANDLE_OP_AGET(OP_AGET_OBJECT, "-object", u4, )
+HANDLE_OP_AGET(OP_AGET_WIDE, "-wide", s8, _WIDE)
+HANDLE_OP_AGET(OP_AGET_BOOLEAN, "-boolean", u1, )
+HANDLE_OP_AGET(OP_AGET_BYTE, "-byte", s1, )
+HANDLE_OP_AGET(OP_AGET_CHAR, "-char", u2, )
+HANDLE_OP_AGET(OP_AGET_SHORT, "-short", s2, )
+
+
+/*
+ * Array put for primitive types: vBB[vCC] <- vAA.
+ *
+ * Same null/bounds checking strategy as HANDLE_OP_AGET (unsigned compare
+ * catches negative indices).  OP_APUT_OBJECT is handled separately below
+ * because it also needs an array-store type check.
+ */
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        ArrayObject* arrayObj;                                              \
+        u2 arrayInfo;                                                       \
+        EXPORT_PC();                                                        \
+        vdst = INST_AA(inst);       /* AA: source value */                  \
+        arrayInfo = FETCH(1);                                               \
+        vsrc1 = arrayInfo & 0xff;   /* BB: array ptr */                     \
+        vsrc2 = arrayInfo >> 8;     /* CC: index */                         \
+        ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);        \
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);                      \
+        if (!checkForNull((Object*) arrayObj))                              \
+            goto exceptionThrown;                                           \
+        if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
+            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+                NULL);                                                      \
+            goto exceptionThrown;                                           \
+        }                                                                   \
+        ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+        ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)] =                \
+            GET_REGISTER##_regsize(vdst);                                   \
+    }                                                                       \
+    FINISH(2);
+HANDLE_OP_APUT(OP_APUT, "", u4, )
+HANDLE_OP_APUT(OP_APUT_WIDE, "-wide", s8, _WIDE)
+HANDLE_OP_APUT(OP_APUT_BOOLEAN, "-boolean", u1, )
+HANDLE_OP_APUT(OP_APUT_BYTE, "-byte", s1, )
+HANDLE_OP_APUT(OP_APUT_CHAR, "-char", u2, )
+HANDLE_OP_APUT(OP_APUT_SHORT, "-short", s2, )
+
+/*
+ * aput-object: vBB[vCC] <- vAA, with the array-store type check.
+ * Storing null is always allowed; for non-null values the stored object's
+ * class must be assignable to the array's component type, otherwise an
+ * ArrayStoreException is thrown (via dvmCanPutArrayElement).
+ */
+HANDLE_OPCODE(OP_APUT_OBJECT /*vAA, vBB, vCC*/)
+    {
+        ArrayObject* arrayObj;
+        Object* obj;
+        u2 arrayInfo;
+        EXPORT_PC();
+        vdst = INST_AA(inst);       /* AA: source value */
+        arrayInfo = FETCH(1);
+        vsrc1 = arrayInfo & 0xff;   /* BB: array ptr */
+        vsrc2 = arrayInfo >> 8;     /* CC: index */
+        ILOGV("|aput%s v%d,v%d,v%d", "-object", vdst, vsrc1, vsrc2);
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+        if (!checkForNull((Object*) arrayObj))
+            goto exceptionThrown;
+        /* unsigned compare also rejects negative indices */
+        if (GET_REGISTER(vsrc2) >= arrayObj->length) {
+            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;",
+                NULL);
+            goto exceptionThrown;
+        }
+        obj = (Object*) GET_REGISTER(vdst);
+        if (obj != NULL) {
+            if (!checkForNull(obj))
+                goto exceptionThrown;
+            if (!dvmCanPutArrayElement(obj->clazz, arrayObj->obj.clazz)) {
+                LOGV("Can't put a '%s'(%p) into array type='%s'(%p)\n",
+                    obj->clazz->descriptor, obj,
+                    arrayObj->obj.clazz->descriptor, arrayObj);
+                //dvmDumpClass(obj->clazz);
+                //dvmDumpClass(arrayObj->obj.clazz);
+                dvmThrowException("Ljava/lang/ArrayStoreException;", NULL);
+                goto exceptionThrown;
+            }
+        }
+        ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));
+        ((u4*) arrayObj->contents)[GET_REGISTER(vsrc2)] =
+            GET_REGISTER(vdst);
+    }
+    FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits.  Consider:
+ *   short foo = -1  (sets a 32-bit register to 0xffffffff)
+ *   iput-quick foo  (writes all 32 bits to the field)
+ *   short bar = 1   (sets a 32-bit register to 0x00000001)
+ *   iput-short      (writes the low 16 bits to the field)
+ *   iget-quick foo  (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field.  This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time.  On
+ * a device with a 16-bit data bus this is sub-optimal.  (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
+/*
+ * Instance field get: vA <- obj(vB).field@CCCC.
+ *
+ * Looks up the resolved field in the per-DEX cache first and falls back to
+ * full resolution (which may throw) on a miss.  EXPORT_PC up front because
+ * the null check and resolution can both throw.  _ftype/_regsize pick the
+ * dvmGetField* accessor and the register width.
+ */
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            goto exceptionThrown;                                           \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(method->clazz, ref);               \
+            if (ifield == NULL)                                             \
+                goto exceptionThrown;                                       \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+HANDLE_IGET_X(OP_IGET_WIDE,             "-wide", Long, _WIDE)
+HANDLE_IGET_X(OP_IGET_OBJECT,           "-object", Object, _AS_OBJECT)
+/*
+ * NOTE(review): the bare HANDLE_OPCODE entries below appear to make the
+ * sub-32-bit variants fall through into the OP_IGET handler (presumably
+ * HANDLE_OPCODE expands to a case label) — confirm against the
+ * interpreter's HANDLE_OPCODE definition.
+ */
+HANDLE_OPCODE(OP_IGET_BOOLEAN)
+HANDLE_OPCODE(OP_IGET_BYTE)
+HANDLE_OPCODE(OP_IGET_CHAR)
+HANDLE_OPCODE(OP_IGET_SHORT)
+HANDLE_IGET_X(OP_IGET,                  "", Int, )
+
+
+/*
+ * "Quick" instance field get: the instruction carries the resolved byte
+ * offset directly, so no field resolution (and no EXPORT_PC here — the
+ * null check uses the ExportPC variant since the PC wasn't exported).
+ */
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        Object* obj;                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field offset */                          \
+        ILOGV("|iget%s-quick v%d,v%d,field@+%u",                            \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNullExportPC(obj, fp, pc))                             \
+            goto exceptionThrown;                                           \
+        SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref));        \
+        ILOGV("+ IGETQ %d=0x%08llx", ref,                                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(2);
+HANDLE_IGET_X_QUICK(OP_IGET_QUICK,          "", Int, )
+HANDLE_IGET_X_QUICK(OP_IGET_OBJECT_QUICK,   "-object", Object, _AS_OBJECT)
+HANDLE_IGET_X_QUICK(OP_IGET_WIDE_QUICK,     "-wide", Long, _WIDE)
+
+
+/*
+ * Instance field put: obj(vB).field@CCCC <- vA.
+ *
+ * Mirrors HANDLE_IGET_X: cached-field lookup with fall-back resolution,
+ * EXPORT_PC first because the null check and resolution can throw.
+ */
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            goto exceptionThrown;                                           \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(method->clazz, ref);               \
+            if (ifield == NULL)                                             \
+                goto exceptionThrown;                                       \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+HANDLE_IPUT_X(OP_IPUT_WIDE,             "-wide", Long, _WIDE)
+HANDLE_IPUT_X(OP_IPUT_OBJECT,           "-object", Object, _AS_OBJECT)
+/*
+ * NOTE(review): bare HANDLE_OPCODE entries — the sub-32-bit variants
+ * appear to fall through into the OP_IPUT handler below; confirm against
+ * the HANDLE_OPCODE definition.
+ */
+HANDLE_OPCODE(OP_IPUT_BOOLEAN)
+HANDLE_OPCODE(OP_IPUT_BYTE)
+HANDLE_OPCODE(OP_IPUT_CHAR)
+HANDLE_OPCODE(OP_IPUT_SHORT)
+HANDLE_IPUT_X(OP_IPUT,                  "", Int, )
+
+
+/*
+ * "Quick" instance field put: the 16-bit operand is the field's byte
+ * offset rather than a field ref, so no resolution or caching is
+ * needed.  The only failure mode is a null object reference (the
+ * checkForNullExportPC variant exports the PC itself before throwing).
+ */
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        Object* obj;                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field offset */                          \
+        ILOGV("|iput%s-quick v%d,v%d,field@0x%04x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNullExportPC(obj, fp, pc))                             \
+            goto exceptionThrown;                                           \
+        dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst));        \
+        ILOGV("+ IPUTQ %d=0x%08llx", ref,                                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(2);
+HANDLE_IPUT_X_QUICK(OP_IPUT_QUICK,          "", Int, )
+HANDLE_IPUT_X_QUICK(OP_IPUT_OBJECT_QUICK,   "-object", Object, _AS_OBJECT)
+HANDLE_IPUT_X_QUICK(OP_IPUT_WIDE_QUICK,     "-wide", Long, _WIDE)
+
+
+/*
+ * Static field "get" (sget and typed variants): load the static field
+ * identified by field ref BBBB into register vAA.  The field is
+ * resolved on first use; EXPORT_PC() happens only on that slow path,
+ * since resolution is the only operation here that can throw.
+ */
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        vdst = INST_AA(inst);                                               \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref);           \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(method->clazz, ref);             \
+            if (sfield == NULL)                                             \
+                goto exceptionThrown;                                       \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+HANDLE_SGET_X(OP_SGET_WIDE,             "-wide", Long, _WIDE)
+HANDLE_SGET_X(OP_SGET_OBJECT,           "-object", Object, _AS_OBJECT)
+//HANDLE_SGET_X(OP_SGET_BOOLEAN,          "-boolean", Boolean, )
+//HANDLE_SGET_X(OP_SGET_BYTE,             "-byte", Int, )
+//HANDLE_SGET_X(OP_SGET_CHAR,             "-char", Int, )
+//HANDLE_SGET_X(OP_SGET_SHORT,            "-short", Int, )
+/*
+ * NOTE(review): bare labels; assuming HANDLE_OPCODE() expands to a
+ * label only, these fall through to the OP_SGET (Int) handler below,
+ * matching the disabled typed instantiations above.
+ */
+HANDLE_OPCODE(OP_SGET_BOOLEAN)
+HANDLE_OPCODE(OP_SGET_BYTE)
+HANDLE_OPCODE(OP_SGET_CHAR)
+HANDLE_OPCODE(OP_SGET_SHORT)
+HANDLE_SGET_X(OP_SGET,                  "", Int, )
+
+
+/*
+ * Static field "put" (sput and typed variants): store register vAA into
+ * the static field identified by field ref BBBB.  Mirror image of
+ * HANDLE_SGET_X above: resolution is cached, EXPORT_PC() only on the
+ * resolve slow path.
+ */
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        vdst = INST_AA(inst);                                               \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref);           \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(method->clazz, ref);             \
+            if (sfield == NULL)                                             \
+                goto exceptionThrown;                                       \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+HANDLE_SPUT_X(OP_SPUT_WIDE,             "-wide", Long, _WIDE)
+HANDLE_SPUT_X(OP_SPUT_OBJECT,           "-object", Object, _AS_OBJECT)
+//HANDLE_SPUT_X(OP_SPUT_BOOLEAN,          "-boolean", Boolean, )
+//HANDLE_SPUT_X(OP_SPUT_BYTE,             "-byte", Int, )
+//HANDLE_SPUT_X(OP_SPUT_CHAR,             "-char", Int, )
+//HANDLE_SPUT_X(OP_SPUT_SHORT,            "-short", Int, )
+/*
+ * NOTE(review): bare labels; assuming HANDLE_OPCODE() expands to a
+ * label only, these fall through to the OP_SPUT (Int) handler below.
+ */
+HANDLE_OPCODE(OP_SPUT_BOOLEAN)
+HANDLE_OPCODE(OP_SPUT_BYTE)
+HANDLE_OPCODE(OP_SPUT_CHAR)
+HANDLE_OPCODE(OP_SPUT_SHORT)
+HANDLE_SPUT_X(OP_SPUT,                  "", Int, )
+
+
+/*
+ * const/4 vA, #+B: load a 4-bit immediate, sign-extended to 32 bits.
+ */
+HANDLE_OPCODE(OP_CONST_4 /*vA, #+B*/)
+    {
+        s4 tmp;
+
+        vdst = INST_A(inst);
+        tmp = (s4) (INST_B(inst) << 28) >> 28;  // sign extend 4-bit value
+        ILOGV("|const/4 v%d,#0x%02x", vdst, (s4)tmp);
+        SET_REGISTER(vdst, tmp);
+    }
+    FINISH(1);
+
+/*
+ * const/16 vAA, #+BBBB: load a 16-bit immediate, sign-extended
+ * (via the s2 cast) to 32 bits.
+ */
+HANDLE_OPCODE(OP_CONST_16 /*vAA, #+BBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|const/16 v%d,#0x%04x", vdst, (s2)vsrc1);
+    SET_REGISTER(vdst, (s2) vsrc1);
+    FINISH(2);
+
+/*
+ * const vAA, #+BBBBBBBB: load a full 32-bit immediate assembled from
+ * two 16-bit code units, low-order unit first.
+ */
+HANDLE_OPCODE(OP_CONST /*vAA, #+BBBBBBBB*/)
+    {
+        u4 tmp;
+
+        vdst = INST_AA(inst);
+        tmp = FETCH(1);
+        tmp |= (u4)FETCH(2) << 16;
+        ILOGV("|const v%d,#0x%08x", vdst, tmp);
+        SET_REGISTER(vdst, tmp);
+    }
+    FINISH(3);
+
+/*
+ * const/high16 vAA, #+BBBB0000: 16-bit immediate placed in the high
+ * half of the register, low half zeroed.
+ */
+HANDLE_OPCODE(OP_CONST_HIGH16 /*vAA, #+BBBB0000*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|const/high16 v%d,#0x%04x0000", vdst, vsrc1);
+    SET_REGISTER(vdst, vsrc1 << 16);
+    FINISH(2);
+
+/*
+ * const-wide/16 vAA, #+BBBB: 16-bit immediate sign-extended to 64 bits.
+ */
+HANDLE_OPCODE(OP_CONST_WIDE_16 /*vAA, #+BBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|const-wide/16 v%d,#0x%04x", vdst, (s2)vsrc1);
+    SET_REGISTER_WIDE(vdst, (s2)vsrc1);
+    FINISH(2);
+
+/*
+ * const-wide/32 vAA, #+BBBBBBBB: 32-bit immediate sign-extended
+ * (via the s4 cast) to 64 bits.
+ */
+HANDLE_OPCODE(OP_CONST_WIDE_32 /*vAA, #+BBBBBBBB*/)
+    {
+        u4 tmp;
+
+        vdst = INST_AA(inst);
+        tmp = FETCH(1);
+        tmp |= (u4)FETCH(2) << 16;
+        ILOGV("|const-wide/32 v%d,#0x%08x", vdst, tmp);
+        SET_REGISTER_WIDE(vdst, (s4) tmp);
+    }
+    FINISH(3);
+
+/*
+ * const-wide vAA, #+BBBBBBBBBBBBBBBB: full 64-bit immediate assembled
+ * from four 16-bit code units, low-order unit first.
+ */
+HANDLE_OPCODE(OP_CONST_WIDE /*vAA, #+BBBBBBBBBBBBBBBB*/)
+    {
+        u8 tmp;
+
+        vdst = INST_AA(inst);
+        tmp = FETCH(1);
+        tmp |= (u8)FETCH(2) << 16;
+        tmp |= (u8)FETCH(3) << 32;
+        tmp |= (u8)FETCH(4) << 48;
+        ILOGV("|const-wide v%d,#0x%08llx", vdst, tmp);
+        SET_REGISTER_WIDE(vdst, tmp);
+    }
+    FINISH(5);
+
+/*
+ * const-wide/high16 vAA, #+BBBB000000000000: 16-bit immediate placed
+ * in the top 16 bits of a 64-bit register pair, rest zeroed.
+ */
+HANDLE_OPCODE(OP_CONST_WIDE_HIGH16 /*vAA, #+BBBB000000000000*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|const-wide/high16 v%d,#0x%04x000000000000", vdst, vsrc1);
+    SET_REGISTER_WIDE(vdst, ((u8) vsrc1) << 48);
+    FINISH(2);
+
+/*
+ * const-string vAA, string@BBBB: load a reference to the resolved
+ * String object.  Resolution happens once and is cached; it can throw,
+ * hence EXPORT_PC() before dvmResolveString() on the slow path.
+ */
+HANDLE_OPCODE(OP_CONST_STRING /*vAA, string@BBBB*/)
+    {
+        StringObject* strObj;
+
+        vdst = INST_AA(inst);
+        ref = FETCH(1);
+        ILOGV("|const-string v%d string@0x%04x", vdst, ref);
+        strObj = dvmDexGetResolvedString(methodClassDex, ref);
+        if (strObj == NULL) {
+            EXPORT_PC();
+            strObj = dvmResolveString(method->clazz, ref);
+            if (strObj == NULL)
+                goto exceptionThrown;
+        }
+        SET_REGISTER(vdst, (u4) strObj);
+    }
+    FINISH(2);
+
+/*
+ * const-string/jumbo vAA, string@BBBBBBBB: same as const-string but
+ * with a 32-bit string index assembled from two code units.
+ */
+HANDLE_OPCODE(OP_CONST_STRING_JUMBO /*vAA, string@BBBBBBBB*/)
+    {
+        StringObject* strObj;
+        u4 tmp;
+
+        vdst = INST_AA(inst);
+        tmp = FETCH(1);
+        tmp |= (u4)FETCH(2) << 16;
+        ILOGV("|const-string/jumbo v%d string@0x%08x", vdst, tmp);
+        strObj = dvmDexGetResolvedString(methodClassDex, tmp);
+        if (strObj == NULL) {
+            EXPORT_PC();
+            strObj = dvmResolveString(method->clazz, tmp);
+            if (strObj == NULL)
+                goto exceptionThrown;
+        }
+        SET_REGISTER(vdst, (u4) strObj);
+    }
+    FINISH(3);
+
+/*
+ * const-class vAA, class@BBBB: load a reference to the resolved Class
+ * object.  Resolution can throw (e.g. class not found).
+ */
+HANDLE_OPCODE(OP_CONST_CLASS /*vAA, class@BBBB*/)
+    {
+        ClassObject* clazz;
+
+        vdst = INST_AA(inst);
+        ref = FETCH(1);
+        ILOGV("|const-class v%d class@0x%04x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            EXPORT_PC();
+            clazz = dvmResolveClass(method->clazz, ref, true);
+            if (clazz == NULL)
+                goto exceptionThrown;
+        }
+        SET_REGISTER(vdst, (u4) clazz);
+    }
+    FINISH(2);
+
+/*
+ * monitor-enter vAA: acquire the monitor of the object in vAA.
+ * Throws NPE on a null reference; dvmLockObject() does the actual
+ * (possibly blocking) acquisition.
+ */
+HANDLE_OPCODE(OP_MONITOR_ENTER /*vAA*/)
+    {
+        Object* obj;
+
+        vsrc1 = INST_AA(inst);
+        ILOGV("|monitor-enter v%d %s(0x%08x)",
+            vsrc1, kSpacing+6, GET_REGISTER(vsrc1));
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (!checkForNullExportPC(obj, fp, pc))
+            goto exceptionThrown;
+        ILOGV("+ locking %p %s\n", obj, obj->clazz->descriptor);
+#ifdef WITH_MONITOR_TRACKING
+        EXPORT_PC();        /* need for stack trace */
+#endif
+        dvmLockObject(self, obj);
+#ifdef WITH_DEADLOCK_PREDICTION
+        /* deadlock predictor may have flagged this acquisition */
+        if (dvmCheckException(self))
+            goto exceptionThrown;
+#endif
+    }
+    FINISH(1);
+
+/*
+ * monitor-exit vAA: release the monitor of the object in vAA.  Per the
+ * Dalvik spec, failures (null reference, not the owner) must raise
+ * their exception at the *following* instruction -- hence the
+ * ADJUST_PC(1) calls before jumping to the exception handler.
+ */
+HANDLE_OPCODE(OP_MONITOR_EXIT /*vAA*/)
+    {
+        Object* obj;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);
+        ILOGV("|monitor-exit v%d %s(0x%08x)",
+            vsrc1, kSpacing+5, GET_REGISTER(vsrc1));
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (!checkForNull(obj)) {
+            /*
+             * The exception needs to be processed at the *following*
+             * instruction, not the current instruction (see the Dalvik
+             * spec).  Because we're jumping to an exception handler,
+             * we're not actually at risk of skipping an instruction
+             * by doing so.
+             */
+            ADJUST_PC(1);           /* monitor-exit width is 1 */
+            goto exceptionThrown;
+        }
+        ILOGV("+ unlocking %p %s\n", obj, obj->clazz->descriptor);
+        if (!dvmUnlockObject(self, obj)) {
+            assert(dvmCheckException(self));
+            ADJUST_PC(1);
+            goto exceptionThrown;
+        }
+    }
+    FINISH(1);
+
+/*
+ * check-cast vAA, class@BBBB: throw ClassCastException unless the
+ * object in vAA is null or an instance of the given class.  A null
+ * reference always passes.  EXPORT_PC() up front: class resolution
+ * and the thrown exception both need an accurate PC.
+ */
+HANDLE_OPCODE(OP_CHECK_CAST /*vAA, class@BBBB*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);
+        ref = FETCH(1);         /* class to check against */
+        ILOGV("|check-cast v%d,class@0x%04x", vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj != NULL) {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNull(obj))     /* do additional checks */
+                goto exceptionThrown;
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                clazz = dvmResolveClass(method->clazz, ref, false);
+                if (clazz == NULL)
+                    goto exceptionThrown;
+            }
+            if (!dvmInstanceof(obj->clazz, clazz)) {
+                dvmThrowExceptionWithClassMessage(
+                    "Ljava/lang/ClassCastException;", obj->clazz->descriptor);
+                goto exceptionThrown;
+            }
+        }
+    }
+    FINISH(2);
+
+/*
+ * instance-of vA, vB, class@CCCC: set vA to 1 if the object in vB is
+ * an instance of the given class, 0 otherwise.  A null reference
+ * yields 0.  EXPORT_PC() only on the resolve slow path.
+ */
+HANDLE_OPCODE(OP_INSTANCE_OF /*vA, vB, class@CCCC*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        vdst = INST_A(inst);
+        vsrc1 = INST_B(inst);   /* object to check */
+        ref = FETCH(1);         /* class to check against */
+        ILOGV("|instance-of v%d,v%d,class@0x%04x", vdst, vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj == NULL) {
+            SET_REGISTER(vdst, 0);
+        } else {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNullExportPC(obj, fp, pc)) /* do additional checks */
+                goto exceptionThrown;
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                EXPORT_PC();
+                clazz = dvmResolveClass(method->clazz, ref, true);
+                if (clazz == NULL)
+                    goto exceptionThrown;
+            }
+            SET_REGISTER(vdst, dvmInstanceof(obj->clazz, clazz));
+        }
+    }
+    FINISH(2);
+
+/*
+ * array-length vA, vB: store the length of the array referenced by vB
+ * into vA.  Throws NPE for a null reference; the verifier guarantees
+ * the register actually holds an array reference.
+ */
+HANDLE_OPCODE(OP_ARRAY_LENGTH /*vA, vB*/)
+    {
+        ArrayObject* arrayObj;
+
+        vdst = INST_A(inst);
+        vsrc1 = INST_B(inst);
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+        ILOGV("|array-length v%d,v%d  (%p)", vdst, vsrc1, arrayObj);
+        if (!checkForNullExportPC((Object*) arrayObj, fp, pc))
+            goto exceptionThrown;
+        /* verifier guarantees this is an array reference */
+        SET_REGISTER(vdst, arrayObj->length);
+    }
+    FINISH(1);
+
+/*
+ * new-instance vAA, class@BBBB: allocate an uninitialized instance of
+ * the named class (the <init> call follows as a separate invoke).
+ * Resolves and, if necessary, initializes the class; rejects interface
+ * and abstract classes with InstantiationError.  EXPORT_PC() up front
+ * because resolution, class init, and allocation can all throw.
+ */
+HANDLE_OPCODE(OP_NEW_INSTANCE /*vAA, class@BBBB*/)
+    {
+        ClassObject* clazz;
+        Object* newObj;
+
+        EXPORT_PC();
+
+        vdst = INST_AA(inst);
+        ref = FETCH(1);
+        ILOGV("|new-instance v%d,class@0x%04x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            clazz = dvmResolveClass(method->clazz, ref, false);
+            if (clazz == NULL)
+                goto exceptionThrown;
+        }
+
+        if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz))
+            goto exceptionThrown;
+
+        /*
+         * Note: the verifier can ensure that this never happens, allowing us
+         * to remove the check.  However, the spec requires we throw the
+         * exception at runtime, not verify time, so the verifier would
+         * need to replace the new-instance call with a magic "throw
+         * InstantiationError" instruction.
+         *
+         * Since this relies on the verifier, which is optional, we would
+         * also need a "new-instance-quick" instruction to identify instances
+         * that don't require the check.
+         */
+        if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
+            dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationError;",
+                clazz->descriptor);
+            goto exceptionThrown;
+        }
+        newObj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+        if (newObj == NULL)
+            goto exceptionThrown;
+        SET_REGISTER(vdst, (u4) newObj);
+    }
+    FINISH(2);
+
+/*
+ * new-array vA, vB, class@CCCC: allocate an array of the named array
+ * class, with the (non-negative) length taken from vB.  Throws
+ * NegativeArraySizeException for a negative length.  The verifier
+ * guarantees the resolved class is an initialized array class.
+ */
+HANDLE_OPCODE(OP_NEW_ARRAY /*vA, vB, class@CCCC*/)
+    {
+        ClassObject* arrayClass;
+        ArrayObject* newArray;
+        s4 length;
+
+        EXPORT_PC();
+
+        vdst = INST_A(inst);
+        vsrc1 = INST_B(inst);       /* length reg */
+        ref = FETCH(1);
+        ILOGV("|new-array v%d,v%d,class@0x%04x  (%d elements)",
+            vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
+        length = (s4) GET_REGISTER(vsrc1);
+        if (length < 0) {
+            dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+            goto exceptionThrown;
+        }
+        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (arrayClass == NULL) {
+            arrayClass = dvmResolveClass(method->clazz, ref, false);
+            if (arrayClass == NULL)
+                goto exceptionThrown;
+        }
+        /* verifier guarantees this is an array class */
+        assert(dvmIsArrayClass(arrayClass));
+        assert(dvmIsClassInitialized(arrayClass));
+
+        newArray = dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
+        if (newArray == NULL)
+            goto exceptionThrown;
+        SET_REGISTER(vdst, (u4) newArray);
+    }
+    FINISH(2);
+
+/*
+ * filled-new-array / filled-new-array-range: allocate a new array and
+ * fill it from argument registers, leaving the reference in "retval"
+ * (picked up by a following move-result-object).  The two opcodes share
+ * one body, using the method-call "methodCallRange" flag to select
+ * range vs. 4-bit register decoding.  Only 'I'-typed (32-bit int)
+ * element arrays are implemented here; 64-bit, reference, and other
+ * primitive element types throw InternalError/RuntimeError.
+ */
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_RANGE /*{vCCCC..v(CCCC+AA-1)}, class@BBBB*/)
+    methodCallRange = true;     // work this like the method call variants
+    goto filledNewArray;
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY /*vB, {vD, vE, vF, vG, vA}, class@CCCC*/)
+    methodCallRange = false;
+filledNewArray:
+    {
+        ClassObject* arrayClass;
+        ArrayObject* newArray;
+        int* contents;
+        char typeCh;
+        int i;
+        u4 arg5;
+
+        EXPORT_PC();
+
+        ref = FETCH(1);             /* class ref */
+        vdst = FETCH(2);            /* first 4 regs -or- range base */
+
+        if (methodCallRange) {
+            vsrc1 = INST_AA(inst);  /* #of elements */
+            arg5 = -1;              /* silence compiler warning */
+            ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+        } else {
+            arg5 = INST_A(inst);
+            vsrc1 = INST_B(inst);   /* #of elements */
+            ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1, ref, vdst, arg5);
+        }
+
+        /*
+         * Resolve the array class.
+         */
+        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (arrayClass == NULL) {
+            arrayClass = dvmResolveClass(method->clazz, ref, false);
+            if (arrayClass == NULL)
+                goto exceptionThrown;
+        }
+        /*
+        if (!dvmIsArrayClass(arrayClass)) {
+            dvmThrowException("Ljava/lang/RuntimeError;",
+                "filled-new-array needs array class");
+            goto exceptionThrown;
+        }
+        */
+        /* verifier guarantees this is an array class */
+        assert(dvmIsArrayClass(arrayClass));
+        assert(dvmIsClassInitialized(arrayClass));
+
+        /*
+         * Create an array of the specified type.
+         */
+        LOGVV("+++ filled-new-array type is '%s'\n", arrayClass->descriptor);
+        /* element type is the char after the leading '[' of the descriptor */
+        typeCh = arrayClass->descriptor[1];
+        if (typeCh == 'D' || typeCh == 'J') {
+            /* category 2 primitives not allowed */
+            dvmThrowException("Ljava/lang/RuntimeError;",
+                "bad filled array req");
+            goto exceptionThrown;
+        } else if (typeCh == 'L' || typeCh == '[') {
+            /* create array of objects or array of arrays */
+            /* TODO: need some work in the verifier before we allow this */
+            LOGE("fnao not implemented\n");
+            dvmThrowException("Ljava/lang/InternalError;",
+                "filled-new-array not implemented for reference types");
+            goto exceptionThrown;
+        } else if (typeCh != 'I') {
+            /* TODO: requires multiple "fill in" loops with different widths */
+            LOGE("non-int not implemented\n");
+            dvmThrowException("Ljava/lang/InternalError;",
+                "filled-new-array not implemented for anything but 'int'");
+            goto exceptionThrown;
+        }
+
+        assert(strchr("BCIFZ", typeCh) != NULL);
+        newArray = dvmAllocPrimitiveArray(arrayClass->descriptor[1], vsrc1,
+                    ALLOC_DONT_TRACK);
+        if (newArray == NULL)
+            goto exceptionThrown;
+
+        /*
+         * Fill in the elements.  It's legal for vsrc1 to be zero.
+         */
+        contents = (int*) newArray->contents;
+        if (methodCallRange) {
+            for (i = 0; i < vsrc1; i++)
+                contents[i] = GET_REGISTER(vdst+i);
+        } else {
+            assert(vsrc1 <= 5);
+            /* the 5th arg lives in INST_A; the rest are packed in vdst */
+            if (vsrc1 == 5) {
+                contents[4] = GET_REGISTER(arg5);
+                vsrc1--;
+            }
+            for (i = 0; i < vsrc1; i++) {
+                contents[i] = GET_REGISTER(vdst & 0x0f);
+                vdst >>= 4;
+            }
+        }
+
+        /* result is retrieved with move-result-object */
+        retval.l = newArray;
+    }
+    FINISH(3);
+
+/*
+ * fill-array-data vAA, +BBBBBBBB: fill the array referenced by vAA
+ * from the inline data table located at pc+offset.  The debug-build
+ * check rejects a table pointer outside the current method's insns.
+ */
+HANDLE_OPCODE(OP_FILL_ARRAY_DATA /*vAA, +BBBBBBBB*/)
+    {
+        const u2* arrayData;
+        s4 offset;
+        ArrayObject* arrayObj;
+
+        EXPORT_PC();
+        vsrc1 = INST_AA(inst);
+        offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+        ILOGV("|fill-array-data v%d +0x%04x", vsrc1, offset);
+        arrayData = pc + offset;       // offset in 16-bit units
+#ifndef NDEBUG
+        if (arrayData < method->insns ||
+            arrayData >= method->insns + dvmGetMethodInsnsSize(method))
+        {
+            /* should have been caught in verifier */
+            dvmThrowException("Ljava/lang/InternalError;", 
+                              "bad fill array data");
+            goto exceptionThrown;
+        }
+#endif
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+        if (!dvmInterpHandleFillArrayData(arrayObj, arrayData)) {
+            goto exceptionThrown;
+        }
+        FINISH(3);
+    }
+
+/*
+ * throw vAA: throw the exception object in vAA.  A null reference is
+ * converted into a NullPointerException by checkForNullExportPC();
+ * otherwise the requested exception becomes the pending exception.
+ * Either way, control goes to the common exception-handling code.
+ */
+HANDLE_OPCODE(OP_THROW /*vAA*/)
+    {
+        Object* obj;
+
+        vsrc1 = INST_AA(inst);
+        ILOGV("|throw v%d  (%p)", vsrc1, (void*)GET_REGISTER(vsrc1));
+        obj = (Object*) GET_REGISTER(vsrc1);
+        if (!checkForNullExportPC(obj, fp, pc)) {
+            /* will throw a null pointer exception */
+            LOGVV("Bad exception\n");
+        } else {
+            /* use the requested exception */
+            dvmSetException(self, obj);
+        }
+        goto exceptionThrown;
+    }
+
+/*
+ * packed-switch vAA, +BBBBBBBB: branch through a dense jump table at
+ * pc+offset, keyed on the value in vAA.  dvmInterpHandlePackedSwitch()
+ * returns the relative branch target (the fall-through width if no
+ * case matches).  Backward/zero branches run PERIODIC_CHECKS so loops
+ * built on switches still honor suspend requests.
+ */
+HANDLE_OPCODE(OP_PACKED_SWITCH /*vAA, +BBBBBBBB*/)
+    {
+        const u2* switchData;
+        u4 testVal;
+        s4 offset;
+
+        vsrc1 = INST_AA(inst);
+        offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+        ILOGV("|packed-switch v%d +0x%04x", vsrc1, offset);
+        switchData = pc + offset;       // offset in 16-bit units
+#ifndef NDEBUG
+        if (switchData < method->insns ||
+            switchData >= method->insns + dvmGetMethodInsnsSize(method))
+        {
+            /* should have been caught in verifier */
+            EXPORT_PC();
+            dvmThrowException("Ljava/lang/InternalError;", "bad packed switch");
+            goto exceptionThrown;
+        }
+#endif
+        testVal = GET_REGISTER(vsrc1);
+
+        offset = dvmInterpHandlePackedSwitch(switchData, testVal);
+        ILOGV("> branch taken (0x%04x)\n", offset);
+        if (offset <= 0)    /* uncommon */
+            PERIODIC_CHECKS(kInterpEntryInstr, offset);
+        FINISH(offset);
+    }
+
+/*
+ * sparse-switch vAA, +BBBBBBBB: same as packed-switch, but the table
+ * holds sorted key/target pairs instead of a dense range.
+ */
+HANDLE_OPCODE(OP_SPARSE_SWITCH /*vAA, +BBBBBBBB*/)
+    {
+        const u2* switchData;
+        u4 testVal;
+        s4 offset;
+
+        vsrc1 = INST_AA(inst);
+        offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+        ILOGV("|sparse-switch v%d +0x%04x", vsrc1, offset);
+        switchData = pc + offset;       // offset in 16-bit units
+#ifndef NDEBUG
+        if (switchData < method->insns ||
+            switchData >= method->insns + dvmGetMethodInsnsSize(method))
+        {
+            /* should have been caught in verifier */
+            EXPORT_PC();
+            dvmThrowException("Ljava/lang/InternalError;", "bad sparse switch");
+            goto exceptionThrown;
+        }
+#endif
+        testVal = GET_REGISTER(vsrc1);
+
+        offset = dvmInterpHandleSparseSwitch(switchData, testVal);
+        ILOGV("> branch taken (0x%04x)\n", offset);
+        if (offset <= 0)  /* uncommon */
+            PERIODIC_CHECKS(kInterpEntryInstr, offset);
+        FINISH(offset);
+    }
+
+/*
+ * execute-inline vB, {vD..vG}, inline@CCCC: invoke the intrinsic
+ * "inline" operation identified by CCCC with up to four register
+ * arguments; the result lands in retval.  The debug interpreter build
+ * routes through the Dbg variant of the dispatcher.
+ */
+HANDLE_OPCODE(OP_EXECUTE_INLINE /*vB, {vD, vE, vF, vG}, inline@CCCC*/)
+    {
+        /*
+         * This has the same form as other method calls, but we ignore
+         * the 5th argument (vA).  This is chiefly because the first four
+         * arguments to a function on ARM are in registers.
+         *
+         * We only set the arguments that are actually used, leaving
+         * the rest uninitialized.  We're assuming that, if the method
+         * needs them, they'll be specified in the call.
+         *
+         * This annoys gcc when optimizations are enabled, causing a
+         * "may be used uninitialized" warning.  We can quiet the warnings
+         * for a slight penalty (5%: 373ns vs. 393ns on empty method).  Note
+         * that valgrind is perfectly happy with this arrangement, because
+         * the uninitialized values are never actually used.
+         */
+        u4 arg0, arg1, arg2, arg3;
+        //arg0 = arg1 = arg2 = arg3 = 0;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_B(inst);       /* #of args */
+        ref = FETCH(1);             /* inline call "ref" */
+        vdst = FETCH(2);            /* 0-4 register indices */
+        ILOGV("|execute-inline args=%d @%d {regs=0x%04x}",
+            vsrc1, ref, vdst);
+
+        assert((vdst >> 16) == 0);  // 16-bit type -or- high 16 bits clear
+        assert(vsrc1 <= 4);
+
+        /* unpack one 4-bit register index per argument, high to low */
+        switch (vsrc1) {
+        case 4:
+            arg3 = GET_REGISTER(vdst >> 12);
+            /* fall through */
+        case 3:
+            arg2 = GET_REGISTER((vdst & 0x0f00) >> 8);
+            /* fall through */
+        case 2:
+            arg1 = GET_REGISTER((vdst & 0x00f0) >> 4);
+            /* fall through */
+        case 1:
+            arg0 = GET_REGISTER(vdst & 0x0f);
+            /* fall through */
+        default:        // case 0
+            ;
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
+            goto exceptionThrown;
+#else
+        if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
+            goto exceptionThrown;
+#endif
+    }
+    FINISH(3);
+
+
+/*
+ * invoke-virtual / invoke-virtual-range: resolve the method named by
+ * the ref (the statically-typed "base" method), then select the actual
+ * target from the receiver's vtable at the base method's index.
+ * Throws NPE for a null receiver.  On success, jumps to the shared
+ * "invokeMethod" code with methodToCall set.
+ */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    methodCallRange = true;
+    goto invokeVirtual;
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    methodCallRange = false;
+invokeVirtual:
+    {
+        Method* baseMethod;
+        Object* thisPtr;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        /*
+         * The object against which we are executing a method is always
+         * in the first argument.
+         */
+        assert(vsrc1 > 0);
+        if (methodCallRange) {
+            ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+
+        if (!checkForNull(thisPtr))
+            goto exceptionThrown;
+
+        /*
+         * Resolve the method.  This is the correct method for the static
+         * type of the object.  We also verify access permissions here.
+         */
+        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (baseMethod == NULL) {
+            baseMethod = dvmResolveMethod(method->clazz, ref, METHOD_VIRTUAL);
+            if (baseMethod == NULL) {
+                ILOGV("+ unknown method or access denied\n");
+                goto exceptionThrown;
+            }
+        }
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method.
+         */
+        assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
+        methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            /*
+             * This can happen if you create two classes, Base and Sub, where
+             * Sub is a sub-class of Base.  Declare a protected abstract
+             * method foo() in Base, and invoke foo() from a method in Base.
+             * Base is an "abstract base class" and is never instantiated
+             * directly.  Now, override foo() in Sub, and use Sub.  This
+             * works fine unless Sub stops providing an implementation of
+             * the method.
+             */
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            goto exceptionThrown;
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+
+        LOGVV("+++ base=%s.%s virtual[%d]=%s.%s\n",
+            baseMethod->clazz->descriptor, baseMethod->name,
+            (u4) baseMethod->methodIndex,
+            methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+#if 0
+        if (vsrc1 != methodToCall->insSize) {
+            LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s\n",
+                baseMethod->clazz->descriptor, baseMethod->name,
+                (u4) baseMethod->methodIndex,
+                methodToCall->clazz->descriptor, methodToCall->name);
+            //dvmDumpClass(baseMethod->clazz);
+            //dvmDumpClass(methodToCall->clazz);
+            dvmDumpAllClasses(0);
+        }
+#endif
+
+        goto invokeMethod;
+    }
+
+HANDLE_OPCODE(OP_INVOKE_SUPER_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    methodCallRange = true;
+    goto invokeSuper;
+HANDLE_OPCODE(OP_INVOKE_SUPER /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    methodCallRange = false;
+/*
+ * Shared handler for invoke-super / invoke-super-range: resolve the
+ * statically-referenced method, then dispatch through the vtable of the
+ * *current method's class'* superclass (not the superclass of "this").
+ */
+invokeSuper:
+    {
+        Method* baseMethod;
+        u2 thisReg;
+
+        /* export the PC before anything here can throw */
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            /* non-range form packs "this" into the low nibble of vdst */
+            thisReg = vdst & 0x0f;
+        }
+        /* impossible in well-formed code, but we must check nevertheless */
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            goto exceptionThrown;
+
+        /*
+         * Resolve the method.  This is the correct method for the static
+         * type of the object.  We also verify access permissions here.
+         * The first arg to dvmResolveMethod() is just the referring class
+         * (used for class loaders and such), so we don't want to pass
+         * the superclass into the resolution call.
+         */
+        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (baseMethod == NULL) {
+            baseMethod = dvmResolveMethod(method->clazz, ref, METHOD_VIRTUAL);
+            if (baseMethod == NULL) {
+                /* resolution failed; exception is already pending */
+                ILOGV("+ unknown method or access denied\n");
+                goto exceptionThrown;
+            }
+        }
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method's class.
+         *
+         * We're using the current method's class' superclass, not the
+         * superclass of "this".  This is because we might be executing
+         * in a method inherited from a superclass, and we want to run
+         * in that class' superclass.
+         */
+        if (baseMethod->methodIndex >= method->clazz->super->vtableCount) {
+            /*
+             * Method does not exist in the superclass.  Could happen if
+             * superclass gets updated.
+             */
+            dvmThrowException("Ljava/lang/NoSuchMethodError;",
+                baseMethod->name);
+            goto exceptionThrown;
+        }
+        methodToCall = method->clazz->super->vtable[baseMethod->methodIndex];
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            goto exceptionThrown;
+        }
+#else
+        /* vtable entries for abstract methods carry a native "abstract
+         * method" stub, so nativeFunc is expected to be non-NULL here */
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+        LOGVV("+++ base=%s.%s super-virtual=%s.%s\n",
+            baseMethod->clazz->descriptor, baseMethod->name,
+            methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        goto invokeMethod;
+    }
+
+HANDLE_OPCODE(OP_INVOKE_INTERFACE_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    methodCallRange = true;
+    goto invokeInterface;
+HANDLE_OPCODE(OP_INVOKE_INTERFACE /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    methodCallRange = false;
+/*
+ * Shared handler for invoke-interface / invoke-interface-range: look up
+ * the concrete implementation in the runtime class of "this".
+ */
+invokeInterface:
+    {
+        Object* thisPtr;
+        ClassObject* thisClass;
+
+        /* export the PC before anything here can throw */
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        /*
+         * The object against which we are executing a method is always
+         * in the first argument.
+         */
+        assert(vsrc1 > 0);
+        if (methodCallRange) {
+            ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            /* non-range form packs "this" into the low nibble of vdst */
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+        if (!checkForNull(thisPtr))
+            goto exceptionThrown;
+
+        thisClass = thisPtr->clazz;
+
+        /*
+         * Given a class and a method index, find the Method* with the
+         * actual code we want to execute.
+         */
+        methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, method,
+                        methodClassDex);
+        if (methodToCall == NULL) {
+            /* lookup failure must have set an exception */
+            assert(dvmCheckException(self));
+            goto exceptionThrown;
+        }
+
+        goto invokeMethod;
+    }
+
+
+HANDLE_OPCODE(OP_INVOKE_DIRECT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    methodCallRange = true;
+    goto invokeDirect;
+HANDLE_OPCODE(OP_INVOKE_DIRECT_EMPTY /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+#if INTERP_TYPE != INTERP_DBG
+    //LOGI("Ignoring empty\n");
+    FINISH(3);
+#else
+    if (!gDvm.debuggerActive) {
+        //LOGI("Skipping empty\n");
+        FINISH(3);      // don't want it to show up in profiler output
+    } else {
+        //LOGI("Running empty\n");
+        /* fall through to OP_INVOKE_DIRECT */
+    }
+#endif
+HANDLE_OPCODE(OP_INVOKE_DIRECT /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    methodCallRange = false;
+/*
+ * Shared handler for invoke-direct / invoke-direct-range: call a
+ * non-static, non-overridable method (private method or constructor)
+ * resolved statically, with the usual null check on "this".
+ */
+invokeDirect:
+    {
+        u2 thisReg;
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        /* export the PC before anything here can throw */
+        EXPORT_PC();
+
+        /*
+         * BUGFIX: the two log messages were swapped -- the range branch
+         * printed the non-range format and vice versa.  Register
+         * selection below was already correct.
+         */
+        if (methodCallRange) {
+            ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            /* non-range form packs "this" into the low nibble of vdst */
+            thisReg = vdst & 0x0f;
+        }
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            goto exceptionThrown;
+
+        methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (methodToCall == NULL) {
+            methodToCall = dvmResolveMethod(method->clazz, ref, METHOD_DIRECT);
+            if (methodToCall == NULL) {
+                ILOGV("+ unknown direct method\n");     // should be impossible
+                goto exceptionThrown;
+            }
+        }
+        goto invokeMethod;
+    }
+
+HANDLE_OPCODE(OP_INVOKE_STATIC_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    methodCallRange = true;
+    goto invokeStatic;
+HANDLE_OPCODE(OP_INVOKE_STATIC /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    methodCallRange = false;
+/*
+ * Shared handler for invoke-static / invoke-static-range: statically
+ * resolved call with no "this" argument, so no null check is needed.
+ */
+invokeStatic:
+    vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+    ref = FETCH(1);             /* method ref */
+    vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+    /* export the PC before anything here can throw */
+    EXPORT_PC();
+
+    if (methodCallRange)
+        ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+            vsrc1, ref, vdst, vdst+vsrc1-1);
+    else
+        ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+            vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+
+    methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+    if (methodToCall == NULL) {
+        methodToCall = dvmResolveMethod(method->clazz, ref, METHOD_STATIC);
+        if (methodToCall == NULL) {
+            /* resolution failed; exception is already pending */
+            ILOGV("+ unknown method\n");
+            goto exceptionThrown;
+        }
+    }
+    goto invokeMethod;
+
+
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK_RANGE/*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    methodCallRange = true;
+    goto invokeVirtualQuick;
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    methodCallRange = false;
+/*
+ * Shared handler for the "quick" virtual invokes: the instruction carries
+ * a pre-computed vtable index instead of a method ref, so no resolution
+ * step is needed -- just index into the vtable of "this".
+ */
+invokeVirtualQuick:
+    {
+        Object* thisPtr;
+
+        /* export the PC before anything here can throw */
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* vtable index */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        /*
+         * The object against which we are executing a method is always
+         * in the first argument.
+         */
+        assert(vsrc1 > 0);
+        if (methodCallRange) {
+            ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            /* non-range form packs "this" into the low nibble of vdst */
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+
+        if (!checkForNull(thisPtr))
+            goto exceptionThrown;
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method.
+         */
+        assert(ref < thisPtr->clazz->vtableCount);
+        methodToCall = thisPtr->clazz->vtable[ref];
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            goto exceptionThrown;
+        }
+#else
+        /* vtable entries for abstract methods carry a native "abstract
+         * method" stub, so nativeFunc is expected to be non-NULL here */
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+
+        LOGVV("+++ virtual[%d]=%s.%s\n",
+            ref, methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        goto invokeMethod;
+    }
+HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    methodCallRange = true;
+    goto invokeSuperQuick;
+HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    methodCallRange = false;
+/*
+ * Shared handler for the "quick" super invokes: the instruction carries
+ * a pre-computed vtable index, which we apply to the vtable of the
+ * current method's class' superclass.
+ */
+invokeSuperQuick:
+    {
+        u2 thisReg;
+
+        /* export the PC before anything here can throw */
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* vtable index */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            /* non-range form packs "this" into the low nibble of vdst */
+            thisReg = vdst & 0x0f;
+        }
+        /* impossible in well-formed code, but we must check nevertheless */
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            goto exceptionThrown;
+
+#if 0   /* impossible in optimized + verified code */
+        if (ref >= method->clazz->super->vtableCount) {
+            dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+            goto exceptionThrown;
+        }
+#else
+        assert(ref < method->clazz->super->vtableCount);
+#endif
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method's class.
+         *
+         * We're using the current method's class' superclass, not the
+         * superclass of "this".  This is because we might be executing
+         * in a method inherited from a superclass, and we want to run
+         * in the method's class' superclass.
+         */
+        methodToCall = method->clazz->super->vtable[ref];
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            goto exceptionThrown;
+        }
+#else
+        /* vtable entries for abstract methods carry a native "abstract
+         * method" stub, so nativeFunc is expected to be non-NULL here */
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+        LOGVV("+++ super-virtual[%d]=%s.%s\n",
+            ref, methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        goto invokeMethod;
+    }
+
+
+    /*
+     * General handling for invoke-{virtual,super,direct,static,interface},
+     * including "quick" variants.
+     *
+     * Set "methodToCall" to the Method we're calling, and "methodCallRange"
+     * depending on whether this is a "/range" instruction.
+     *
+     * For a range call:
+     *  "vsrc1" holds the argument count (8 bits)
+     *  "vdst" holds the first argument in the range
+     * For a non-range call:
+     *  "vsrc1" holds the argument count (4 bits) and the 5th argument index
+     *  "vdst" holds four 4-bit register indices
+     *
+     * The caller must EXPORT_PC before jumping here, because any method
+     * call can throw a stack overflow exception.
+     */
+invokeMethod:
+    {
+        u4* outs;
+        int i;
+
+        /*
+         * Copy args.  This may corrupt vsrc1/vdst.
+         */
+        if (methodCallRange) {
+            // could use memcpy or a "Duff's device"; most functions have
+            // so few args it won't matter much
+            assert(vsrc1 <= method->outsSize);
+            assert(vsrc1 == methodToCall->insSize);
+            outs = OUTS_FROM_FP(fp, vsrc1);
+            for (i = 0; i < vsrc1; i++)
+                outs[i] = GET_REGISTER(vdst+i);
+        } else {
+            u4 count = vsrc1 >> 4;
+
+            assert(count <= method->outsSize);
+            assert(count == methodToCall->insSize);
+            assert(count <= 5);
+
+            outs = OUTS_FROM_FP(fp, count);
+#if 0
+            if (count == 5) {
+                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+                count--;
+            }
+            for (i = 0; i < (int) count; i++) {
+                outs[i] = GET_REGISTER(vdst & 0x0f);
+                vdst >>= 4;
+            }
+#else
+            // This version executes fewer instructions but is larger
+            // overall.  Seems to be a teensy bit faster.
+            assert((vdst >> 16) == 0);  // 16 bits -or- high 16 bits clear
+            switch (count) {
+            case 5:
+                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+            case 4:
+                outs[3] = GET_REGISTER(vdst >> 12);
+            case 3:
+                outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
+            case 2:
+                outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
+            case 1:
+                outs[0] = GET_REGISTER(vdst & 0x0f);
+            default:
+                ;
+            }
+#endif
+        }
+    }
+
+    /*
+     * (This was originally a "goto" target; I've kept it separate from the
+     * stuff above in case we want to refactor things again.)
+     *
+     * At this point, we have the arguments stored in the "outs" area of
+     * the current method's stack frame, and the method to call in
+     * "methodToCall".  Push a new stack frame.
+     */
+    {
+        StackSaveArea* newSaveArea;
+        u4* newFp;
+
+        ILOGV("> %s%s.%s %s",
+            dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
+            methodToCall->clazz->descriptor, methodToCall->name,
+            methodToCall->signature);
+
+        newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
+        newSaveArea = SAVEAREA_FROM_FP(newFp);
+
+        /* verify that we have enough space */
+        if (true) {
+            u1* bottom;
+            bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
+            if (bottom < self->interpStackEnd) {
+                /* stack overflow */
+                LOGV("Stack overflow on method call (top=%p end=%p newBot=%p size=%d '%s')\n",
+                    self->interpStackStart, self->interpStackEnd, bottom,
+                    self->interpStackSize, methodToCall->name);
+                dvmHandleStackOverflow(self);
+                assert(dvmCheckException(self));
+                goto exceptionThrown;
+            }
+            //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p\n",
+            //    fp, newFp, newSaveArea, bottom);
+        }
+
+#ifdef LOG_INSTR
+        if (methodToCall->registersSize > methodToCall->insSize) {
+            /*
+             * This makes valgrind quiet when we print registers that
+             * haven't been initialized.  Turn it off when the debug
+             * messages are disabled -- we want valgrind to report any
+             * used-before-initialized issues.
+             */
+            memset(newFp, 0xcc,
+                (methodToCall->registersSize - methodToCall->insSize) * 4);
+        }
+#endif
+
+#ifdef EASY_GDB
+        newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
+#endif
+        newSaveArea->prevFrame = fp;
+        newSaveArea->savedPc = pc;
+        newSaveArea->method = methodToCall;
+
+        if (!dvmIsNativeMethod(methodToCall)) {
+            /*
+             * "Call" interpreted code.  Reposition the PC, update the
+             * frame pointer and other local state, and continue.
+             */
+            method = methodToCall;
+            methodClassDex = method->clazz->pDvmDex;
+            pc = methodToCall->insns;
+            fp = self->curFrame = newFp;
+#ifdef EASY_GDB
+            debugSaveArea = SAVEAREA_FROM_FP(newFp);
+#endif
+#if INTERP_TYPE == INTERP_DBG
+            debugIsMethodEntry = true;              // profiling, debugging
+#endif
+            ILOGD("> pc <-- %s.%s %s", method->clazz->descriptor, method->name,
+                method->signature);
+            DUMP_REGS(method, fp, true);            // show input args
+            FINISH(0);                              // jump to method start
+        } else {
+            /* set this up for JNI locals, even if not a JNI native */
+            newSaveArea->xtra.localRefTop = self->jniLocalRefTable.nextEntry;
+
+            self->curFrame = newFp;
+
+            DUMP_REGS(methodToCall, newFp, true);   // show input args
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+            if (gDvm.debuggerActive) {
+                dvmDbgPostLocationEvent(methodToCall, -1,
+                    dvmGetThisPtr(method, fp), DBG_METHOD_ENTRY);
+            }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+            TRACE_METHOD_ENTER(self, methodToCall);
+#endif
+
+            ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
+                methodToCall->name, methodToCall->signature);
+
+            /*
+             * Jump through native call bridge.  Because we leave no
+             * space for locals on native calls, "newFp" points directly
+             * to the method arguments.
+             */
+            (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
+
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+            if (methodToCall->shorty[0] == 'L' && !dvmCheckException(self)) {
+                assert(retval.l == NULL || dvmIsValidObject(retval.l));
+            }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+            if (gDvm.debuggerActive) {
+                dvmDbgPostLocationEvent(methodToCall, -1,
+                    dvmGetThisPtr(method, fp), DBG_METHOD_EXIT);
+            }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+            TRACE_METHOD_EXIT(self, methodToCall);
+#endif
+
+            /* pop frame off */
+            dvmPopJniLocals(self, newSaveArea);
+            self->curFrame = fp;
+
+            /*
+             * If the native code threw an exception, or interpreted code
+             * invoked by the native call threw one and nobody has cleared
+             * it, jump to our local exception handling.
+             */
+            if (dvmCheckException(self)) {
+                LOGV("Exception thrown by/below native code\n");
+                goto exceptionThrown;
+            }
+
+            ILOGD("> retval=0x%llx (leaving native)", retval.j);
+            ILOGD("> (return from native %s.%s to %s.%s %s)",
+                methodToCall->clazz->descriptor, methodToCall->name,
+                method->clazz->descriptor, method->name,
+                method->signature);
+
+            //u2 invokeInstr = INST_INST(FETCH(0));
+            if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+                invokeInstr <= OP_INVOKE_INTERFACE*/)
+            {
+                FINISH(3);
+            } else {
+                //LOGE("Unknown invoke instr %02x at %d\n",
+                //    invokeInstr, (int) (pc - method->insns));
+                assert(false);
+            }
+        }
+    }
+    assert(false);      // should not get here
+
+
+HANDLE_OPCODE(OP_RETURN_WIDE /*vAA*/)
+    vsrc1 = INST_AA(inst);
+    ILOGV("|return-wide v%d", vsrc1);
+    retval.j = GET_REGISTER_WIDE(vsrc1);
+    goto returnFromMethod;
+
+HANDLE_OPCODE(OP_RETURN_VOID /**/)
+    ILOGV("|return-void");
+#ifndef NDEBUG
+    retval.j = 0xababababULL;    // placate valgrind
+#endif
+    goto returnFromMethod;
+
+HANDLE_OPCODE(OP_RETURN /*vAA*/)
+HANDLE_OPCODE(OP_RETURN_OBJECT /*vAA*/)
+    vsrc1 = INST_AA(inst);
+    ILOGV("|return%s v%d",
+        (INST_INST(inst) == OP_RETURN) ? "" : "-object", vsrc1);
+    retval.i = GET_REGISTER(vsrc1);
+    goto returnFromMethod;
+    /* gcc removes the goto -- these are 2x more common than return-void */
+
+    /*
+     * General handling for return-void, return, and return-wide.  Put the
+     * return value in "retval" before jumping here.
+     */
+returnFromMethod:
+    {
+        StackSaveArea* saveArea;
+
+        /*
+         * We must do this BEFORE we pop the previous stack frame off, so
+         * that the GC can see the return value (if any) in the local vars.
+         *
+         * Since this is now an interpreter switch point, we must do it before
+         * we do anything at all.
+         */
+        PERIODIC_CHECKS(kInterpEntryReturn, 0);
+
+        ILOGV("> retval=0x%llx (leaving %s.%s %s)",
+            retval.j, method->clazz->descriptor, method->name,
+            method->signature);
+        //DUMP_REGS(method, fp);
+
+        saveArea = SAVEAREA_FROM_FP(fp);
+
+#ifdef EASY_GDB
+        debugSaveArea = saveArea;
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+        TRACE_METHOD_EXIT(self, method);
+#endif
+
+        /* back up to previous frame and see if we hit a break */
+        fp = saveArea->prevFrame;
+        assert(fp != NULL);
+        if (dvmIsBreakFrame(fp)) {
+            /* bail without popping the method frame from stack */
+            LOGVV("+++ returned into break frame\n");
+            goto bail;
+        }
+
+        /* update thread FP, and reset local variables */
+        self->curFrame = fp;
+        method = SAVEAREA_FROM_FP(fp)->method;
+        //methodClass = method->clazz;
+        methodClassDex = method->clazz->pDvmDex;
+        /* resume at the invoke instruction recorded when we were called */
+        pc = saveArea->savedPc;
+        ILOGD("> (return to %s.%s %s)", method->clazz->descriptor,
+            method->name, method->signature);
+
+        /* use FINISH on the caller's invoke instruction */
+        //u2 invokeInstr = INST_INST(FETCH(0));
+        if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+            invokeInstr <= OP_INVOKE_INTERFACE*/)
+        {
+            FINISH(3);
+        } else {
+            //LOGE("Unknown invoke instr %02x at %d\n",
+            //    invokeInstr, (int) (pc - method->insns));
+            assert(false);
+        }
+    }
+
+
+    /*
+     * Jump here when the code throws an exception.
+     *
+     * By the time we get here, the Throwable has been created and the stack
+     * trace has been saved off.
+     */
+exceptionThrown:
+    {
+        Object* exception;
+        int catchRelPc;
+
+        /*
+         * Since this is now an interpreter switch point, we must do it before
+         * we do anything at all.
+         */
+        PERIODIC_CHECKS(kInterpEntryThrow, 0);
+
+        /*
+         * We save off the exception and clear the exception status.  While
+         * processing the exception we might need to load some Throwable
+         * classes, and we don't want class loader exceptions to get
+         * confused with this one.
+         */
+        assert(dvmCheckException(self));
+        exception = dvmGetException(self);
+        dvmAddTrackedAlloc(exception, self);
+        dvmClearException(self);
+
+        LOGV("Handling exception %s at %s:%d\n",
+            exception->clazz->descriptor, method->name,
+            dvmLineNumFromPC(method, pc - method->insns));
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+        /*
+         * Tell the debugger about it.
+         *
+         * TODO: if the exception was thrown by interpreted code, control
+         * fell through native, and then back to us, we will report the
+         * exception at the point of the throw and again here.  We can avoid
+         * this by not reporting exceptions when we jump here directly from
+         * the native call code above, but then we won't report exceptions
+         * that were thrown *from* the JNI code (as opposed to *through* it).
+         *
+         * The correct solution is probably to ignore from-native exceptions
+         * here, and have the JNI exception code do the reporting to the
+         * debugger.
+         */
+        if (gDvm.debuggerActive) {
+            void* catchFrame;
+            catchRelPc = dvmFindCatchBlock(self, pc - method->insns,
+                        exception, true, &catchFrame);
+            dvmDbgPostException(fp, pc - method->insns, catchFrame, catchRelPc,
+                exception);
+        }
+#endif
+
+        /*
+         * We need to unroll to the catch block or the nearest "break"
+         * frame.
+         *
+         * A break frame could indicate that we have reached an intermediate
+         * native call, or have gone off the top of the stack and the thread
+         * needs to exit.  Either way, we return from here, leaving the
+         * exception raised.
+         *
+         * If we do find a catch block, we want to transfer execution to
+         * that point.
+         */
+        catchRelPc = dvmFindCatchBlock(self, pc - method->insns,
+                    exception, false, (void*)&fp);
+
+        /*
+         * Restore the stack bounds after an overflow.  This isn't going to
+         * be correct in all circumstances, e.g. if JNI code devours the
+         * exception this won't happen until some other exception gets
+         * thrown.  If the code keeps pushing the stack bounds we'll end
+         * up aborting the VM.
+         */
+        if (self->stackOverflowed)
+            dvmCleanupStackOverflow(self);
+
+        if (catchRelPc < 0) {
+            /* falling through to JNI code or off the bottom of the stack */
+#if DVM_SHOW_EXCEPTION >= 2
+            LOGD("Exception %s from %s:%d not caught locally\n",
+                exception->clazz->descriptor, dvmGetMethodSourceFile(method),
+                dvmLineNumFromPC(method, pc - method->insns));
+#endif
+            dvmSetException(self, exception);
+            dvmReleaseTrackedAlloc(exception, self);
+            goto bail;
+        }
+
+#if DVM_SHOW_EXCEPTION >= 3
+        {
+            const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
+            LOGD("Exception %s thrown from %s:%d to %s:%d\n",
+                exception->clazz->descriptor, dvmGetMethodSourceFile(method),
+                dvmLineNumFromPC(method, pc - method->insns),
+                dvmGetMethodSourceFile(catchMethod),
+                dvmLineNumFromPC(catchMethod, catchRelPc));
+        }
+#endif
+
+        /*
+         * Adjust local variables to match self->curFrame and the
+         * updated PC.
+         */
+        //fp = (u4*) self->curFrame;
+        method = SAVEAREA_FROM_FP(fp)->method;
+        //methodClass = method->clazz;
+        methodClassDex = method->clazz->pDvmDex;
+        pc = method->insns + catchRelPc;
+        ILOGV("> pc <-- %s.%s %s", method->clazz->descriptor, method->name,
+            method->signature);
+        DUMP_REGS(method, fp, false);               // show all regs
+
+        /*
+         * Restore the exception if the handler wants it.
+         *
+         * The Dalvik spec mandates that, if an exception handler wants to
+         * do something with the exception, the first instruction executed
+         * must be "move-exception".  We can pass the exception along
+         * through the thread struct, and let the move-exception instruction
+         * clear it for us.
+         *
+         * If the handler doesn't call move-exception, we don't want to
+         * finish here with an exception still pending.
+         */
+        if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
+            dvmSetException(self, exception);
+
+        dvmReleaseTrackedAlloc(exception, self);
+        FINISH(0);
+    }
+
+#ifndef THREADED_INTERP
+        // ---------- bottom of instruction switch statement ----------
+        default:
+            /* fall into OP_UNUSED code */
+#endif
+
+/* All unused/reserved opcodes share one handler that throws InternalError. */
+HANDLE_OPCODE(OP_UNUSED_3E)
+HANDLE_OPCODE(OP_UNUSED_3F)
+HANDLE_OPCODE(OP_UNUSED_40)
+HANDLE_OPCODE(OP_UNUSED_41)
+HANDLE_OPCODE(OP_UNUSED_42)
+HANDLE_OPCODE(OP_UNUSED_43)
+HANDLE_OPCODE(OP_UNUSED_73)
+HANDLE_OPCODE(OP_UNUSED_79)
+HANDLE_OPCODE(OP_UNUSED_7A)
+HANDLE_OPCODE(OP_UNUSED_E3)
+HANDLE_OPCODE(OP_UNUSED_E4)
+HANDLE_OPCODE(OP_UNUSED_E5)
+HANDLE_OPCODE(OP_UNUSED_E6)
+HANDLE_OPCODE(OP_UNUSED_E7)
+HANDLE_OPCODE(OP_UNUSED_E8)
+HANDLE_OPCODE(OP_UNUSED_E9)
+HANDLE_OPCODE(OP_UNUSED_EA)
+HANDLE_OPCODE(OP_UNUSED_EB)
+HANDLE_OPCODE(OP_UNUSED_EC)
+HANDLE_OPCODE(OP_UNUSED_ED)
+HANDLE_OPCODE(OP_UNUSED_EF)
+HANDLE_OPCODE(OP_UNUSED_F1)
+HANDLE_OPCODE(OP_UNUSED_FC)
+HANDLE_OPCODE(OP_UNUSED_FD)
+HANDLE_OPCODE(OP_UNUSED_FE)
+HANDLE_OPCODE(OP_UNUSED_FF)
+    LOGE("unknown opcode 0x%02x\n", INST_INST(inst));
+    assert(false);
+    EXPORT_PC();
+    dvmThrowException("Ljava/lang/InternalError;", "unknown opcode");
+    goto exceptionThrown;
+
+#ifndef THREADED_INTERP
+        } // end of "switch"
+    } // end of "while"
+#endif
+
+/*
+ * Exit the interpreter loop entirely (break frame or thread exit);
+ * returns false to indicate "no interpreter switch requested".
+ */
+bail:
+    ILOGD("|-- Leaving interpreter loop");      // note "method" may be NULL
+
+    interpState->retval = retval;
+    return false;
+
+/*
+ * Exit the loop to switch to the other interpreter flavor; exports the
+ * current execution state into interpState and returns true.
+ */
+bail_switch:
+    /*
+     * The standard interpreter currently doesn't set or care about the
+     * "debugIsMethodEntry" value, so setting this is only of use if we're
+     * switching between two "debug" interpreters, which we never do.
+     *
+     * TODO: figure out if preserving this makes any sense.
+     */
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+# if INTERP_TYPE == INTERP_DBG
+    interpState->debugIsMethodEntry = debugIsMethodEntry;
+# else
+    interpState->debugIsMethodEntry = false;
+# endif
+#endif
+
+    /* export state changes */
+    interpState->method = method;
+    interpState->pc = pc;
+    interpState->fp = fp;
+    /* debugTrackedRefStart doesn't change */
+    interpState->retval = retval;   /* need for _entryPoint=ret */
+    interpState->nextMode =
+        (INTERP_TYPE == INTERP_STD) ? INTERP_DBG : INTERP_STD;
+    LOGVV(" meth='%s.%s' pc=0x%x fp=%p\n",
+        method->clazz->descriptor, method->name,
+        pc - method->insns, fp);
+    return true;
+}
diff --git a/vm/interp/InterpDbg.c b/vm/interp/InterpDbg.c
new file mode 100644
index 0000000..ad2169b
--- /dev/null
+++ b/vm/interp/InterpDbg.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Main interpreter loop entry point ("debug" version).  This is only
+ * built if debugging or profiling is enabled in the VM.
+ */
+#include "Dalvik.h"
+
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+
+#define INTERP_FUNC_NAME dvmInterpretDbg
+#define INTERP_TYPE INTERP_DBG
+
+#include "interp/InterpCore.h"
+
+#endif /*WITH_PROFILER || WITH_DEBUGGER*/
diff --git a/vm/interp/InterpDefs.h b/vm/interp/InterpDefs.h
new file mode 100644
index 0000000..9381b2f
--- /dev/null
+++ b/vm/interp/InterpDefs.h
@@ -0,0 +1,558 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Dalvik interpreter definitions.  These are internal to the interpreter.
+ *
+ * This includes defines, types, function declarations, and inline functions
+ * that are common to all interpreter implementations.
+ *
+ * Functions and globals declared here are defined in Interp.c.
+ */
+#ifndef _DALVIK_INTERP_DEFS
+#define _DALVIK_INTERP_DEFS
+
+
+/*
+ * Specify the starting point when switching between interpreters.
+ */
+typedef enum InterpEntry {
+    kInterpEntryInstr = 0,      // continue to next instruction
+    kInterpEntryReturn = 1,     // jump to method return
+    kInterpEntryThrow = 2,      // jump to exception throw
+} InterpEntry;
+
+/*
+ * Interpreter context, used when switching from one interpreter to
+ * another.  We also tuck "mterp" state in here.
+ */
+typedef struct InterpState {
+    /*
+     * To make some mterp state updates easier, "pc" and "fp" MUST come
+     * first and MUST appear in this order.
+     */
+    const u2*   pc;                     // program counter
+    u4*         fp;                     // frame pointer
+
+    JValue      retval;                 // return value -- "out" only
+    const Method* method;               // method being executed
+
+
+    /* ----------------------------------------------------------------------
+     * Mterp-only state
+     */
+    DvmDex*         methodClassDex;     // DEX info for the method's class
+    Thread*         self;               // thread owning this activation
+
+    /* housekeeping */
+    void*           bailPtr;            // NOTE(review): presumably a bail-out
+                                        // cookie for leaving mterp -- confirm
+
+    /*
+     * These are available globally, from gDvm, or from another glue field
+     * (self/method).  They're included for speed.
+     */
+    const u1*       interpStackEnd;     // low limit used for overflow checks
+    volatile int*   pSelfSuspendCount;  // cached ptr to thread suspend count
+#if defined(WITH_DEBUGGER)
+    volatile bool*  pDebuggerActive;    // cached ptr to debugger-active flag
+#endif
+#if defined(WITH_PROFILER)
+    volatile int*   pActiveProfilers;   // cached ptr to active-profiler count
+#endif
+    /* ----------------------------------------------------------------------
+     */
+
+    /*
+     * Interpreter switching.
+     */
+    InterpEntry entryPoint;             // what to do when we start
+    int         nextMode;               // INTERP_STD or INTERP_DBG
+
+
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+    bool        debugIsMethodEntry;     // used for method entry event triggers
+#endif
+#if defined(WITH_TRACKREF_CHECKS)
+    int         debugTrackedRefStart;   // tracked refs from prior invocations
+#endif
+
+} InterpState;
+
+/*
+ * These are generated from InterpCore.h.
+ */
+extern bool dvmInterpretDbg(Thread* self, InterpState* interpState);
+extern bool dvmInterpretStd(Thread* self, InterpState* interpState);
+#define INTERP_STD 0
+#define INTERP_DBG 1
+
+/*
+ * "mterp" interpreter.
+ */
+extern bool dvmMterpStd(Thread* self, InterpState* interpState);
+
+/*
+ * Get the "this" pointer from the current frame.
+ */
+Object* dvmGetThisPtr(const Method* method, const u4* fp);
+
+/*
+ * Verify that our tracked local references are valid.
+ */
+void dvmInterpCheckTrackedRefs(Thread* self, const Method* method,
+    int debugTrackedRefStart);
+
+/*
+ * Process switch statement.
+ */
+s4 dvmInterpHandlePackedSwitch(const u2* switchData, s4 testVal);
+s4 dvmInterpHandleSparseSwitch(const u2* switchData, s4 testVal);
+
+/*
+ * Process fill-array-data.
+ */
+bool dvmInterpHandleFillArrayData(ArrayObject* arrayObject, 
+                                  const u2* arrayData);
+
+/*
+ * Find an interface method.
+ */
+Method* dvmInterpFindInterfaceMethod(ClassObject* thisClass, u4 methodIdx,
+    const Method* method, DvmDex* methodClassDex);
+
+/*
+ * Configuration defines.
+ *
+ * Some defines are controlled by the Makefile, e.g.:
+ *   WITH_PROFILER
+ *   WITH_DEBUGGER
+ *   WITH_INSTR_CHECKS
+ *   WITH_TRACKREF_CHECKS
+ *   EASY_GDB
+ *   NDEBUG
+ *
+ * If THREADED_INTERP is not defined, we use a classic "while true / switch"
+ * interpreter.  If it is defined, then the tail end of each instruction
+ * handler fetches the next instruction and jumps directly to the handler.
+ * This increases the size of the "Std" interpreter by about 10%, but
+ * provides a speedup of about the same magnitude.
+ *
+ * There's a "hybrid" approach that uses a goto table instead of a switch
+ * statement, avoiding the "is the opcode in range" tests required for switch.
+ * The performance is close to the threaded version, and without the 10%
+ * size increase, but the benchmark results are off enough that it's not
+ * worth adding as a third option.
+ */
+#define THREADED_INTERP             /* threaded vs. while-loop interpreter */
+
+#ifdef WITH_INSTR_CHECKS            /* instruction-level paranoia */
+# define CHECK_BRANCH_OFFSETS
+# define CHECK_REGISTER_INDICES
+#endif
+
+/*
+ * ARM EABI requires 64-bit alignment for access to 64-bit data types.  We
+ * can't just use pointers to copy 64-bit values out of our interpreted
+ * register set, because gcc will generate ldrd/strd.
+ *
+ * The __UNION version copies data in and out of a union.  The __MEMCPY
+ * version uses a memcpy() call to do the transfer; gcc is smart enough to
+ * not actually call memcpy().  The __UNION version is very bad on ARM;
+ * it only uses one more instruction than __MEMCPY, but for some reason
+ * gcc thinks it needs separate storage for every instance of the union.
+ * On top of that, it feels the need to zero them out at the start of the
+ * method.  Net result is we zero out ~700 bytes of stack space at the top
+ * of the interpreter using ARM STM instructions.
+ */
+#if defined(__ARM_EABI__)
+//# define NO_UNALIGN_64__UNION
+# define NO_UNALIGN_64__MEMCPY
+#endif
+
+//#define LOG_INSTR                   /* verbose debugging */
+/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
+
+/*
+ * Keep a tally of accesses to fields.  Currently only works if full DEX
+ * optimization is disabled.
+ */
+#ifdef PROFILE_FIELD_ACCESS
+# define UPDATE_FIELD_GET(_field) { (_field)->gets++; }
+# define UPDATE_FIELD_PUT(_field) { (_field)->puts++; }
+#else
+# define UPDATE_FIELD_GET(_field) ((void)0)
+# define UPDATE_FIELD_PUT(_field) ((void)0)
+#endif
+
+/*
+ * Adjust the program counter.  "_offset" is a signed int, in 16-bit units.
+ *
+ * Assumes the existence of "const u2* pc" and "const u2* method->insns".
+ *
+ * We don't advance the program counter until we finish an instruction or
+ * branch, because we don't want to have to unroll the PC if there's an
+ * exception.
+ */
+#ifdef CHECK_BRANCH_OFFSETS
+# define ADJUST_PC(_offset) do {                                            \
+        int myoff = _offset;        /* deref only once */                   \
+        if (pc + myoff < method->insns ||                                   \
+            pc + myoff >= method->insns + dvmGetMethodInsnsSize(method))    \
+        {                                                                   \
+            char* desc = dexProtoCopyMethodDescriptor(&method->prototype);  \
+            LOGE("Invalid branch %d at 0x%04x in %s.%s %s\n",               \
+                myoff, (int) (pc - method->insns),                          \
+                method->clazz->descriptor, method->name, desc);             \
+            free(desc);                                                     \
+            dvmAbort();                                                     \
+        }                                                                   \
+        pc += myoff;                                                        \
+    } while (false)
+#else
+# define ADJUST_PC(_offset) (pc += _offset)
+#endif
+
+/*
+ * Instruction framing.  For a switch-oriented implementation this is
+ * case/break, for a threaded implementation it's a goto label and an
+ * instruction fetch/computed goto.
+ *
+ * Assumes the existence of "const u2* pc" and (for threaded operation)
+ * "u2 inst".
+ */
+#ifdef THREADED_INTERP
+# define H(_op)             &&op_##_op
+# define HANDLE_OPCODE(_op) op_##_op:
+# define FINISH(_offset) {                                                  \
+        ADJUST_PC(_offset);                                                 \
+        inst = FETCH(0);                                                    \
+        CHECK_DEBUG_AND_PROF();                                             \
+        CHECK_TRACKED_REFS();                                               \
+        goto *handlerTable[INST_INST(inst)];                                \
+    }
+#else
+# define HANDLE_OPCODE(_op) case _op:
+# define FINISH(_offset)    { ADJUST_PC(_offset); break; }
+#endif
+
+#if INTERP_TYPE == INTERP_DBG
+# define CHECK_DEBUG_AND_PROF() \
+    checkDebugAndProf(pc, fp, self, method, &debugIsMethodEntry)
+#else
+# define CHECK_DEBUG_AND_PROF() ((void)0)
+#endif
+
+#if defined(WITH_TRACKREF_CHECKS)
+# define CHECK_TRACKED_REFS() \
+    dvmInterpCheckTrackedRefs(self, method, debugTrackedRefStart)
+#else
+# define CHECK_TRACKED_REFS() ((void)0)
+#endif
+
+/*
+ * If enabled, log instructions as we execute them.
+ */
+#ifdef LOG_INSTR
+# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
+# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
+# define ILOG(_level, ...) do {                                             \
+        char debugStrBuf[128];                                              \
+        snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__);            \
+        if (method != NULL)                                                 \
+            LOG(_level, LOG_TAG"i", "%-2d|%04x%s\n",                        \
+                self->threadId, (int) (pc - method->insns), debugStrBuf);   \
+        else                                                                \
+            LOG(_level, LOG_TAG"i", "%-2d|####%s\n",                        \
+                self->threadId, debugStrBuf);                               \
+    } while(false)
+void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
+# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
+static const char kSpacing[] = "            ";
+#else
+# define ILOGD(...) ((void)0)
+# define ILOGV(...) ((void)0)
+# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
+#endif
+
+/*
+ * Get a long (64-bit) value out of an array of u4 register words.
+ *
+ * On ARM EABI we can't just dereference, because &ptr[idx] may only be
+ * 32-bit aligned; the value is assembled via a union or memcpy instead
+ * (gcc turns the memcpy into plain loads).
+ */
+static inline s8 getLongFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+    union { s8 ll; u4 parts[2]; } conv;
+
+    ptr += idx;
+    conv.parts[0] = ptr[0];
+    conv.parts[1] = ptr[1];
+    return conv.ll;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+    s8 val;
+    /* sizeof(val) rather than a magic "8" so the width tracks the type */
+    memcpy(&val, &ptr[idx], sizeof(val));
+    return val;
+#else
+    return *((s8*) &ptr[idx]);
+#endif
+}
+
+/*
+ * Store a long (64-bit) value into an array of u4 register words.
+ *
+ * See getLongFromArray for why the union/memcpy forms exist (ARM EABI
+ * 64-bit alignment requirements).
+ */
+static inline void putLongToArray(u4* ptr, int idx, s8 val)
+{
+#if defined(NO_UNALIGN_64__UNION)
+    union { s8 ll; u4 parts[2]; } conv;
+
+    ptr += idx;
+    conv.ll = val;
+    ptr[0] = conv.parts[0];
+    ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+    /* sizeof(val) rather than a magic "8" so the width tracks the type */
+    memcpy(&ptr[idx], &val, sizeof(val));
+#else
+    *((s8*) &ptr[idx]) = val;
+#endif
+}
+
+/*
+ * Get a double out of an array of u4 register words.
+ *
+ * See getLongFromArray for why the union/memcpy forms exist (ARM EABI
+ * 64-bit alignment requirements).
+ */
+static inline double getDoubleFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+    union { double d; u4 parts[2]; } conv;
+
+    ptr += idx;
+    conv.parts[0] = ptr[0];
+    conv.parts[1] = ptr[1];
+    return conv.d;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+    double dval;
+    /* sizeof(dval) rather than a magic "8" so the width tracks the type */
+    memcpy(&dval, &ptr[idx], sizeof(dval));
+    return dval;
+#else
+    return *((double*) &ptr[idx]);
+#endif
+}
+
+/*
+ * Store a double into an array of u4 register words.
+ *
+ * See getLongFromArray for why the union/memcpy forms exist (ARM EABI
+ * 64-bit alignment requirements).
+ */
+static inline void putDoubleToArray(u4* ptr, int idx, double dval)
+{
+#if defined(NO_UNALIGN_64__UNION)
+    union { double d; u4 parts[2]; } conv;
+
+    ptr += idx;
+    conv.d = dval;
+    ptr[0] = conv.parts[0];
+    ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+    /* sizeof(dval) rather than a magic "8" so the width tracks the type */
+    memcpy(&ptr[idx], &dval, sizeof(dval));
+#else
+    *((double*) &ptr[idx]) = dval;
+#endif
+}
+
+/*
+ * If enabled, validate the register number on every access.  Otherwise,
+ * just do an array access.
+ *
+ * Assumes the existence of "u4* fp".
+ *
+ * "_idx" may be referenced more than once.
+ */
+#ifdef CHECK_REGISTER_INDICES
+# define GET_REGISTER(_idx) \
+    ( (_idx) < method->registersSize ? \
+        (fp[(_idx)]) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER(_idx, _val) \
+    ( (_idx) < method->registersSize ? \
+        (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_AS_OBJECT(_idx)       ((Object *)GET_REGISTER(_idx))
+# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) \
+    ( (_idx) < method->registersSize-1 ? \
+        getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER_WIDE(_idx, _val) \
+    ( (_idx) < method->registersSize-1 ? \
+        putLongToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_FLOAT(_idx) \
+    ( (_idx) < method->registersSize ? \
+        (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
+# define SET_REGISTER_FLOAT(_idx, _val) \
+    ( (_idx) < method->registersSize ? \
+        (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
+# define GET_REGISTER_DOUBLE(_idx) \
+    ( (_idx) < method->registersSize-1 ? \
+        getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
+# define SET_REGISTER_DOUBLE(_idx, _val) \
+    ( (_idx) < method->registersSize-1 ? \
+        putDoubleToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969.0) )
+#else
+# define GET_REGISTER(_idx)                 (fp[(_idx)])
+# define SET_REGISTER(_idx, _val)           (fp[(_idx)] = (_val))
+# define GET_REGISTER_AS_OBJECT(_idx)       ((Object*) fp[(_idx)])
+# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
+# define GET_REGISTER_INT(_idx)             ((s4)GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val)       SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx)            getLongFromArray(fp, (_idx))
+# define SET_REGISTER_WIDE(_idx, _val)      putLongToArray(fp, (_idx), (_val))
+# define GET_REGISTER_FLOAT(_idx)           (*((float*) &fp[(_idx)]))
+# define SET_REGISTER_FLOAT(_idx, _val)     (*((float*) &fp[(_idx)]) = (_val))
+# define GET_REGISTER_DOUBLE(_idx)          getDoubleFromArray(fp, (_idx))
+# define SET_REGISTER_DOUBLE(_idx, _val)    putDoubleToArray(fp, (_idx), (_val))
+#endif
+
+/*
+ * Get 16 bits from the specified offset of the program counter.  We always
+ * want to load 16 bits at a time from the instruction stream -- it's more
+ * efficient than 8 and won't have the alignment problems that 32 might.
+ *
+ * Assumes existence of "const u2* pc".
+ */
+#define FETCH(_offset)     (pc[(_offset)])
+
+/*
+ * Extract instruction byte from 16-bit fetch (_inst is a u2).
+ */
+#define INST_INST(_inst)    ((_inst) & 0xff)
+
+/*
+ * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
+ */
+#define INST_A(_inst)       (((_inst) >> 8) & 0x0f)
+#define INST_B(_inst)       ((_inst) >> 12)
+
+/*
+ * Get the 8-bit "vAA" 8-bit register index from the instruction word.
+ * (_inst is u2)
+ */
+#define INST_AA(_inst)      ((_inst) >> 8)
+
+/*
+ * The current PC must be available to Throwable constructors, e.g.
+ * those created by dvmThrowException(), so that the exception stack
+ * trace can be generated correctly.  If we don't do this, the offset
+ * within the current method won't be shown correctly.  See the notes
+ * in Exception.c.
+ *
+ * Assumes existence of "u4* fp" and "const u2* pc".
+ */
+#define EXPORT_PC()         (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
+
+/*
+ * Determine if we need to switch to a different interpreter.  "_current"
+ * is either INTERP_STD or INTERP_DBG.  It should be fixed for a given
+ * interpreter generation file, which should remove the outer conditional
+ * from the following.
+ *
+ * If we're building without debug and profiling support, we never switch.
+ */
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+# define NEED_INTERP_SWITCH(_current) (                                     \
+    (_current == INTERP_STD) ?                                              \
+        dvmDebuggerOrProfilerActive() : !dvmDebuggerOrProfilerActive() )
+#else
+# define NEED_INTERP_SWITCH(_current) (false)
+#endif
+
+/*
+ * Determine if the debugger or profiler is currently active.  Used when
+ * selecting which interpreter to start or switch to.
+ */
+static inline bool dvmDebuggerOrProfilerActive(void)
+{
+    /* an attached debugger always counts */
+    if (gDvm.debuggerActive)
+        return true;
+#if defined(WITH_PROFILER)
+    /* with profiling built in, any active profiler also counts */
+    return gDvm.activeProfilers != 0;
+#else
+    return false;
+#endif
+}
+
+/*
+ * Look up an interface on a class using the cache.
+ */
+INLINE Method* dvmFindInterfaceMethodInCache(ClassObject* thisClass,
+    u4 methodIdx, const Method* method, DvmDex* methodClassDex)
+{
+/*
+ * ATOMIC_CACHE_CALC is expanded inside ATOMIC_CACHE_LOOKUP -- presumably
+ * the expression evaluated on a cache miss; confirm against the macro's
+ * definition in the atomic-cache header.
+ */
+#define ATOMIC_CACHE_CALC \
+    dvmInterpFindInterfaceMethod(thisClass, methodIdx, method, methodClassDex)
+
+    return (Method*) ATOMIC_CACHE_LOOKUP(methodClassDex->pInterfaceCache,
+                DEX_INTERFACE_CACHE_SIZE, thisClass, methodIdx);
+
+#undef ATOMIC_CACHE_CALC
+}
+
+/*
+ * Check to see if "obj" is NULL.  If so, throw an exception.  Assumes the
+ * pc has already been exported to the stack.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler calls into
+ * something that could throw an exception (so we have already called
+ * EXPORT_PC at the top).
+ */
+static inline bool checkForNull(Object* obj)
+{
+    if (obj == NULL) {
+        /* caller has already done EXPORT_PC(); just raise the NPE */
+        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        return false;
+    }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+    if (!dvmIsValidObject(obj)) {
+        LOGE("Invalid object %p\n", obj);
+        dvmAbort();
+    }
+#endif
+#ifndef NDEBUG
+    /* a class pointer at or below 64K can't be a real address */
+    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+        /* probable heap corruption */
+        LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+        dvmAbort();
+    }
+#endif
+    return true;
+}
+
+/*
+ * Check to see if "obj" is NULL.  If so, export the PC into the stack
+ * frame and throw an exception.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler doesn't do
+ * anything else that can throw an exception.
+ */
+static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
+{
+    if (obj == NULL) {
+        /* export the PC first so the exception's stack trace is accurate */
+        EXPORT_PC();
+        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+        return false;
+    }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+    if (!dvmIsValidObject(obj)) {
+        LOGE("Invalid object %p\n", obj);
+        dvmAbort();
+    }
+#endif
+#ifndef NDEBUG
+    /* a class pointer at or below 64K can't be a real address */
+    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+        /* probable heap corruption */
+        LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+        dvmAbort();
+    }
+#endif
+    return true;
+}
+
+#endif /*_DALVIK_INTERP_DEFS*/
diff --git a/vm/interp/InterpStd.c b/vm/interp/InterpStd.c
new file mode 100644
index 0000000..3e5e32e
--- /dev/null
+++ b/vm/interp/InterpStd.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Main interpreter loop entry point ("standard" version).
+ */
+#include "Dalvik.h"
+
+#define INTERP_FUNC_NAME dvmInterpretStd
+#define INTERP_TYPE INTERP_STD
+
+#include "interp/InterpCore.h"
+
diff --git a/vm/interp/README.txt b/vm/interp/README.txt
new file mode 100644
index 0000000..62b97a9
--- /dev/null
+++ b/vm/interp/README.txt
@@ -0,0 +1,11 @@
+Dalvik interpreter, "portable" version.
+
+The interpreter is built twice, once with debugging/profiling support,
+once without.  The "standard" version is much smaller than the "debug"
+version, and does less work per instruction, yielding a significant
+performance improvement.
+
+See the "mterp" directory for the non-portable version.
+
+TODO: combine old and new interpreters into a single source base.
+
diff --git a/vm/interp/Stack.c b/vm/interp/Stack.c
new file mode 100644
index 0000000..8245106
--- /dev/null
+++ b/vm/interp/Stack.c
@@ -0,0 +1,1268 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Stacks and their uses (e.g. native --> interpreted method calls).
+ *
+ * See the majestic ASCII art in Stack.h.
+ */
+#include "Dalvik.h"
+#include "jni.h"
+
+#include <stdlib.h>
+#include <stdarg.h>
+
+/*
+ * Initialize the interpreter stack in a new thread.
+ *
+ * Currently this doesn't do much, since we don't need to zero out the
+ * stack (and we really don't want to if it was created with mmap).
+ */
+bool dvmInitInterpStack(Thread* thread, int stackSize)
+{
+    /* the stack memory must already have been allocated by the caller */
+    assert(thread->interpStackStart != NULL);
+
+    assert(thread->curFrame == NULL);
+
+    /* nothing to size or zero here; silence unused-parameter warnings */
+    (void) stackSize;
+
+    return true;
+}
+
+/*
+ * We're calling an interpreted method from an internal VM function or
+ * via reflection.
+ *
+ * Push a frame for an interpreted method onto the stack.  This is only
+ * used when calling into interpreted code from native code.  (The
+ * interpreter does its own stack frame manipulation for interp-->interp
+ * calls.)
+ *
+ * The size we need to reserve is the sum of parameters, local variables,
+ * saved goodies, and outbound parameters.
+ *
+ * We start by inserting a "break" frame, which ensures that the interpreter
+ * hands control back to us after the function we call returns or an
+ * uncaught exception is thrown.
+ */
+static bool dvmPushInterpFrame(Thread* self, const Method* method)
+{
+    StackSaveArea* saveBlock;
+    StackSaveArea* breakSaveBlock;
+    int stackReq;
+    u1* stackPtr;
+
+    assert(!dvmIsNativeMethod(method));
+    assert(!dvmIsAbstractMethod(method));
+
+    /* total: the method's registers, two save areas, and its outs */
+    stackReq = method->registersSize * 4        // params + locals
+                + sizeof(StackSaveArea) * 2     // break frame + regular frame
+                + method->outsSize * 4;         // args to other methods
+
+    /* start at the current frame's save area, or the top of an empty stack */
+    if (self->curFrame != NULL)
+        stackPtr = (u1*) SAVEAREA_FROM_FP(self->curFrame);
+    else
+        stackPtr = self->interpStackStart;
+
+    if (stackPtr - stackReq < self->interpStackEnd) {
+        /* not enough space; dvmHandleStackOverflow raises the exception */
+        LOGW("Stack overflow on call to interp (top=%p cur=%p size=%d %s.%s)\n",
+            self->interpStackStart, self->curFrame, self->interpStackSize,
+            method->clazz->descriptor, method->name);
+        dvmHandleStackOverflow(self);
+        assert(dvmCheckException(self));
+        return false;
+    }
+
+    /*
+     * Shift the stack pointer down, leaving space for the function's
+     * args/registers and save area.
+     */
+    stackPtr -= sizeof(StackSaveArea);
+    breakSaveBlock = (StackSaveArea*)stackPtr;
+    stackPtr -= method->registersSize * 4 + sizeof(StackSaveArea);
+    saveBlock = (StackSaveArea*) stackPtr;
+
+#if !defined(NDEBUG) && !defined(PAD_SAVE_AREA)
+    /* debug -- memset the new stack, unless we want valgrind's help */
+    memset(stackPtr - (method->outsSize*4), 0xaf, stackReq);
+#endif
+#ifdef EASY_GDB
+    /*
+     * Don't manufacture a bogus save-area pointer when there is no
+     * previous frame; mirrors the handling in dvmPushJNIFrame.
+     */
+    if (self->curFrame == NULL)
+        breakSaveBlock->prevSave = NULL;
+    else
+        breakSaveBlock->prevSave = FP_FROM_SAVEAREA(self->curFrame);
+    saveBlock->prevSave = breakSaveBlock;
+#endif
+
+    breakSaveBlock->prevFrame = self->curFrame;
+    breakSaveBlock->savedPc = NULL;             // not required
+    breakSaveBlock->xtra.localRefTop = NULL;    // not required
+    breakSaveBlock->method = NULL;
+    saveBlock->prevFrame = FP_FROM_SAVEAREA(breakSaveBlock);
+    saveBlock->savedPc = NULL;                  // not required
+    saveBlock->xtra.currentPc = NULL;           // not required?
+    saveBlock->method = method;
+
+    LOGVV("PUSH frame: old=%p new=%p (size=%d)\n",
+        self->curFrame, FP_FROM_SAVEAREA(saveBlock),
+        (u1*)self->curFrame - (u1*)FP_FROM_SAVEAREA(saveBlock));
+
+    self->curFrame = FP_FROM_SAVEAREA(saveBlock);
+
+    return true;
+}
+
+/*
+ * We're calling a JNI native method from an internal VM function or
+ * via reflection.  This is also used to create the "fake" native-method
+ * frames at the top of the interpreted stack.
+ *
+ * This actually pushes two frames; the first is a "break" frame.
+ *
+ * The top frame has additional space for JNI local reference tracking.
+ */
+bool dvmPushJNIFrame(Thread* self, const Method* method)
+{
+    StackSaveArea* saveBlock;
+    StackSaveArea* breakSaveBlock;
+    int stackReq;
+    u1* stackPtr;
+
+    assert(dvmIsNativeMethod(method));
+
+    /* native frames have no locals or outs; registersSize covers the params */
+    stackReq = method->registersSize * 4        // params only
+                + sizeof(StackSaveArea) * 2;    // break frame + regular frame
+
+    /* start at the current frame's save area, or the top of an empty stack */
+    if (self->curFrame != NULL)
+        stackPtr = (u1*) SAVEAREA_FROM_FP(self->curFrame);
+    else
+        stackPtr = self->interpStackStart;
+
+    if (stackPtr - stackReq < self->interpStackEnd) {
+        /* not enough space; dvmHandleStackOverflow raises the exception */
+        LOGW("Stack overflow on call to native (top=%p cur=%p size=%d '%s')\n",
+            self->interpStackStart, self->curFrame, self->interpStackSize,
+            method->name);
+        dvmHandleStackOverflow(self);
+        assert(dvmCheckException(self));
+        return false;
+    }
+
+    /*
+     * Shift the stack pointer down, leaving space for just the stack save
+     * area for the break frame, then shift down farther for the full frame.
+     * We leave space for the method args, which are copied in later.
+     */
+    stackPtr -= sizeof(StackSaveArea);
+    breakSaveBlock = (StackSaveArea*)stackPtr;
+    stackPtr -= method->registersSize * 4 + sizeof(StackSaveArea);
+    saveBlock = (StackSaveArea*) stackPtr;
+
+#if !defined(NDEBUG) && !defined(PAD_SAVE_AREA)
+    /* debug -- memset the new stack */
+    memset(stackPtr, 0xaf, stackReq);
+#endif
+#ifdef EASY_GDB
+    /* no previous frame means no previous save area to point at */
+    if (self->curFrame == NULL)
+        breakSaveBlock->prevSave = NULL;
+    else
+        breakSaveBlock->prevSave = FP_FROM_SAVEAREA(self->curFrame);
+    saveBlock->prevSave = breakSaveBlock;
+#endif
+
+    breakSaveBlock->prevFrame = self->curFrame;
+    breakSaveBlock->savedPc = NULL;             // not required
+    breakSaveBlock->xtra.localRefTop = NULL;    // not required
+    breakSaveBlock->method = NULL;
+    saveBlock->prevFrame = FP_FROM_SAVEAREA(breakSaveBlock);
+    saveBlock->savedPc = NULL;                  // not required
+    /* remember the JNI local-ref watermark so the pop can release refs */
+    saveBlock->xtra.localRefTop = self->jniLocalRefTable.nextEntry;
+    saveBlock->method = method;
+
+    LOGVV("PUSH JNI frame: old=%p new=%p (size=%d)\n",
+        self->curFrame, FP_FROM_SAVEAREA(saveBlock),
+        (u1*)self->curFrame - (u1*)FP_FROM_SAVEAREA(saveBlock));
+
+    self->curFrame = FP_FROM_SAVEAREA(saveBlock);
+
+    return true;
+}
+
+/*
+ * This is used by the JNI PushLocalFrame call.  We push a new frame onto
+ * the stack that has no ins, outs, or locals, and no break frame above it.
+ * It's strictly used for tracking JNI local refs, and will be popped off
+ * by dvmPopFrame if it's not removed explicitly.
+ */
+bool dvmPushLocalFrame(Thread* self, const Method* method)
+{
+    StackSaveArea* saveBlock;
+    int stackReq;
+    u1* stackPtr;
+
+    assert(dvmIsNativeMethod(method));
+
+    stackReq = sizeof(StackSaveArea);       // regular frame
+
+    /* can only be called while a native method frame is on the stack */
+    assert(self->curFrame != NULL);
+    stackPtr = (u1*) SAVEAREA_FROM_FP(self->curFrame);
+
+    if (stackPtr - stackReq < self->interpStackEnd) {
+        /* not enough space; let JNI throw the exception */
+        LOGW("Stack overflow on PushLocal (top=%p cur=%p size=%d '%s')\n",
+            self->interpStackStart, self->curFrame, self->interpStackSize,
+            method->name);
+        dvmHandleStackOverflow(self);
+        assert(dvmCheckException(self));
+        return false;
+    }
+
+    /*
+     * Shift the stack pointer down just far enough for a bare save area;
+     * this frame has no break frame, ins, outs, or locals.
+     */
+    stackPtr -= sizeof(StackSaveArea);
+    saveBlock = (StackSaveArea*) stackPtr;
+
+#if !defined(NDEBUG) && !defined(PAD_SAVE_AREA)
+    /* debug -- memset the new stack */
+    memset(stackPtr, 0xaf, stackReq);
+#endif
+#ifdef EASY_GDB
+    saveBlock->prevSave = FP_FROM_SAVEAREA(self->curFrame);
+#endif
+
+    saveBlock->prevFrame = self->curFrame;
+    saveBlock->savedPc = NULL;                  // not required
+    /* remember the JNI local-ref watermark so the pop can release refs */
+    saveBlock->xtra.localRefTop = self->jniLocalRefTable.nextEntry;
+    saveBlock->method = method;
+
+    LOGVV("PUSH JNI local frame: old=%p new=%p (size=%d)\n",
+        self->curFrame, FP_FROM_SAVEAREA(saveBlock),
+        (u1*)self->curFrame - (u1*)FP_FROM_SAVEAREA(saveBlock));
+
+    self->curFrame = FP_FROM_SAVEAREA(saveBlock);
+
+    return true;
+}
+
+/*
+ * Pop one frame pushed on by JNI PushLocalFrame.
+ *
+ * If we've gone too far, the previous frame is either a break frame or
+ * an interpreted frame.  Either way, the method pointer won't match.
+ */
+bool dvmPopLocalFrame(Thread* self)
+{
+    StackSaveArea* saveBlock = SAVEAREA_FROM_FP(self->curFrame);
+
+    assert(!dvmIsBreakFrame(self->curFrame));
+    /*
+     * A frame pushed by dvmPushLocalFrame shares its method pointer with
+     * the frame beneath it; any other method pointer means we've hit the
+     * original native frame and must not pop further.
+     */
+    if (saveBlock->method != SAVEAREA_FROM_FP(saveBlock->prevFrame)->method) {
+        /*
+         * The previous frame doesn't have the same method pointer -- we've
+         * been asked to pop too much.
+         */
+        assert(dvmIsBreakFrame(saveBlock->prevFrame) ||
+               !dvmIsNativeMethod(
+                       SAVEAREA_FROM_FP(saveBlock->prevFrame)->method));
+        return false;
+    }
+
+    LOGVV("POP JNI local frame: removing %s, now %s\n",
+        saveBlock->method->name,
+        SAVEAREA_FROM_FP(saveBlock->prevFrame)->method->name);
+    /* release the local refs added since this frame was pushed */
+    dvmPopJniLocals(self, saveBlock);
+    self->curFrame = saveBlock->prevFrame;
+
+    return true;
+}
+
+/*
+ * Pop a frame we added.  There should be one method frame and one break
+ * frame.
+ *
+ * If JNI Push/PopLocalFrame calls were mismatched, we might end up
+ * popping multiple method frames before we find the break.
+ *
+ * Returns "false" if there was no frame to pop.
+ */
+static bool dvmPopFrame(Thread* self)
+{
+    StackSaveArea* saveBlock;
+
+    if (self->curFrame == NULL)
+        return false;
+
+    saveBlock = SAVEAREA_FROM_FP(self->curFrame);
+    assert(!dvmIsBreakFrame(self->curFrame));
+
+    /*
+     * Remove everything up to the break frame.  If this was a call into
+     * native code, pop the JNI local references table.
+     */
+    while (saveBlock->prevFrame != NULL && saveBlock->method != NULL) {
+        /* more than one iteration: probably a native->native JNI call */
+
+        if (dvmIsNativeMethod(saveBlock->method)) {
+            LOGVV("Popping JNI stack frame for %s.%s%s\n",
+                saveBlock->method->clazz->descriptor,
+                saveBlock->method->name,
+                (SAVEAREA_FROM_FP(saveBlock->prevFrame)->method == NULL) ?
+                "" : " (JNI local)");
+            /* the watermark must lie within the thread's local ref table */
+            assert(saveBlock->xtra.localRefTop != NULL);
+            assert(saveBlock->xtra.localRefTop >=self->jniLocalRefTable.table &&
+                saveBlock->xtra.localRefTop <=self->jniLocalRefTable.nextEntry);
+
+            dvmPopJniLocals(self, saveBlock);
+        }
+
+        saveBlock = SAVEAREA_FROM_FP(saveBlock->prevFrame);
+    }
+    /* a break frame is identified by its NULL method pointer */
+    if (saveBlock->method != NULL) {
+        LOGE("PopFrame missed the break\n");
+        assert(false);
+        dvmAbort();     // stack trashed -- nowhere to go in this thread
+    }
+
+    LOGVV("POP frame: cur=%p new=%p\n",
+        self->curFrame, saveBlock->prevFrame);
+
+    self->curFrame = saveBlock->prevFrame;
+    return true;
+}
+
+/*
+ * Common code for dvmCallMethodV/A and dvmInvokeMethod.
+ *
+ * Pushes a call frame on, advancing self->curFrame.
+ */
+static ClassObject* callPrep(Thread* self, const Method* method, Object* obj,
+    bool checkAccess)
+{
+    ClassObject* clazz;
+
+#ifndef NDEBUG
+    /* callers are expected to be in the "running" state */
+    if (self->status != THREAD_RUNNING) {
+        LOGW("Status=%d on call to %s.%s -\n", self->status,
+            method->clazz->descriptor, method->name);
+    }
+#endif
+
+    assert(self != NULL);
+    assert(method != NULL);
+
+    /* for virtual calls use the receiver's class; for static, the declarer */
+    if (obj != NULL)
+        clazz = obj->clazz;
+    else
+        clazz = method->clazz;
+
+    IF_LOGVV() {
+        char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+        LOGVV("thread=%d native code calling %s.%s %s\n", self->threadId,
+            clazz->descriptor, method->name, desc);
+        free(desc);
+    }
+
+    if (checkAccess) {
+        /* needed for java.lang.reflect.Method.invoke */
+        if (!dvmCheckMethodAccess(dvmGetCaller2Class(self->curFrame),
+                method))
+        {
+            /* note this throws IAException, not IAError */
+            dvmThrowException("Ljava/lang/IllegalAccessException;",
+                "access to method denied");
+            return NULL;
+        }
+    }
+
+    /*
+     * Push a call frame on.  If there isn't enough room for ins, locals,
+     * outs, and the saved state, it will throw an exception.
+     *
+     * This updates self->curFrame.
+     */
+    if (dvmIsNativeMethod(method)) {
+        /* native code calling native code the hard way */
+        if (!dvmPushJNIFrame(self, method)) {
+            assert(dvmCheckException(self));
+            return NULL;
+        }
+    } else {
+        /* native code calling interpreted code */
+        if (!dvmPushInterpFrame(self, method)) {
+            assert(dvmCheckException(self));
+            return NULL;
+        }
+    }
+
+    /* on success, the returned class is non-NULL and a frame is pushed */
+    return clazz;
+}
+
+/*
+ * Issue a method call.
+ *
+ * Pass in NULL for "obj" on calls to static methods.
+ *
+ * (Note this can't be inlined because it takes a variable number of args.)
+ */
+/*
+ * Variadic front end: package the "..." arguments into a va_list and
+ * forward to dvmCallMethodV, which does the actual marshaling.
+ *
+ * (The unused "JValue result" local that used to live here has been
+ * removed; the callee writes directly through pResult.)
+ */
+void dvmCallMethod(Thread* self, const Method* method, Object* obj,
+    JValue* pResult, ...)
+{
+    va_list args;
+
+    va_start(args, pResult);
+    dvmCallMethodV(self, method, obj, pResult, args);
+    va_end(args);
+}
+
+/*
+ * Issue a method call with a variable number of arguments.  We process
+ * the contents of "args" by scanning the method signature.
+ *
+ * Pass in NULL for "obj" on calls to static methods.
+ *
+ * We don't need to take the class as an argument because, in Dalvik,
+ * we don't need to worry about static synchronized methods.
+ */
+void dvmCallMethodV(Thread* self, const Method* method, Object* obj,
+    JValue* pResult, va_list args)
+{
+    const char* desc = &(method->shorty[1]); // [0] is the return type.
+    int verifyCount = 0;        /* debug: count of register slots filled */
+    ClassObject* clazz;
+    u4* ins;
+
+    /* pushes the new frame; on failure an exception is already pending */
+    clazz = callPrep(self, method, obj, false);
+    if (clazz == NULL)
+        return;
+
+    /* "ins" for new frame start at frame pointer plus locals */
+    ins = ((u4*)self->curFrame) + (method->registersSize - method->insSize);
+
+    //LOGD("  FP is %p, INs live at >= %p\n", self->curFrame, ins);
+
+    /* put "this" pointer into in0 if appropriate */
+    if (!dvmIsStaticMethod(method)) {
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+        assert(obj != NULL && dvmIsValidObject(obj));
+#endif
+        *ins++ = (u4) obj;
+        verifyCount++;
+    }
+
+    /* walk the shorty, pulling one vararg per parameter character */
+    while (*desc != '\0') {
+        switch (*(desc++)) {
+            case 'D': case 'J': {
+                /* 64-bit values occupy two register slots */
+                u8 val = va_arg(args, u8);
+                memcpy(ins, &val, 8);       // EABI prevents direct store
+                ins += 2;
+                verifyCount += 2;
+                break;
+            }
+            case 'F': {
+                /* floats were normalized to doubles; convert back */
+                float f = (float) va_arg(args, double);
+                *ins++ = dvmFloatToU4(f);
+                verifyCount++;
+                break;
+            }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+            case 'L': {     /* 'shorty' descr uses L for all refs, incl array */
+                Object* argObj = (Object*) va_arg(args, u4);
+                assert(obj == NULL || dvmIsValidObject(obj));
+                *ins++ = (u4) argObj;
+                verifyCount++;
+                break;
+            }
+#endif
+            default: {
+                /* 32-bit primitives and (without extra validation) refs */
+                *ins++ = va_arg(args, u4);
+                verifyCount++;
+                break;
+            }
+        }
+    }
+
+#ifndef NDEBUG
+    /* sanity check: the shorty must account for exactly insSize slots */
+    if (verifyCount != method->insSize) {
+        LOGE("Got vfycount=%d insSize=%d for %s.%s\n", verifyCount,
+            method->insSize, clazz->descriptor, method->name);
+        assert(false);
+        goto bail;
+    }
+#endif
+
+    //dvmDumpThreadStack(dvmThreadSelf());
+
+    if (dvmIsNativeMethod(method)) {
+        /*
+         * Because we leave no space for local variables, "curFrame" points
+         * directly at the method arguments.
+         */
+        (*method->nativeFunc)(self->curFrame, pResult, method, self);
+    } else {
+        dvmInterpret(self, method, pResult);
+    }
+
+bail:
+    /* always unwind the frame(s) pushed by callPrep */
+    dvmPopFrame(self);
+}
+
+/*
+ * Issue a method call with arguments provided in an array.  We process
+ * the contents of "args" by scanning the method signature.
+ *
+ * The values were likely placed into an uninitialized jvalue array using
+ * the field specifiers, which means that sub-32-bit fields (e.g. short,
+ * boolean) may not have 32 or 64 bits of valid data.  This is different
+ * from the varargs invocation where the C compiler does a widening
+ * conversion when calling a function.  As a result, we have to be a
+ * little more precise when pulling stuff out.
+ */
+void dvmCallMethodA(Thread* self, const Method* method, Object* obj,
+    JValue* pResult, const jvalue* args)
+{
+    const char* desc = &(method->shorty[1]); // [0] is the return type.
+    int verifyCount = 0;        /* debug: count of register slots filled */
+    ClassObject* clazz;
+    u4* ins;
+
+    /* pushes the new frame; on failure an exception is already pending */
+    clazz = callPrep(self, method, obj, false);
+    if (clazz == NULL)
+        return;
+
+    /* "ins" for new frame start at frame pointer plus locals */
+    ins = ((u4*)self->curFrame) + (method->registersSize - method->insSize);
+
+    /* put "this" pointer into in0 if appropriate */
+    if (!dvmIsStaticMethod(method)) {
+        assert(obj != NULL);
+        *ins++ = (u4) obj;
+        verifyCount++;
+    }
+
+    /*
+     * Walk the shorty, pulling one jvalue per parameter.  Narrow fields
+     * are read through the matching union member so we widen explicitly
+     * (the caller may not have initialized the full 32/64 bits).
+     */
+    while (*desc != '\0') {
+        switch (*(desc++)) {
+            case 'D': case 'J': {
+                memcpy(ins, &args->j, 8);   /* EABI prevents direct store */
+                ins += 2;
+                verifyCount += 2;
+                args++;
+                break;
+            }
+            case 'F': case 'I': case 'L': { /* (no '[' in short signatures) */
+                *ins++ = args->i;           /* get all 32 bits */
+                verifyCount++;
+                args++;
+                break;
+            }
+            case 'S': {
+                *ins++ = args->s;           /* 16 bits, sign-extended */
+                verifyCount++;
+                args++;
+                break;
+            }
+            case 'C': {
+                *ins++ = args->c;           /* 16 bits, unsigned */
+                verifyCount++;
+                args++;
+                break;
+            }
+            case 'B': {
+                *ins++ = args->b;           /* 8 bits, sign-extended */
+                verifyCount++;
+                args++;
+                break;
+            }
+            case 'Z': {
+                *ins++ = args->z;           /* 8 bits, zero or non-zero */
+                verifyCount++;
+                args++;
+                break;
+            }
+            default: {
+                LOGE("Invalid char %c in short signature of %s.%s\n",
+                    *(desc-1), clazz->descriptor, method->name);
+                assert(false);
+                goto bail;
+            }
+        }
+    }
+
+#ifndef NDEBUG
+    /* sanity check: the shorty must account for exactly insSize slots */
+    if (verifyCount != method->insSize) {
+        LOGE("Got vfycount=%d insSize=%d for %s.%s\n", verifyCount,
+            method->insSize, clazz->descriptor, method->name);
+        assert(false);
+        goto bail;
+    }
+#endif
+
+    if (dvmIsNativeMethod(method)) {
+        /*
+         * Because we leave no space for local variables, "curFrame" points
+         * directly at the method arguments.
+         */
+        (*method->nativeFunc)(self->curFrame, pResult, method, self);
+    } else {
+        dvmInterpret(self, method, pResult);
+    }
+
+bail:
+    /* always unwind the frame(s) pushed by callPrep */
+    dvmPopFrame(self);
+}
+
+/*
+ * Invoke a method, using the specified arguments and return type, through
+ * one of the reflection interfaces.  Could be a virtual or direct method
+ * (including constructors).  Used for reflection.
+ *
+ * Deals with boxing/unboxing primitives and performs widening conversions.
+ *
+ * "invokeObj" will be null for a static method.
+ *
+ * If the invocation returns with an exception raised, we have to wrap it.
+ */
+Object* dvmInvokeMethod(Object* obj, const Method* method,
+    ArrayObject* argList, ArrayObject* params, ClassObject* returnType,
+    bool noAccessCheck)
+{
+    ClassObject* clazz;
+    Object* retObj = NULL;
+    Thread* self = dvmThreadSelf();
+    s4* ins;
+    int verifyCount, argListLength;
+    JValue retval;
+
+    /* verify arg count */
+    if (argList != NULL)
+        argListLength = argList->length;
+    else
+        argListLength = 0;
+    if (argListLength != (int) params->length) {
+        LOGI("invoke: expected %d args, received %d args\n",
+            params->length, argListLength);
+        dvmThrowException("Ljava/lang/IllegalArgumentException;",
+            "wrong number of arguments");
+        return NULL;
+    }
+
+    /* pushes the new frame; on failure an exception is already pending */
+    clazz = callPrep(self, method, obj, !noAccessCheck);
+    if (clazz == NULL)
+        return NULL;
+
+    /* "ins" for new frame start at frame pointer plus locals */
+    ins = ((s4*)self->curFrame) + (method->registersSize - method->insSize);
+    verifyCount = 0;
+
+    //LOGD("  FP is %p, INs live at >= %p\n", self->curFrame, ins);
+
+    /* put "this" pointer into in0 if appropriate */
+    if (!dvmIsStaticMethod(method)) {
+        assert(obj != NULL);
+        *ins++ = (s4) obj;
+        verifyCount++;
+    }
+
+    /*
+     * Copy the args onto the stack.  Primitive types are converted when
+     * necessary, and object types are verified.
+     */
+    DataObject** args;
+    ClassObject** types;
+    int i;
+
+    args = (DataObject**) argList->contents;
+    types = (ClassObject**) params->contents;
+    for (i = 0; i < argListLength; i++) {
+        int width;
+
+        /* unbox/widen one argument; width is slots consumed, <0 on mismatch */
+        width = dvmConvertArgument(*args++, *types++, ins);
+        if (width < 0) {
+            if (*(args-1) != NULL) {
+                LOGV("invoke: type mismatch on arg %d ('%s' '%s')\n",
+                    i, (*(args-1))->obj.clazz->descriptor,
+                    (*(types-1))->descriptor);
+            }
+            dvmPopFrame(self);      // throw wants to pull PC out of stack
+            dvmThrowException("Ljava/lang/IllegalArgumentException;",
+                "argument type mismatch");
+            goto bail_popped;
+        }
+
+        ins += width;
+        verifyCount += width;
+    }
+
+    /* sanity check: the converted args must fill exactly insSize slots */
+    if (verifyCount != method->insSize) {
+        LOGE("Got vfycount=%d insSize=%d for %s.%s\n", verifyCount,
+            method->insSize, clazz->descriptor, method->name);
+        assert(false);
+        goto bail;
+    }
+    //dvmDumpThreadStack(dvmThreadSelf());
+
+    if (dvmIsNativeMethod(method)) {
+        /*
+         * Because we leave no space for local variables, "curFrame" points
+         * directly at the method arguments.
+         */
+        (*method->nativeFunc)(self->curFrame, &retval, method, self);
+    } else {
+        dvmInterpret(self, method, &retval);
+    }
+
+    /*
+     * If an exception is raised, wrap and replace.  This is necessary
+     * because the invoked method could have thrown a checked exception
+     * that the caller wasn't prepared for.
+     *
+     * We might be able to do this up in the interpreted code, but that will
+     * leave us with a shortened stack trace in the top-level exception.
+     */
+    if (dvmCheckException(self)) {
+        Object* origExcep;
+        ClassObject* iteClass;
+
+        /* keep the original exception alive across the allocations below */
+        origExcep = dvmGetException(self);
+        dvmAddTrackedAlloc(origExcep, self);
+
+        dvmClearException(self);        // clear before class lookup
+        iteClass = dvmFindSystemClass(
+                "Ljava/lang/reflect/InvocationTargetException;");
+        if (iteClass != NULL) {
+            Object* iteExcep;
+            Method* initMethod;
+
+            iteExcep = dvmAllocObject(iteClass, ALLOC_DEFAULT);
+            if (iteExcep != NULL) {
+                initMethod = dvmFindDirectMethodByDescriptor(iteClass, "<init>",
+                                "(Ljava/lang/Throwable;)V");
+                if (initMethod != NULL) {
+                    JValue unused;
+                    dvmCallMethod(self, initMethod, iteExcep, &unused,
+                        origExcep);
+
+                    /* if <init> succeeded, replace the old exception */
+                    if (!dvmCheckException(self))
+                        dvmSetException(self, iteExcep);
+                }
+                dvmReleaseTrackedAlloc(iteExcep, NULL);
+
+                /* if initMethod doesn't exist, or failed... */
+                if (!dvmCheckException(self))
+                    dvmSetException(self, origExcep);
+            }
+        }
+
+        /* one way or another, an exception must be pending on exit */
+        assert(dvmCheckException(self));
+        dvmReleaseTrackedAlloc(origExcep, self);
+    } else {
+        /*
+         * If this isn't a void method or constructor, convert the return type
+         * to an appropriate object.
+         *
+         * We don't do this when an exception is raised because the value
+         * in "retval" is undefined.
+         */
+        if (returnType != NULL) {
+            retObj = (Object*)dvmWrapPrimitive(retval, returnType);
+            dvmReleaseTrackedAlloc(retObj, NULL);
+        }
+    }
+
+bail:
+    dvmPopFrame(self);
+bail_popped:
+    return retObj;
+}
+
+/* State shared with lineNumForPcCb during a debug-info scan. */
+typedef struct LineNumFromPcContext {
+    u4 address;     /* target PC offset (in 16-bit code units) */
+    u4 lineNum;     /* best line number found so far; the result */
+} LineNumFromPcContext;
+
+/*
+ * Callback for dexDecodeDebugInfo position entries.
+ *
+ * Entries arrive in ascending address order, so we record each line
+ * number until we reach (or pass) the target address; the last value
+ * recorded before passing it is the answer.  Returns non-zero to tell
+ * the decoder to stop iterating.
+ */
+static int lineNumForPcCb(void *cnxt, u4 address, u4 lineNum)
+{
+    LineNumFromPcContext *pContext = (LineNumFromPcContext *)cnxt;
+
+    if (address < pContext->address) {
+        /* not there yet -- remember this line and keep scanning */
+        pContext->lineNum = lineNum;
+        return 0;
+    }
+
+    if (address == pContext->address) {
+        /* exact hit -- record it and stop */
+        pContext->lineNum = lineNum;
+        return 1;
+    }
+
+    /* went past the target; the previously recorded line number stands */
+    return 1;
+}
+
+/*
+ * Determine the source file line number based on the program counter.
+ * "pc" is an offset, in 16-bit units, from the start of the method's code.
+ *
+ * Returns -1 if no match was found (possibly because the source files were
+ * compiled without "-g", so no line number information is present).
+ * Returns -2 for native methods (as expected in exception traces).
+ */
+int dvmLineNumFromPC(const Method* method, u4 relPc)
+{
+    const DexCode* pDexCode = dvmGetMethodCode(method);
+    LineNumFromPcContext context;
+
+    if (pDexCode == NULL) {
+        /* native methods conventionally report -2 in stack traces;
+         * abstract method stubs have no code and report -1 */
+        return (dvmIsNativeMethod(method) && !dvmIsAbstractMethod(method))
+            ? -2 : -1;
+    }
+
+    memset(&context, 0, sizeof(context));
+    context.address = relPc;
+    context.lineNum = -1;   /* default when no line table is present */
+
+    /* scan the debug info; lineNumForPcCb fills in context.lineNum */
+    dexDecodeDebugInfo(method->clazz->pDvmDex->pDexFile, pDexCode,
+            method->clazz->descriptor,
+            method->prototype.protoIdx,
+            method->accessFlags,
+            lineNumForPcCb, NULL, &context);
+
+    return context.lineNum;
+}
+
+/*
+ * Compute the frame depth.
+ *
+ * Excludes "break" frames.
+ */
+int dvmComputeExactFrameDepth(const void* fp)
+{
+    const void* frame = fp;
+    int depth = 0;
+
+    /* walk the frame chain, counting everything except break frames */
+    while (frame != NULL) {
+        if (!dvmIsBreakFrame(frame))
+            depth++;
+        frame = SAVEAREA_FROM_FP(frame)->prevFrame;
+    }
+
+    return depth;
+}
+
+/*
+ * Compute the "vague" frame depth, which is just a pointer subtraction.
+ * The result is NOT an overly generous assessment of the number of
+ * frames; the only meaningful use is to compare against the result of
+ * an earlier invocation.
+ *
+ * Useful for implementing single-step debugger modes, which may need to
+ * call this for every instruction.
+ */
+int dvmComputeVagueFrameDepth(Thread* thread, const void* fp)
+{
+    const u1* stackTop = thread->interpStackStart;
+
+    /* sanity check: fp must lie within this thread's interp stack */
+    assert((u1*) fp >= stackTop - thread->interpStackSize &&
+           (u1*) fp < stackTop);
+
+    /* stacks grow downward, so distance from the start is the "depth" */
+    return stackTop - (u1*) fp;
+}
+
+/*
+ * Get the calling frame.  Pass in the current fp.
+ *
+ * Skip "break" frames and reflection invoke frames.
+ */
+void* dvmGetCallerFP(const void* curFrame)
+{
+    void* caller = SAVEAREA_FROM_FP(curFrame)->prevFrame;
+    StackSaveArea* saveArea;
+
+    /*
+     * Walk past break frames.  When a break frame was pushed by
+     * java.lang.reflect.Method.invoke() we don't want to report Method
+     * itself as the caller, so we skip the reflection frame and loop to
+     * re-test for another break frame above it.
+     */
+    while (dvmIsBreakFrame(caller)) {
+        /* pop up one more */
+        caller = SAVEAREA_FROM_FP(caller)->prevFrame;
+        if (caller == NULL)
+            return NULL;        /* hit the top */
+
+        saveArea = SAVEAREA_FROM_FP(caller);
+        if (!dvmIsReflectionMethod(saveArea->method))
+            break;              /* ordinary frame; this is the caller */
+
+        caller = saveArea->prevFrame;
+        assert(caller != NULL);
+    }
+
+    return caller;
+}
+
+/*
+ * Get the caller's class.  Pass in the current fp.
+ *
+ * This is used by e.g. java.lang.Class.
+ */
+ClassObject* dvmGetCallerClass(const void* curFrame)
+{
+    /* find the calling frame, skipping break/reflection frames */
+    void* callerFrame = dvmGetCallerFP(curFrame);
+
+    if (callerFrame == NULL)
+        return NULL;            /* we're at the top of the stack */
+
+    return SAVEAREA_FROM_FP(callerFrame)->method->clazz;
+}
+
+/*
+ * Get the caller's caller's class.  Pass in the current fp.
+ *
+ * This is used by e.g. java.lang.Class, which wants to know about the
+ * class loader of the method that called it.
+ */
+ClassObject* dvmGetCaller2Class(const void* curFrame)
+{
+    void* caller = SAVEAREA_FROM_FP(curFrame)->prevFrame;
+    void* grandCaller;
+
+    /* a lone break frame means we're already at the top of the stack */
+    if (dvmIsBreakFrame(caller) && SAVEAREA_FROM_FP(caller)->prevFrame == NULL)
+        return NULL;
+
+    /* step out one more level, skipping break/reflection frames */
+    grandCaller = dvmGetCallerFP(caller);
+    if (grandCaller == NULL)
+        return NULL;
+
+    return SAVEAREA_FROM_FP(grandCaller)->method->clazz;
+}
+
+/*
+ * Get the caller's caller's caller's class.  Pass in the current fp.
+ *
+ * This is used by e.g. java.lang.Class, which wants to know about the
+ * class loader of the method that called it.
+ */
+ClassObject* dvmGetCaller3Class(const void* curFrame)
+{
+    void* frame = SAVEAREA_FROM_FP(curFrame)->prevFrame;
+
+    /* a lone break frame means we're already at the top of the stack */
+    if (dvmIsBreakFrame(frame) && SAVEAREA_FROM_FP(frame)->prevFrame == NULL)
+        return NULL;
+
+    /* step out two more levels, skipping break/reflection frames */
+    frame = dvmGetCallerFP(frame);
+    if (frame == NULL)
+        return NULL;
+    frame = dvmGetCallerFP(frame);
+    if (frame == NULL)
+        return NULL;
+
+    return SAVEAREA_FROM_FP(frame)->method->clazz;
+}
+
+/*
+ * Create a flat array of methods that comprise the current interpreter
+ * stack trace.  Pass in the current frame ptr.
+ *
+ * Allocates a new array and fills it with method pointers.  Break frames
+ * are skipped, but reflection invocations are not.  The caller must free
+ * "*pArray".
+ *
+ * The current frame will be in element 0.
+ *
+ * Returns "true" on success, "false" on failure (e.g. malloc failed).
+ */
+/*
+ * Build a flat array of the methods on the interpreted stack.  Break
+ * frames are skipped; the current frame lands in element 0.  On success
+ * the caller owns "*pArray" and must free() it.
+ *
+ * Returns "true" on success, "false" if the allocation failed.
+ */
+bool dvmCreateStackTraceArray(const void* fp, const Method*** pArray,
+    int* pLength)
+{
+    const Method** array;
+    int idx, depth;
+
+    depth = dvmComputeExactFrameDepth(fp);
+
+    /*
+     * Allocate at least one element so that a zero-depth stack, combined
+     * with a malloc(0) implementation that returns NULL, isn't mistaken
+     * for an out-of-memory failure.
+     */
+    array = (const Method**) malloc((depth > 0 ? depth : 1) * sizeof(Method*));
+    if (array == NULL)
+        return false;
+
+    /* record the method for every non-break frame, top of stack first */
+    for (idx = 0; fp != NULL; fp = SAVEAREA_FROM_FP(fp)->prevFrame) {
+        if (!dvmIsBreakFrame(fp))
+            array[idx++] = SAVEAREA_FROM_FP(fp)->method;
+    }
+    assert(idx == depth);
+
+    *pArray = array;
+    *pLength = depth;
+    return true;
+}
+
+/*
+ * Open up the reserved area and throw an exception.  The reserved area
+ * should only be needed to create and initialize the exception itself.
+ *
+ * If we already opened it and we're continuing to overflow, abort the VM.
+ *
+ * We have to leave the "reserved" area open until the "catch" handler has
+ * finished doing its processing.  This is because the catch handler may
+ * need to resolve classes, which requires calling into the class loader if
+ * the classes aren't already in the "initiating loader" list.
+ */
+void dvmHandleStackOverflow(Thread* self)
+{
+    /*
+     * Can we make the reserved area available?
+     */
+    if (self->stackOverflowed) {
+        /*
+         * Already did, nothing to do but bail.
+         */
+        LOGE("DalvikVM: double-overflow of stack in threadid=%d; aborting\n",
+            self->threadId);
+        dvmDumpThread(self, false);
+        dvmAbort();
+    }
+
+    /* open it up to the full range */
+    LOGI("Stack overflow, expanding (%p to %p)\n", self->interpStackEnd,
+        self->interpStackStart - self->interpStackSize);
+    //dvmDumpThread(self, false);
+    /* interpStackEnd is the usable limit; lowering it to the true bottom
+     * of the stack exposes the reserve to the exception machinery */
+    self->interpStackEnd = self->interpStackStart - self->interpStackSize;
+    self->stackOverflowed = true;
+
+    /*
+     * If we were trying to throw an exception when the stack overflowed,
+     * we will blow up when doing the class lookup on StackOverflowError
+     * because of the pending exception.  So, we clear it and make it
+     * the cause of the SOE.
+     */
+    Object* excep = dvmGetException(self);
+    if (excep != NULL) {
+        LOGW("Stack overflow while throwing exception\n");
+        dvmClearException(self);
+    }
+    /* "excep" may be NULL; the chained-throw call accepts that as no cause */
+    dvmThrowChainedException("Ljava/lang/StackOverflowError;", NULL, excep);
+}
+
+/*
+ * Reduce the available stack size.  By this point we should have finished
+ * our overflow processing.
+ */
+void dvmCleanupStackOverflow(Thread* self)
+{
+    /* true bottom of the stack, and the usable limit we want to restore */
+    const u1* stackBottom = self->interpStackStart - self->interpStackSize;
+    const u1* newStackEnd = stackBottom + STACK_OVERFLOW_RESERVE;
+
+    assert(self->stackOverflowed);
+
+    if ((u1*)self->curFrame <= newStackEnd) {
+        /* still executing inside the reserved region -- fatal */
+        LOGE("Can't shrink stack: curFrame is in reserved area (%p %p)\n",
+            self->interpStackEnd, self->curFrame);
+        dvmDumpThread(self, false);
+        dvmAbort();
+    }
+
+    self->interpStackEnd = newStackEnd;
+    self->stackOverflowed = false;
+
+    LOGI("Shrank stack (to %p, curFrame is %p)\n", self->interpStackEnd,
+        self->curFrame);
+}
+
+
+/*
+ * Dump stack frames, starting from the specified frame and moving down.
+ *
+ * Each frame holds a pointer to the currently executing method, and the
+ * saved program counter from the caller ("previous" frame).  This means
+ * we don't have the PC for the current method on the stack, which is
+ * pretty reasonable since it's in the "PC register" for the VM.  Because
+ * exceptions need to show the correct line number we actually *do* have
+ * an updated version in the frame's "xtra.currentPc", but it's unreliable.
+ *
+ * Note "framePtr" could be NULL in rare circumstances.
+ */
+static void dumpFrames(const DebugOutputTarget* target, void* framePtr,
+    Thread* thread)
+{
+    const StackSaveArea* saveArea;
+    const Method* method;
+    int checkCount = 0;         /* guards against a corrupted frame chain */
+    const u2* currentPc = NULL;
+    bool first = true;
+
+    /*
+     * The "currentPc" is updated whenever we execute an instruction that
+     * might throw an exception.  Show it here.
+     */
+    if (framePtr != NULL && !dvmIsBreakFrame(framePtr)) {
+        saveArea = SAVEAREA_FROM_FP(framePtr);
+
+        if (saveArea->xtra.currentPc != NULL)
+            currentPc = saveArea->xtra.currentPc;
+    }
+
+    while (framePtr != NULL) {
+        saveArea = SAVEAREA_FROM_FP(framePtr);
+        method = saveArea->method;
+
+        if (dvmIsBreakFrame(framePtr)) {
+            //dvmPrintDebugMessage(target, "  (break frame)\n");
+        } else {
+            int relPc;
+
+            /* relPc is in 16-bit code units from the start of the method */
+            if (currentPc != NULL)
+                relPc = currentPc - saveArea->method->insns;
+            else
+                relPc = -1;
+
+            char* className = dvmDescriptorToDot(method->clazz->descriptor);
+            if (dvmIsNativeMethod(method))
+                dvmPrintDebugMessage(target,
+                    "  at %s.%s(Native Method)\n", className, method->name);
+            else {
+                /* "~" flags the top frame's possibly-unreliable line number */
+                dvmPrintDebugMessage(target,
+                    "  at %s.%s(%s:%s%d)\n",
+                    className, method->name, dvmGetMethodSourceFile(method),
+                    (relPc >= 0 && first) ? "~" : "",
+                    relPc < 0 ? -1 : dvmLineNumFromPC(method, relPc));
+            }
+            free(className);
+
+            if (first &&
+                (thread->status == THREAD_WAIT ||
+                 thread->status == THREAD_TIMED_WAIT))
+            {
+                /* warning: wait status not stable, even in suspend */
+                Monitor* mon = thread->waitMonitor;
+                Object* obj = dvmGetMonitorObject(mon);
+                if (obj != NULL) {
+                    className = dvmDescriptorToDot(obj->clazz->descriptor);
+                    dvmPrintDebugMessage(target,
+                        "  - waiting on <%p> (a %s)\n", mon, className);
+                    free(className);
+                }
+            }
+
+        }
+
+        /*
+         * Get saved PC for previous frame.  There's no savedPc in a "break"
+         * frame, because that represents native or interpreted code
+         * invoked by the VM.  The saved PC is sitting in the "PC register",
+         * a local variable on the native stack.
+         */
+        currentPc = saveArea->savedPc;
+
+        first = false;
+
+        /* a self-referencing prevFrame would loop forever */
+        assert(framePtr != saveArea->prevFrame);
+        framePtr = saveArea->prevFrame;
+
+        checkCount++;
+        if (checkCount > 200) {
+            dvmPrintDebugMessage(target,
+                "  ***** printed %d frames, not showing any more\n",
+                checkCount);
+            break;
+        }
+    }
+    dvmPrintDebugMessage(target, "\n");
+}
+
+
+/*
+ * Dump the stack for the specified thread.
+ */
+void dvmDumpThreadStack(const DebugOutputTarget* target, Thread* thread)
+{
+    /* the thread is expected to be suspended, so its chain is stable */
+    dumpFrames(target, thread->curFrame, thread);
+}
+
+/*
+ * Dump the stack for the specified thread, which is still running.
+ *
+ * This is very dangerous, because stack frames are being pushed on and
+ * popped off, and if the thread exits we'll be looking at freed memory.
+ * The plan here is to take a snapshot of the stack and then dump that
+ * to try to minimize the chances of catching it mid-update.  This should
+ * work reasonably well on a single-CPU system.
+ *
+ * There is a small chance that calling here will crash the VM.
+ */
+/*
+ * Dump the stack of a thread that is still running, by snapshotting the
+ * whole interp stack into a heap buffer and rewriting the prevFrame
+ * pointers to point into the copy.  See the comment above for the risks.
+ *
+ * Fix: the malloc() result was previously passed straight to memcpy()
+ * without a NULL check, crashing on allocation failure.
+ */
+void dvmDumpRunningThreadStack(const DebugOutputTarget* target, Thread* thread)
+{
+    StackSaveArea* saveArea;
+    const u1* origStack;
+    u1* stackCopy = NULL;
+    int origSize, fpOffset;
+    void* fp;
+    int depthLimit = 200;       /* bail out if the copied chain looks cyclic */
+
+    if (thread == NULL || thread->curFrame == NULL) {
+        dvmPrintDebugMessage(target,
+            "DumpRunning: Thread at %p has no curFrame (threadid=%d)\n",
+            thread, (thread != NULL) ? thread->threadId : 0);
+        return;
+    }
+
+    /* wait for a full quantum */
+    sched_yield();
+
+    /* copy the info we need, then the stack itself */
+    origSize = thread->interpStackSize;
+    origStack = (const u1*) thread->interpStackStart - origSize;
+    stackCopy = (u1*) malloc(origSize);
+    if (stackCopy == NULL) {
+        /* don't crash the VM just because a debug dump couldn't allocate */
+        dvmPrintDebugMessage(target,
+            "DumpRunning: unable to allocate %d bytes for stack copy\n",
+            origSize);
+        return;
+    }
+    fpOffset = (u1*) thread->curFrame - origStack;
+    memcpy(stackCopy, origStack, origSize);
+
+    /*
+     * Run through the stack and rewrite the "prev" pointers.
+     */
+    //LOGI("DR: fpOff=%d (from %p %p)\n",fpOffset, origStack, thread->curFrame);
+    fp = stackCopy + fpOffset;
+    while (true) {
+        int prevOffset;
+
+        if (depthLimit-- < 0) {
+            /* we're probably screwed */
+            dvmPrintDebugMessage(target, "DumpRunning: depth limit hit\n");
+            dvmAbort();
+        }
+        saveArea = SAVEAREA_FROM_FP(fp);
+        if (saveArea->prevFrame == NULL)
+            break;
+
+        /* translate the original-stack pointer into the copy */
+        prevOffset = (u1*) saveArea->prevFrame - origStack;
+        if (prevOffset < 0 || prevOffset > origSize) {
+            dvmPrintDebugMessage(target,
+                "DumpRunning: bad offset found: %d (from %p %p)\n",
+                prevOffset, origStack, saveArea->prevFrame);
+            saveArea->prevFrame = NULL;     /* truncate the chain here */
+            break;
+        }
+
+        saveArea->prevFrame = stackCopy + prevOffset;
+        fp = saveArea->prevFrame;
+    }
+
+    /*
+     * We still need to pass the Thread for some monitor wait stuff.
+     */
+    dumpFrames(target, stackCopy + fpOffset, thread);
+    free(stackCopy);
+}
+
diff --git a/vm/interp/Stack.h b/vm/interp/Stack.h
new file mode 100644
index 0000000..1b28d49
--- /dev/null
+++ b/vm/interp/Stack.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Stack frames, and uses thereof.
+ */
+#ifndef _DALVIK_INTERP_STACK
+#define _DALVIK_INTERP_STACK
+
+#include "jni.h"
+#include <stdarg.h>
+
+
+/*
+Stack layout
+
+In what follows, the "top" of the stack is at a low position in memory,
+and the "bottom" of the stack is in a high position (put more simply,
+they grow downward).  They may be merged with the native stack at a
+later date.  The interpreter assumes that they have a fixed size,
+determined when the thread is created.
+
+Dalvik's registers (of which there can be up to 64K) map to the "ins"
+(method arguments) and "locals" (local variables).  The "outs" (arguments
+to called methods) are specified by the "invoke" operand.  The return
+value, which is passed through the interpreter rather than on the stack,
+is retrieved with a "move-result" instruction.
+
+    Low addresses (0x00000000)
+
+                     +- - - - - - - - -+
+                     -  out0           -
+                     +-----------------+  <-- stack ptr (top of stack)
+                     +  VM-specific    +
+                     +  internal goop  +
+                     +-----------------+  <-- curFrame: FP for cur function
+                     +  v0 == local0   +
++-----------------+  +-----------------+
++  out0           +  +  v1 == in0      +
++-----------------+  +-----------------+
++  out1           +  +  v2 == in1      +
++-----------------+  +-----------------+
++  VM-specific    +
++  internal goop  +
++-----------------+  <-- frame ptr (FP) for previous function
++  v0 == local0   +
++-----------------+
++  v1 == local1   +
++-----------------+
++  v2 == in0      +
++-----------------+
++  v3 == in1      +
++-----------------+
++  v4 == in2      +
++-----------------+
+-                 -
+-                 -
+-                 -
++-----------------+  <-- interpStackStart
+
+    High addresses (0xffffffff)
+
+Note the "ins" and "outs" overlap -- values pushed into the "outs" area
+become the parameters to the called method.  The VM guarantees that there
+will be enough room for all possible "outs" on the stack before calling
+into a method.
+
+All "V registers" are 32 bits, and all stack entries are 32-bit aligned.
+Registers are accessed as a positive offset from the frame pointer,
+e.g. register v2 is fp[2].  64-bit quantities are stored in two adjacent
+registers, addressed by the lower-numbered register, and are in host order.
+64-bit quantities do not need to start in an even-numbered register.
+
+We push two stack frames on when calling an interpreted or native method
+directly from the VM (e.g. invoking <clinit> or via reflection "invoke()").
+The first is a "break" frame, which allows us to tell when a call return or
+exception unroll has reached the VM call site.  Without the break frame the
+stack might look like an uninterrupted series of interpreted method calls.
+The second frame is for the method itself.
+
+The "break" frame is used as an alternative to adding additional fields
+to the StackSaveArea struct itself.  They are recognized by having a
+NULL method pointer.
+
+When calling a native method from interpreted code, the stack setup is
+essentially identical to calling an interpreted method.  Because it's a
+native method, though, there are never any "locals" or "outs".
+
+For native calls into JNI, we want to store a table of local references
+on the stack.  The GC needs to scan them while the native code is running,
+and we want to trivially discard them when the method returns.  See JNI.c
+for a discussion of how this is managed.  In particular note that it is
+possible to push additional call frames on without calling a method.
+*/
+
+
+struct StackSaveArea;
+typedef struct StackSaveArea StackSaveArea;
+
+//#define PAD_SAVE_AREA       /* help debug stack trampling */
+
+/*
+ * The VM-specific internal goop.
+ *
+ * The idea is to mimic a typical native stack frame, with copies of the
+ * saved PC and FP.  At some point we'd like to have interpreted and
+ * native code share the same stack, though this makes portability harder.
+ */
+struct StackSaveArea {
+#ifdef PAD_SAVE_AREA
+    /* guard words to help detect stack trampling */
+    u4          pad0, pad1, pad2;
+#endif
+
+#ifdef EASY_GDB
+    /* make it easier to trek through stack frames in GDB */
+    StackSaveArea* prevSave;
+#endif
+
+    /* saved frame pointer for previous frame, or NULL if this is at bottom */
+    void*       prevFrame;
+
+    /* saved program counter (from method in caller's frame) */
+    const u2*   savedPc;
+
+    /* pointer to method we're *currently* executing; handy for exceptions */
+    /* NULL here marks a "break" frame -- see dvmIsBreakFrame() */
+    const Method* method;
+
+    /* one of the two fields applies, depending on whether the frame's
+     * method is native (JNI) or interpreted */
+    union {
+        /* for JNI native methods: top of local reference storage */
+        Object**    localRefTop;
+
+        /* for interpreted methods: saved current PC, for exception stack
+         * traces and debugger traces */
+        const u2*   currentPc;
+    } xtra;
+
+#ifdef PAD_SAVE_AREA
+    /* trailing guard words (see pad0..pad2 above) */
+    u4          pad3, pad4, pad5;
+#endif
+};
+
+/* move between the stack save area and the frame pointer */
+#define SAVEAREA_FROM_FP(_fp)   ((StackSaveArea*)(_fp) -1)
+#define FP_FROM_SAVEAREA(_save) ((void*) ((StackSaveArea*)(_save) +1))
+
+/* when calling a function, get a pointer to outs[0] */
+#define OUTS_FROM_FP(_fp, _argCount) \
+    ((u4*) ((u1*)SAVEAREA_FROM_FP(_fp) - sizeof(u4) * (_argCount)))
+
+/* reserve this many bytes for handling StackOverflowError */
+#define STACK_OVERFLOW_RESERVE  512
+
+/*
+ * Determine if the frame pointer points to a "break frame".
+ */
+INLINE bool dvmIsBreakFrame(const u4* fp)
+{
+    /* break frames are identified by a NULL method in the save area */
+    const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+    return saveArea->method == NULL;
+}
+
+/*
+ * Initialize the interp stack (call this after allocating storage and
+ * setting thread->interpStackStart).
+ */
+bool dvmInitInterpStack(Thread* thread, int stackSize);
+
+/*
+ * Push a native method frame directly onto the stack.  Used to push the
+ * "fake" native frames at the top of each thread stack.
+ */
+bool dvmPushJNIFrame(Thread* thread, const Method* method);
+
+/*
+ * JNI local frame management.
+ */
+bool dvmPushLocalFrame(Thread* thread, const Method* method);
+bool dvmPopLocalFrame(Thread* thread);
+
+/*
+ * Call an interpreted method from native code.
+ *
+ * "obj" should be NULL for "direct" methods.
+ */
+void dvmCallMethodV(Thread* self, const Method* method, Object* obj,
+    JValue* pResult, va_list args);
+void dvmCallMethodA(Thread* self, const Method* method, Object* obj,
+    JValue* pResult, const jvalue* args);
+void dvmCallMethod(Thread* self, const Method* method, Object* obj,
+    JValue* pResult, ...);
+
+/*
+ * Invoke a method, using the specified arguments and return type, through
+ * a reflection interface.
+ *
+ * Deals with boxing/unboxing primitives and performs widening conversions.
+ *
+ * "obj" should be null for a static method.
+ *
+ * "params" and "returnType" come from the Method object, so we don't have
+ * to re-generate them from the method signature.  "returnType" should be
+ * NULL if we're invoking a constructor.
+ */
+Object* dvmInvokeMethod(Object* invokeObj, const Method* meth,
+    ArrayObject* argList, ArrayObject* params, ClassObject* returnType,
+    bool noAccessCheck);
+
+/*
+ * Determine the source file line number, given the program counter offset
+ * into the specified method.  Returns -2 for native methods, -1 if no
+ * match was found.
+ */
+int dvmLineNumFromPC(const Method* method, u4 relPc);
+
+/*
+ * Given a frame pointer, compute the current call depth.  The value can be
+ * "exact" (a count of non-break frames) or "vague" (just subtracting
+ * pointers to give relative values).
+ */
+int dvmComputeExactFrameDepth(const void* fp);
+int dvmComputeVagueFrameDepth(Thread* thread, const void* fp);
+
+/*
+ * Get the frame pointer for the caller's stack frame.
+ */
+void* dvmGetCallerFP(const void* curFrame);
+
+/*
+ * Get the class of the method that called us.
+ */
+ClassObject* dvmGetCallerClass(const void* curFrame);
+
+/*
+ * Get the caller's caller's class.  Pass in the current fp.
+ *
+ * This is used by e.g. java.lang.Class, which wants to know about the
+ * class loader of the method that called it.
+ */
+ClassObject* dvmGetCaller2Class(const void* curFrame);
+
+/*
+ * Get the caller's caller's caller's class.  Pass in the current fp.
+ *
+ * This is used by e.g. java.lang.Class, which wants to know about the
+ * class loader of the method that called it.
+ */
+ClassObject* dvmGetCaller3Class(const void* curFrame);
+
+/*
+ * Allocate and fill an array of method pointers representing the current
+ * stack trace (element 0 is current frame).
+ */
+bool dvmCreateStackTraceArray(const void* fp, const Method*** pArray,
+    int* pLength);
+
+/*
+ * Common handling for stack overflow.
+ */
+void dvmHandleStackOverflow(Thread* self);
+void dvmCleanupStackOverflow(Thread* self);
+
+/* debugging; dvmDumpThread() is probably a better starting point */
+void dvmDumpThreadStack(const DebugOutputTarget* target, Thread* thread);
+void dvmDumpRunningThreadStack(const DebugOutputTarget* target, Thread* thread);
+
+#endif /*_DALVIK_INTERP_STACK*/
diff --git a/vm/jdwp/ExpandBuf.c b/vm/jdwp/ExpandBuf.c
new file mode 100644
index 0000000..50c3035
--- /dev/null
+++ b/vm/jdwp/ExpandBuf.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Implementation of an expandable byte buffer.  Designed for serializing
+ * primitive values, e.g. JDWP replies.
+ */
+#include "jdwp/ExpandBuf.h"
+#include "Bits.h"
+#include "Common.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ * Data structure used to track buffer use.
+ */
+struct ExpandBuf {
+    u1*     storage;    /* malloc()ed buffer; may move when ensureSpace() reallocs */
+    int     curLen;     /* number of data bytes currently stored */
+    int     maxLen;     /* allocated capacity of "storage", in bytes */
+};
+
+#define kInitialStorage 64
+
+/*
+ * Allocate a JdwpBuf and some initial storage.
+ */
+ExpandBuf* expandBufAlloc(void)
+{
+    ExpandBuf* newBuf;
+
+    newBuf = (ExpandBuf*) malloc(sizeof(*newBuf));
+    if (newBuf == NULL)
+        return NULL;            /* out of memory; caller must check */
+
+    newBuf->storage = (u1*) malloc(kInitialStorage);
+    if (newBuf->storage == NULL) {
+        /* don't leak the struct if the storage allocation fails */
+        free(newBuf);
+        return NULL;
+    }
+
+    newBuf->curLen = 0;
+    newBuf->maxLen = kInitialStorage;
+
+    return newBuf;
+}
+
+/*
+ * Free a JdwpBuf and associated storage.
+ */
+void expandBufFree(ExpandBuf* pBuf)
+{
+    /* accept NULL for caller convenience, like free() itself */
+    if (pBuf != NULL) {
+        free(pBuf->storage);
+        free(pBuf);
+    }
+}
+
+/*
+ * Get a pointer to the start of the buffer.
+ */
+u1* expandBufGetBuffer(ExpandBuf* pBuf)
+{
+    /* NOTE: the returned pointer may be invalidated by any subsequent
+     * expandBufAdd* call, since ensureSpace() can realloc the storage */
+    return pBuf->storage;
+}
+
+/*
+ * Get the amount of data currently in the buffer.
+ */
+size_t expandBufGetLength(ExpandBuf* pBuf)
+{
+    /* curLen is an int internally and never goes negative, so the
+     * implicit widening to size_t is safe */
+    return pBuf->curLen;
+}
+
+
+/*
+ * Ensure that the buffer has enough space to hold incoming data.  If it
+ * doesn't, resize the buffer.
+ */
+static void ensureSpace(ExpandBuf* pBuf, int newCount)
+{
+    int needed = pBuf->curLen + newCount;
+    u1* newStorage;
+
+    /* common case: the pending write already fits */
+    if (needed <= pBuf->maxLen)
+        return;
+
+    /* double the capacity until the pending write fits */
+    do {
+        pBuf->maxLen *= 2;
+    } while (pBuf->maxLen < needed);
+
+    newStorage = realloc(pBuf->storage, pBuf->maxLen);
+    if (newStorage == NULL) {
+        LOGE("realloc(%d) failed\n", pBuf->maxLen);
+        abort();
+    }
+
+    pBuf->storage = newStorage;
+}
+
+/*
+ * Allocate some space in the buffer.
+ */
+u1* expandBufAddSpace(ExpandBuf* pBuf, int gapSize)
+{
+    u1* start;
+
+    ensureSpace(pBuf, gapSize);
+
+    /* the gap begins at the current end of the data */
+    start = pBuf->storage + pBuf->curLen;
+    /* (we could garbage-fill the gap here to aid debugging) */
+    pBuf->curLen += gapSize;
+
+    return start;
+}
+
+/*
+ * Append a byte.
+ */
+void expandBufAdd1(ExpandBuf* pBuf, u1 val)
+{
+    /* grow if needed, then store the byte and advance the length */
+    ensureSpace(pBuf, sizeof(val));
+    pBuf->storage[pBuf->curLen++] = val;
+}
+
+/*
+ * Append two big-endian bytes.
+ */
+void expandBufAdd2BE(ExpandBuf* pBuf, u2 val)
+{
+    u1* dst;
+
+    /* reserve room, write big-endian, then advance the length */
+    ensureSpace(pBuf, sizeof(val));
+    dst = pBuf->storage + pBuf->curLen;
+    set2BE(dst, val);
+    pBuf->curLen += sizeof(val);
+}
+
+/*
+ * Append four big-endian bytes.
+ */
+void expandBufAdd4BE(ExpandBuf* pBuf, u4 val)
+{
+    u1* dst;
+
+    /* reserve room, write big-endian, then advance the length */
+    ensureSpace(pBuf, sizeof(val));
+    dst = pBuf->storage + pBuf->curLen;
+    set4BE(dst, val);
+    pBuf->curLen += sizeof(val);
+}
+
+/*
+ * Append eight big-endian bytes.
+ */
+void expandBufAdd8BE(ExpandBuf* pBuf, u8 val)
+{
+    u1* dst;
+
+    /* reserve room, write big-endian, then advance the length */
+    ensureSpace(pBuf, sizeof(val));
+    dst = pBuf->storage + pBuf->curLen;
+    set8BE(dst, val);
+    pBuf->curLen += sizeof(val);
+}
+
+/*
+ * Add a UTF8 string as a 4-byte length followed by a non-NULL-terminated
+ * string.
+ *
+ * Because these strings are coming out of the VM, it's safe to assume that
+ * they can be null-terminated (either they don't have null bytes or they
+ * have stored null bytes in a multi-byte encoding).
+ */
+void expandBufAddUtf8String(ExpandBuf* pBuf, const u1* str)
+{
+    /* strlen() returns size_t; keep it that way instead of narrowing
+     * to int (harmless today, but avoids truncation on huge strings) */
+    size_t strLen = strlen((const char*)str);
+
+    ensureSpace(pBuf, sizeof(u4) + strLen);
+    setUtf8String(pBuf->storage + pBuf->curLen, str);
+    pBuf->curLen += sizeof(u4) + strLen;
+}
+
diff --git a/vm/jdwp/ExpandBuf.h b/vm/jdwp/ExpandBuf.h
new file mode 100644
index 0000000..8bdc8a7
--- /dev/null
+++ b/vm/jdwp/ExpandBuf.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Expanding byte buffer, with primitives for appending basic data types.
+ */
+#ifndef _DALVIK_JDWP_EXPANDBUF
+#define _DALVIK_JDWP_EXPANDBUF
+
+#include "Common.h"     // need u1/u2/u4/u8 types
+
+struct ExpandBuf;   /* private */
+typedef struct ExpandBuf ExpandBuf;
+
+/* create a new struct */
+ExpandBuf* expandBufAlloc(void);
+/* free storage */
+void expandBufFree(ExpandBuf* pBuf);
+
+/*
+ * Accessors.  The buffer pointer and length will only be valid until more
+ * data is added.
+ */
+u1* expandBufGetBuffer(ExpandBuf* pBuf);
+size_t expandBufGetLength(ExpandBuf* pBuf);
+
+/*
+ * The "add" operations allocate additional storage and append the data.
+ *
+ * There are no "get" operations included with this "class", other than
+ * GetBuffer().  If you want to get or set data from a position other
+ * than the end, get a pointer to the buffer and use the inline functions
+ * defined elsewhere.
+ *
+ * expandBufAddSpace() returns a pointer to the *start* of the region
+ * added.
+ */
+u1* expandBufAddSpace(ExpandBuf* pBuf, int gapSize);
+void expandBufAdd1(ExpandBuf* pBuf, u1 val);
+void expandBufAdd2BE(ExpandBuf* pBuf, u2 val);
+void expandBufAdd4BE(ExpandBuf* pBuf, u4 val);
+void expandBufAdd8BE(ExpandBuf* pBuf, u8 val);
+void expandBufAddUtf8String(ExpandBuf* pBuf, const u1* str);
+
+#endif /*_DALVIK_JDWP_EXPANDBUF*/
diff --git a/vm/jdwp/Jdwp.h b/vm/jdwp/Jdwp.h
new file mode 100644
index 0000000..0a72a06
--- /dev/null
+++ b/vm/jdwp/Jdwp.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * JDWP "public" interface.  The main body of the VM should only use JDWP
+ * structures and functions declared here.
+ *
+ * The JDWP code follows the DalvikVM rules for naming conventions, but
+ * attempts to remain independent of VM innards (e.g. it doesn't access VM
+ * data structures directly).  All calls go through Debugger.c.
+ */
+#ifndef _DALVIK_JDWP_JDWP
+#define _DALVIK_JDWP_JDWP
+
+#include "jdwp/JdwpConstants.h"
+#include "jdwp/ExpandBuf.h"
+#include "Common.h"
+#include "Bits.h"
+#include <pthread.h>
+
+struct JdwpState;       /* opaque */
+typedef struct JdwpState JdwpState;
+
+/*
+ * Fundamental types.
+ *
+ * ObjectId and RefTypeId must be the same size.
+ */
+typedef u4 FieldId;     /* static or instance field */
+typedef u4 MethodId;    /* any kind of method, including constructors */
+typedef u8 ObjectId;    /* any object (threadID, stringID, arrayID, etc) */
+typedef u8 RefTypeId;   /* like ObjectID, but unique for Class objects */
+typedef u8 FrameId;     /* short-lived stack frame ID */
+
+/*
+ * Match these with the type sizes.  This way we don't have to pass
+ * a value and a length.
+ */
+INLINE FieldId dvmReadFieldId(const u1** pBuf)      { return read4BE(pBuf); }
+INLINE MethodId dvmReadMethodId(const u1** pBuf)    { return read4BE(pBuf); }
+INLINE ObjectId dvmReadObjectId(const u1** pBuf)    { return read8BE(pBuf); }
+INLINE RefTypeId dvmReadRefTypeId(const u1** pBuf)  { return read8BE(pBuf); }
+INLINE FrameId dvmReadFrameId(const u1** pBuf)      { return read8BE(pBuf); }
+/* "return <void expression>" in a void function is a C constraint
+ * violation (C99 6.8.6.4); just call the setter and fall off the end */
+INLINE void dvmSetFieldId(u1* buf, FieldId val)     { set4BE(buf, val); }
+INLINE void dvmSetMethodId(u1* buf, MethodId val)   { set4BE(buf, val); }
+INLINE void dvmSetObjectId(u1* buf, ObjectId val)   { set8BE(buf, val); }
+INLINE void dvmSetRefTypeId(u1* buf, RefTypeId val) { set8BE(buf, val); }
+INLINE void dvmSetFrameId(u1* buf, FrameId val)     { set8BE(buf, val); }
+INLINE void expandBufAddFieldId(ExpandBuf* pReply, FieldId id) {
+    expandBufAdd4BE(pReply, id);
+}
+INLINE void expandBufAddMethodId(ExpandBuf* pReply, MethodId id) {
+    expandBufAdd4BE(pReply, id);
+}
+INLINE void expandBufAddObjectId(ExpandBuf* pReply, ObjectId id) {
+    expandBufAdd8BE(pReply, id);
+}
+INLINE void expandBufAddRefTypeId(ExpandBuf* pReply, RefTypeId id) {
+    expandBufAdd8BE(pReply, id);
+}
+INLINE void expandBufAddFrameId(ExpandBuf* pReply, FrameId id) {
+    expandBufAdd8BE(pReply, id);
+}
+
+
+/*
+ * Holds a JDWP "location".
+ */
+typedef struct JdwpLocation {
+    u1          typeTag;        /* class or interface? */
+    RefTypeId   classId;        /* method->clazz */
+    MethodId    methodId;       /* method in which "idx" resides */
+    u8          idx;            /* relative index into code block */
+} JdwpLocation;
+//#define kJDWPLocationSize   (25)
+
+/*
+ * How we talk to the debugger.
+ */
+typedef enum JdwpTransportType {
+    kJdwpTransportUnknown = 0,
+    kJdwpTransportSocket,       /* transport=dt_socket */
+    kJdwpTransportAndroidAdb,   /* transport=dt_android_adb */
+} JdwpTransportType;
+
+/*
+ * Holds collection of JDWP initialization parameters.
+ */
+typedef struct JdwpStartupParams {
+    JdwpTransportType transport;
+    bool        server;
+    bool        suspend;
+    char        host[64];
+    short       port;
+    /* more will be here someday */
+} JdwpStartupParams;
+
+/*
+ * Perform one-time initialization.
+ *
+ * Among other things, this binds to a port to listen for a connection from
+ * the debugger.
+ *
+ * Returns a newly-allocated JdwpState struct on success, or NULL on failure.
+ */
+JdwpState* dvmJdwpStartup(const JdwpStartupParams* params);
+
+/*
+ * Shut everything down.
+ */
+void dvmJdwpShutdown(JdwpState* state);
+
+/*
+ * Returns "true" if a debugger or DDM is connected.
+ */
+bool dvmJdwpIsActive(JdwpState* state);
+
+/*
+ * Return the debugger thread's handle, or 0 if the debugger thread isn't
+ * running.
+ */
+pthread_t dvmJdwpGetDebugThread(JdwpState* state);
+
+/*
+ * Get time, in milliseconds, since the last debugger activity.
+ */
+s8 dvmJdwpLastDebuggerActivity(JdwpState* state);
+
+/*
+ * When we hit a debugger event that requires suspension, it's important
+ * that we wait for the thread to suspend itself before processing any
+ * additional requests.  (Otherwise, if the debugger immediately sends a
+ * "resume thread" command, the resume might arrive before the thread has
+ * suspended itself.)
+ *
+ * The thread should call the "set" function before sending the event to
+ * the debugger.  The main JDWP handler loop calls "get" before processing
+ * an event, and will wait for thread suspension if it's set.  Once the
+ * thread has suspended itself, the JDWP handler calls "clear" and
+ * continues processing the current event.  This works in the suspend-all
+ * case because the event thread doesn't suspend itself until everything
+ * else has suspended.
+ *
+ * It's possible that multiple threads could encounter thread-suspending
+ * events at the same time, so we grab a mutex in the "set" call, and
+ * release it in the "clear" call.
+ */
+//ObjectId dvmJdwpGetWaitForEventThread(JdwpState* state);
+void dvmJdwpSetWaitForEventThread(JdwpState* state, ObjectId threadId);
+void dvmJdwpClearWaitForEventThread(JdwpState* state);
+
+/*
+ * Network functions.
+ */
+bool dvmJdwpCheckConnection(JdwpState* state);
+bool dvmJdwpAcceptConnection(JdwpState* state);
+bool dvmJdwpEstablishConnection(JdwpState* state);
+void dvmJdwpCloseConnection(JdwpState* state);
+bool dvmJdwpProcessIncoming(JdwpState* state);
+
+
+/*
+ * These notify the debug code that something interesting has happened.  This
+ * could be a thread starting or ending, an exception, or an opportunity
+ * for a breakpoint.  These calls do not mean that an event the debugger
+ * is interested has happened, just that something has happened that the
+ * debugger *might* be interested in.
+ *
+ * The item of interest may trigger multiple events, some or all of which
+ * are grouped together in a single response.
+ *
+ * The event may cause the current thread or all threads (except the
+ * JDWP support thread) to be suspended.
+ */
+
+/*
+ * The VM has finished initializing.  Only called when the debugger is
+ * connected at the time initialization completes.
+ */
+bool dvmJdwpPostVMStart(JdwpState* state, bool suspend);
+
+/*
+ * A location of interest has been reached.  This is used for breakpoints,
+ * single-stepping, and method entry/exit.  (JDWP requires that these four
+ * events are grouped together in a single response.)
+ *
+ * In some cases "*pLoc" will just have a method and class name, e.g. when
+ * issuing a MethodEntry on a native method.
+ *
+ * "eventFlags" indicates the types of events that have occurred.
+ */
+bool dvmJdwpPostLocationEvent(JdwpState* state, const JdwpLocation* pLoc,
+    ObjectId thisPtr, int eventFlags);
+
+/*
+ * An exception has been thrown.
+ *
+ * Pass in a zeroed-out "*pCatchLoc" if the exception wasn't caught.
+ */
+bool dvmJdwpPostException(JdwpState* state, const JdwpLocation* pThrowLoc,
+    ObjectId excepId, RefTypeId excepClassId, const JdwpLocation* pCatchLoc,
+    ObjectId thisPtr);
+
+/*
+ * A thread has started or stopped.
+ */
+bool dvmJdwpPostThreadChange(JdwpState* state, ObjectId threadId, bool start);
+
+/*
+ * Class has been prepared.
+ */
+bool dvmJdwpPostClassPrepare(JdwpState* state, int tag, RefTypeId refTypeId,
+    const char* signature, int status);
+
+/*
+ * The VM is about to stop.
+ */
+bool dvmJdwpPostVMDeath(JdwpState* state);
+
+/*
+ * Send up a chunk of DDM data.
+ */
+void dvmJdwpDdmSendChunk(JdwpState* state, int type, int len, const u1* buf);
+
+#endif /*_DALVIK_JDWP_JDWP*/
diff --git a/vm/jdwp/JdwpAdb.c b/vm/jdwp/JdwpAdb.c
new file mode 100644
index 0000000..91a8e47
--- /dev/null
+++ b/vm/jdwp/JdwpAdb.c
@@ -0,0 +1,673 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "jdwp/JdwpPriv.h"
+#include "jdwp/JdwpHandler.h"
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <errno.h>
+#include <unistd.h>
+
+/* the JDWP <-> ADB transport protocol is explained in details
+ * in //device/tools/adb/jdwp_service.c, here's a summary.
+ *
+ * 1/ when the JDWP thread starts, it tries to connect to a Unix
+ *    domain stream socket (@jdwp-control) that is opened by the
+ *    ADB daemon.
+ *
+ * 2/ it then sends the current process PID as a string of 4 hexadecimal
+ *    chars (no terminating zero)
+ *
+ * 3/ then, it uses recvmsg to receive file descriptors from the
+ *    daemon. each incoming file descriptor is a pass-through to
+ *    a given JDWP debugger, that can be used to read the usual
+ *    JDWP-handshake, etc...
+ *
+ */
+
+#define kInputBufferSize    8192
+
+#define kMagicHandshake     "JDWP-Handshake"
+#define kMagicHandshakeLen  (sizeof(kMagicHandshake)-1)
+
+#define kJdwpControlName    "\0jdwp-control"
+#define kJdwpControlNameLen (sizeof(kJdwpControlName)-1)
+
+struct JdwpNetState {
+    int                 controlSock;    /* connection to the ADB daemon, or -1 */
+    int                 clientSock;     /* pass-through fd to a debugger, or -1 */
+    bool                awaitingHandshake;  /* set when a fresh client fd arrives */
+    /* pipe written by adbStateShutdown() to wake a blocked thread;
+     * NOTE(review): the read end isn't used in this file's visible code --
+     * presumably polled by the JDWP event loop; confirm */
+    int                 wakeFds[2];
+
+    int                 inputCount;     /* bytes buffered in inputBuffer */
+    unsigned char       inputBuffer[kInputBufferSize];
+
+    /* address of the ADB daemon's abstract-namespace control socket */
+    socklen_t           controlAddrLen;
+    union {
+        struct sockaddr_un  controlAddrUn;      /* "@jdwp-control" */
+        struct sockaddr     controlAddrPlain;   /* same storage, for connect() */
+    } controlAddr;
+};
+
+static void
+adbStateFree( JdwpNetState*  netState )
+{
+    int i;
+
+    if (netState == NULL)
+        return;
+
+    /* shut down and close both sockets if they were ever opened */
+    if (netState->clientSock >= 0) {
+        shutdown(netState->clientSock, SHUT_RDWR);
+        close(netState->clientSock);
+    }
+    if (netState->controlSock >= 0) {
+        shutdown(netState->controlSock, SHUT_RDWR);
+        close(netState->controlSock);
+    }
+
+    /* release both ends of the wake-up pipe */
+    for (i = 0; i < 2; i++) {
+        if (netState->wakeFds[i] >= 0) {
+            close(netState->wakeFds[i]);
+            netState->wakeFds[i] = -1;
+        }
+    }
+
+    free(netState);
+}
+
+
+static JdwpNetState*
+adbStateAlloc(void)
+{
+    /* conventional calloc argument order: count, then element size */
+    JdwpNetState*   netState = calloc(1, sizeof(*netState));
+
+    /* the original dereferenced an unchecked calloc result; startup()
+     * already handles a NULL return from us */
+    if (netState == NULL)
+        return NULL;
+
+    netState->controlSock = -1;
+    netState->clientSock  = -1;
+
+    /* abstract-namespace address: leading NUL byte, no trailing zero,
+     * so the length is family + name bytes (see kJdwpControlNameLen) */
+    netState->controlAddr.controlAddrUn.sun_family = AF_UNIX;
+    netState->controlAddrLen =
+            sizeof(netState->controlAddr.controlAddrUn.sun_family) +
+            kJdwpControlNameLen;
+
+    memcpy(netState->controlAddr.controlAddrUn.sun_path,
+           kJdwpControlName, kJdwpControlNameLen);
+
+    netState->wakeFds[0] = -1;
+    netState->wakeFds[1] = -1;
+
+    return netState;
+}
+
+
+/*
+ * Do initial prep work, e.g. binding to ports and opening files.  This
+ * runs in the main thread, before the JDWP thread starts, so it shouldn't
+ * do anything that might block forever.
+ */
+static bool startup(struct JdwpState* state, const JdwpStartupParams* pParams)
+{
+    JdwpNetState*  netState;
+
+    LOGV("ADB transport startup\n");
+
+    /* allocate our per-transport state; the actual connection to the
+     * ADB daemon is established lazily, in acceptConnection() */
+    netState = adbStateAlloc();
+    state->netState = netState;
+
+    return (netState != NULL);
+}
+
+static int  receiveClientFd(JdwpNetState*  netState)
+{
+    struct msghdr    msg;
+    struct cmsghdr*  cmsg;
+    struct iovec     iov;
+    char             dummy = '!';
+    /* CMSG_SPACE guarantees room (with padding) for the control header
+     * plus one file descriptor; a bare char array is not guaranteed to
+     * be suitably aligned for struct cmsghdr, so use a union */
+    union {
+        struct cmsghdr   align;
+        char             buffer[CMSG_SPACE(sizeof(int))];
+    } cmsgBuf;
+    int              ret;
+
+    iov.iov_base       = &dummy;
+    iov.iov_len        = 1;
+    msg.msg_name       = NULL;
+    msg.msg_namelen    = 0;
+    msg.msg_iov        = &iov;
+    msg.msg_iovlen     = 1;
+    msg.msg_flags      = 0;
+    msg.msg_control    = cmsgBuf.buffer;
+    msg.msg_controllen = sizeof(cmsgBuf.buffer);
+
+    /* pre-fill the control message; the kernel overwrites it on receive */
+    cmsg = CMSG_FIRSTHDR(&msg);
+    cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
+    cmsg->cmsg_level = SOL_SOCKET;
+    cmsg->cmsg_type  = SCM_RIGHTS;
+    ((int*)CMSG_DATA(cmsg))[0] = -1;
+
+    /* retry if interrupted by a signal */
+    do {
+        ret = recvmsg(netState->controlSock, &msg, 0);
+    } while (ret < 0 && errno == EINTR);
+
+    if (ret < 0) {
+        LOGE("receiving file descriptor from ADB failed (socket %d): %s\n",
+             netState->controlSock, strerror(errno));
+        return -1;
+    }
+
+    /* the passed-through debugger fd arrives in the ancillary data */
+    return ((int*)CMSG_DATA(cmsg))[0];
+}
+
+/*
+ * Block forever, waiting for a debugger to connect to us.  Called from the
+ * JDWP thread.
+ *
+ * This needs to un-block and return "false" if the VM is shutting down.  It
+ * should return "true" when it successfully accepts a connection.
+ */
+static bool acceptConnection(struct JdwpState* state)
+{
+    JdwpNetState*  netState = state->netState;
+
+    /* first, ensure that we get a connection to the ADB daemon */
+
+    if (netState->controlSock < 0)
+    {
+        /* exponential backoff between connect attempts, capped at 2s */
+        int        sleep_ms     = 500;
+        const int  sleep_max_ms = 2*1000;
+        char       buff[5];
+
+        netState->controlSock = socket(PF_UNIX, SOCK_STREAM, 0);
+        if (netState->controlSock < 0) {
+            LOGE("Could not create ADB control socket:%s\n",
+                 strerror(errno));
+            return false;
+        }
+
+        /* wake pipe: adbStateShutdown() writes to wakeFds[1] so a
+         * blocked thread can notice shutdown */
+        if (pipe(netState->wakeFds) < 0) {
+            LOGE("pipe failed");
+            return false;
+        }
+
+        /* protocol step 2: send our PID as exactly 4 hex chars, no NUL.
+         * NOTE(review): a PID > 0xffff would be truncated by the
+         * 5-byte buffer -- confirm PIDs are bounded on the target */
+        snprintf(buff, sizeof(buff), "%04x", getpid());
+        buff[4] = 0;
+
+        for (;;) {
+            int  ret = connect(netState->controlSock,
+                               &netState->controlAddr.controlAddrPlain,
+                               netState->controlAddrLen);
+            if (!ret) {
+                /* now try to send our pid to the ADB daemon */
+                do {
+                    ret = send( netState->controlSock, buff, 4, 0 );
+                } while (ret < 0 && errno == EINTR);
+
+                if (ret >= 0) {
+                    LOGV("PID sent as '%.*s' to ADB\n", 4, buff);
+                    break;
+                }
+
+                /* NOTE(review): on this failure path (and the pipe one
+                 * above) controlSock stays open but unconnected, so the
+                 * next call skips the connect loop and recvmsg() will
+                 * fail -- confirm this is the intended recovery */
+                LOGE("Weird, can't send JDWP process pid to ADB: %s\n",
+                     strerror(errno));
+                return false;
+            }
+            LOGV("Can't connect to ADB control socket:%s\n",
+                 strerror(errno));
+
+            /* daemon not up yet (or busy); back off and retry forever */
+            usleep( sleep_ms*1000 );
+
+            sleep_ms += (sleep_ms >> 1);
+            if (sleep_ms > sleep_max_ms)
+                sleep_ms = sleep_max_ms;
+        }
+    }
+
+    LOGV("trying to receive file descriptor from ADB\n");
+    /* now we can receive a client file descriptor */
+    netState->clientSock = receiveClientFd(netState);
+    if (netState->clientSock >= 0) {
+        LOGI("received file descriptor %d from ADB\n", netState->clientSock);
+        /* a new debugger connection must start with the JDWP handshake */
+        netState->awaitingHandshake = 1;
+        netState->inputCount = 0;
+    }
+    return (netState->clientSock >= 0);
+}
+
+/*
+ * Connect out to a debugger (for server=n).  Not required.
+ *
+ * The ADB transport is always the passive side -- ADB hands us the
+ * debugger connection via acceptConnection() -- so outbound connections
+ * are not supported and this always reports failure.
+ */
+static bool establishConnection(struct JdwpState* state)
+{
+    return false;
+}
+
+/*
+ * Close a connection from a debugger (which may have already dropped us).
+ * Only called from the JDWP thread.
+ *
+ * Safe to call repeatedly: once clientSock has been reset to -1 this is
+ * a no-op.  The control socket to ADB is deliberately left open so a new
+ * debugger can attach later.
+ */
+static void closeConnection(struct JdwpState* state)
+{
+    JdwpNetState* netState;
+
+    assert(state != NULL && state->netState != NULL);
+
+    netState = state->netState;
+    if (netState->clientSock < 0)
+        return;                 /* already closed */
+
+    LOGV("+++ closed JDWP <-> ADB connection\n");
+
+    close(netState->clientSock);
+    netState->clientSock = -1;
+}
+
+/*
+ * Close all network stuff, including the socket we use to listen for
+ * new connections.
+ *
+ * May be called from a non-JDWP thread, e.g. when the VM is shutting down.
+ */
+static void adbStateShutdown(struct JdwpNetState* netState)
+{
+    int  controlSock;
+    int  clientSock;
+
+    if (netState == NULL)
+        return;
+
+    /*
+     * Use shutdown() rather than close() so a JDWP thread blocked on
+     * these descriptors sees an error/EOF instead of racing against
+     * descriptor reuse.  NOTE(review): the fds are not close()d here --
+     * presumably adbStateFree() releases them; confirm.
+     */
+    clientSock = netState->clientSock;
+    if (clientSock >= 0) {
+        shutdown(clientSock, SHUT_RDWR);
+        netState->clientSock = -1;
+    }
+
+    controlSock = netState->controlSock;
+    if (controlSock >= 0) {
+        shutdown(controlSock, SHUT_RDWR);
+        netState->controlSock = -1;
+    }
+    
+    /* poke the wake pipe so a thread parked in select() bails out */
+    if (netState->wakeFds[1] >= 0) {
+        LOGV("+++ writing to wakePipe\n");
+        (void) write(netState->wakeFds[1], "", 1);
+    }
+}
+
+/*
+ * Shut down this JDWP instance's network state.  Thin adapter to
+ * adbStateShutdown() matching the JdwpTransport signature.
+ */
+static void netShutdown(JdwpState* state)
+{
+    adbStateShutdown(state->netState);
+}
+
+/*
+ * Free up anything we put in state->netState.  This is called after
+ * "netShutdown", after the JDWP thread has stopped.
+ *
+ * Ownership of the JdwpNetState passes to adbStateFree(); the pointer
+ * must not be used after this returns.
+ */
+static void netFree(struct JdwpState* state)
+{
+    JdwpNetState*  netState = state->netState;
+
+    adbStateFree(netState);
+}
+
+/*
+ * Is a debugger connected to us?
+ *
+ * True only when network state exists and we hold a live client socket.
+ */
+static bool isConnected(struct JdwpState* state)
+{
+    if (state->netState == NULL)
+        return false;
+
+    return state->netState->clientSock >= 0;
+}
+
+/*
+ * Are we still waiting for the JDWP handshake?
+ *
+ * The flag is set when a client fd arrives (acceptConnection) and
+ * cleared once the magic-string echo completes (processIncoming).
+ */
+static bool awaitingHandshake(struct JdwpState* state)
+{
+    return state->netState->awaitingHandshake;
+}
+
+/*
+ * Figure out if we have a full packet in the buffer.
+ *
+ * During the handshake phase a "packet" is the fixed-length magic
+ * string.  Afterward, the first four bytes of a JDWP packet hold its
+ * total length (big-endian), header included.
+ */
+static bool haveFullPacket(JdwpNetState* netState)
+{
+    long length;
+
+    if (netState->awaitingHandshake)
+        return (netState->inputCount >= (int) kMagicHandshakeLen);
+
+    /* need at least the 4-byte length word before we can judge */
+    if (netState->inputCount < 4)
+        return false;
+
+    length = get4BE(netState->inputBuffer);
+    return (netState->inputCount >= length);
+}
+
+/*
+ * Consume bytes from the front of the input buffer.
+ *
+ * This would be more efficient with a circular buffer.  However, we're
+ * usually only going to find one packet, which is trivial to handle.
+ */
+static void consumeBytes(JdwpNetState* netState, int count)
+{
+    int remaining;
+
+    assert(count > 0);
+    assert(count <= netState->inputCount);
+
+    remaining = netState->inputCount - count;
+    if (remaining != 0) {
+        /* slide the unconsumed tail down to the start of the buffer */
+        memmove(netState->inputBuffer, netState->inputBuffer + count,
+            remaining);
+    }
+    netState->inputCount = remaining;
+}
+
+/*
+ * Handle a packet.  Returns "false" if we encounter a connection-fatal error.
+ *
+ * Assumes haveFullPacket() already confirmed a complete packet sits at
+ * the front of the input buffer; the packet is consumed on success.
+ */
+static bool handlePacket(JdwpState* state)
+{
+    JdwpNetState* netState = state->netState;
+    const unsigned char* buf = netState->inputBuffer;
+    JdwpReqHeader hdr;
+    u4 length, id;
+    u1 flags, cmdSet, cmd;
+    u2 error;
+    bool reply;
+    int dataLen;
+
+    cmd = cmdSet = 0;       // shut up gcc
+
+    /*dumpPacket(netState->inputBuffer);*/
+
+    /* parse the JDWP packet header; the read*BE helpers advance "buf" */
+    length = read4BE(&buf);
+    id = read4BE(&buf);
+    flags = read1(&buf);
+    if ((flags & kJDWPFlagReply) != 0) {
+        reply = true;
+        error = read2BE(&buf);
+    } else {
+        reply = false;
+        cmdSet = read1(&buf);
+        cmd = read1(&buf);
+    }
+
+    assert((int) length <= netState->inputCount);
+    /* payload size = total length minus however much header we consumed */
+    dataLen = length - (buf - netState->inputBuffer);
+
+    if (!reply) {
+        ExpandBuf* pReply = expandBufAlloc();
+
+        hdr.length = length;
+        hdr.id = id;
+        hdr.cmdSet = cmdSet;
+        hdr.cmd = cmd;
+        dvmJdwpProcessRequest(state, &hdr, buf, dataLen, pReply);
+        if (expandBufGetLength(pReply) > 0) {
+            int cc;
+
+            /*
+             * TODO: we currently assume the write() will complete in one
+             * go, which may not be safe for a network socket.  We may need
+             * to mutex this against sendRequest().
+             */
+            cc = write(netState->clientSock, expandBufGetBuffer(pReply),
+                    expandBufGetLength(pReply));
+            if (cc != (int) expandBufGetLength(pReply)) {
+                LOGE("Failed sending reply to debugger: %s\n", strerror(errno));
+                expandBufFree(pReply);
+                return false;
+            }
+        } else {
+            LOGW("No reply created for set=%d cmd=%d\n", cmdSet, cmd);
+        }
+        expandBufFree(pReply);
+    } else {
+        /* NOTE(review): a reply packet is unexpected here -- presumably we
+         * never send commands that require one; confirm before relying on it */
+        LOGV("reply?!\n");
+        assert(false);
+    }
+
+    LOGV("----------\n");
+
+    /* discard the whole packet (header + payload) from the input buffer */
+    consumeBytes(netState, length);
+    return true;
+}
+
+/*
+ * Process incoming data.  If no data is available, this will block until
+ * some arrives.
+ *
+ * If we get a full packet, handle it.
+ *
+ * To take some of the mystery out of life, we want to reject incoming
+ * connections if we already have a debugger attached.  If we don't, the
+ * debugger will just mysteriously hang until it times out.  We could just
+ * close the listen socket, but there's a good chance we won't be able to
+ * bind to the same port again, which would confuse utilities.
+ *
+ * Returns "false" on error (indicating that the connection has been severed),
+ * "true" if things are still okay.
+ */
+static bool processIncoming(JdwpState* state)
+{
+    JdwpNetState* netState = state->netState;
+    int readCount;
+
+    assert(netState->clientSock >= 0);
+
+    if (!haveFullPacket(netState)) {
+        /* read some more, looping until we have data */
+        errno = 0;
+        while (1) {
+            int selCount;
+            fd_set readfds;
+            int maxfd = -1;
+            int fd;
+
+            FD_ZERO(&readfds);
+
+            /* configure fds; note these may get zapped by another thread */
+            fd = netState->controlSock;
+            if (fd >= 0) {
+                FD_SET(fd, &readfds);
+                if (maxfd < fd)
+                    maxfd = fd;
+            }
+            fd = netState->clientSock;
+            if (fd >= 0) {
+                FD_SET(fd, &readfds);
+                if (maxfd < fd)
+                    maxfd = fd;
+            }
+            fd = netState->wakeFds[0];
+            if (fd >= 0) {
+                FD_SET(fd, &readfds);
+                if (maxfd < fd)
+                    maxfd = fd;
+            } else {
+                /* without the wake pipe a shutdown can't interrupt select() */
+                LOGI("NOTE: entering select w/o wakepipe\n");
+            }
+
+            if (maxfd < 0) {
+                LOGV("+++ all fds are closed\n");
+                return false;
+            }
+
+            /*
+             * Select blocks until it sees activity on the file descriptors.
+             * Closing the local file descriptor does not count as activity,
+             * so we can't rely on that to wake us up (it works for read()
+             * and accept(), but not select()).
+             *
+             * We can do one of three things: (1) send a signal and catch
+             * EINTR, (2) open an additional fd ("wakePipe") and write to
+             * it when it's time to exit, or (3) time out periodically and
+             * re-issue the select.  We're currently using #2, as it's more
+             * reliable than #1 and generally better than #3.  Wastes two fds.
+             */
+            selCount = select(maxfd+1, &readfds, NULL, NULL, NULL);
+            if (selCount < 0) {
+                if (errno == EINTR)
+                    continue;
+                LOGE("select failed: %s\n", strerror(errno));
+                goto fail;
+            }
+
+            if (netState->wakeFds[0] >= 0 &&
+                FD_ISSET(netState->wakeFds[0], &readfds))
+            {
+                LOGD("Got wake-up signal, bailing out of select\n");
+                goto fail;
+            }
+            if (netState->controlSock >= 0 &&
+                FD_ISSET(netState->controlSock, &readfds))
+            {
+                LOGI("Ignoring second debugger -- accepting and dropping\n");
+                int  sock = receiveClientFd(netState);
+                if (sock < 0)
+                    LOGI("Weird -- client fd reception failed\n");
+                else
+                    close(sock);
+            }
+            if (netState->clientSock >= 0 &&
+                FD_ISSET(netState->clientSock, &readfds))
+            {
+                readCount = read(netState->clientSock,
+                                netState->inputBuffer + netState->inputCount,
+                    sizeof(netState->inputBuffer) - netState->inputCount);
+                if (readCount < 0) {
+                    /* read failed */
+                    if (errno != EINTR)
+                        goto fail;
+                    /* interrupted; report "still okay" and let caller retry */
+                    LOGD("+++ EINTR hit\n");
+                    return true;
+                } else if (readCount == 0) {
+                    /* EOF hit -- far end went away */
+                    LOGD("+++ peer disconnected\n");
+                    goto fail;
+                } else
+                    break;
+            }
+        }
+
+        netState->inputCount += readCount;
+        if (!haveFullPacket(netState))
+            return true;        /* still not there yet */
+    }
+
+    /*
+     * Special-case the initial handshake.  For some bizarre reason we're
+     * expected to emulate bad tty settings by echoing the request back
+     * exactly as it was sent.  Note the handshake is always initiated by
+     * the debugger, no matter who connects to whom.
+     *
+     * Other than this one case, the protocol [claims to be] stateless.
+     */
+    if (netState->awaitingHandshake) {
+        int cc;
+
+        if (memcmp(netState->inputBuffer,
+                kMagicHandshake, kMagicHandshakeLen) != 0)
+        {
+            LOGE("ERROR: bad handshake '%.14s'\n", netState->inputBuffer);
+            goto fail;
+        }
+
+        errno = 0;
+        cc = write(netState->clientSock, netState->inputBuffer,
+                kMagicHandshakeLen);
+        if (cc != kMagicHandshakeLen) {
+            LOGE("Failed writing handshake bytes: %s (%d of %d)\n",
+                strerror(errno), cc, (int) kMagicHandshakeLen);
+            goto fail;
+        }
+
+        consumeBytes(netState, kMagicHandshakeLen);
+        netState->awaitingHandshake = false;
+        LOGV("+++ handshake complete\n");
+        return true;
+    }
+
+    /*
+     * Handle this packet.
+     */
+    return handlePacket(state);
+
+fail:
+    closeConnection(state);
+    return false;
+}
+
+/*
+ * Send a request.
+ *
+ * The entire packet must be sent with a single write() call to avoid
+ * threading issues.
+ *
+ * Returns "true" if it was sent successfully.
+ */
+static bool sendRequest(JdwpState* state, ExpandBuf* pReq)
+{
+    JdwpNetState* netState = state->netState;
+    int cc;
+
+    /* dumpPacket(expandBufGetBuffer(pReq)); */
+    if (netState->clientSock < 0) {
+        /* can happen with some DDMS events */
+        LOGV("NOT sending request -- no debugger is attached\n");
+        return false;
+    }
+
+    /*
+     * TODO: we currently assume the write() will complete in one
+     * go, which may not be safe for a network socket.  We may need
+     * to mutex this against handlePacket().
+     */
+    /* clear errno so a short (non-failing) write still logs sensibly */
+    errno = 0;
+    cc = write(netState->clientSock, expandBufGetBuffer(pReq),
+            expandBufGetLength(pReq));
+    if (cc != (int) expandBufGetLength(pReq)) {
+        LOGE("Failed sending req to debugger: %s (%d of %d)\n",
+            strerror(errno), cc, (int) expandBufGetLength(pReq));
+        return false;
+    }
+
+    return true;
+}
+
+
+/*
+ * Our functions.
+ *
+ * NOTE: the entries must stay in the order of the JdwpTransport
+ * function-pointer declaration.
+ */
+static const JdwpTransport socketTransport = {
+    startup,
+    acceptConnection,
+    establishConnection,
+    closeConnection,
+    netShutdown,
+    netFree,
+    isConnected,
+    awaitingHandshake,
+    processIncoming,
+    sendRequest
+};
+
+/*
+ * Return our set.
+ *
+ * The returned vtable lives in static storage; callers must not free it.
+ */
+const JdwpTransport* dvmJdwpAndroidAdbTransport(void)
+{
+    return &socketTransport;
+}
+
diff --git a/vm/jdwp/JdwpConstants.c b/vm/jdwp/JdwpConstants.c
new file mode 100644
index 0000000..e089afa
--- /dev/null
+++ b/vm/jdwp/JdwpConstants.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * String constants to go along with enumerated values.  (Pity we don't
+ * have enumerated constant reflection in C.)  These are only needed for
+ * making the output human-readable.
+ */
+#include "jdwp/JdwpConstants.h"
+
+/*
+ * Return a string for the error code.
+ *
+ * Names mirror the JDWP specification's Error Constants table; values
+ * we don't recognize map to "?UNKNOWN?".
+ */
+const char* dvmJdwpErrorStr(enum JdwpError error)
+{
+    switch (error) {
+    case ERR_NONE:
+        return "NONE";
+    case ERR_INVALID_THREAD:
+        return "INVALID_THREAD";
+    case ERR_INVALID_THREAD_GROUP:
+        return "INVALID_THREAD_GROUP";
+    case ERR_INVALID_PRIORITY:
+        return "INVALID_PRIORITY";
+    case ERR_THREAD_NOT_SUSPENDED:
+        return "THREAD_NOT_SUSPENDED";
+    case ERR_THREAD_SUSPENDED:
+        return "THREAD_SUSPENDED";
+    case ERR_INVALID_OBJECT:
+        return "INVALID_OBJECT";    /* was "INVALID_OBJEC" (typo) */
+    case ERR_INVALID_CLASS:
+        return "INVALID_CLASS";
+    case ERR_CLASS_NOT_PREPARED:
+        return "CLASS_NOT_PREPARED";
+    case ERR_INVALID_METHODID:
+        return "INVALID_METHODID";
+    case ERR_INVALID_LOCATION:
+        return "INVALID_LOCATION";
+    case ERR_INVALID_FIELDID:
+        return "INVALID_FIELDID";
+    case ERR_INVALID_FRAMEID:
+        return "INVALID_FRAMEID";
+    case ERR_NO_MORE_FRAMES:
+        return "NO_MORE_FRAMES";
+    case ERR_OPAQUE_FRAME:
+        return "OPAQUE_FRAME";
+    case ERR_NOT_CURRENT_FRAME:
+        return "NOT_CURRENT_FRAME";
+    case ERR_TYPE_MISMATCH:
+        return "TYPE_MISMATCH";
+    case ERR_INVALID_SLOT:
+        return "INVALID_SLOT";
+    case ERR_DUPLICATE:
+        return "DUPLICATE";
+    case ERR_NOT_FOUND:
+        return "NOT_FOUND";
+    case ERR_INVALID_MONITOR:
+        return "INVALID_MONITOR";
+    case ERR_NOT_MONITOR_OWNER:
+        return "NOT_MONITOR_OWNER";
+    case ERR_INTERRUPT:
+        return "INTERRUPT";
+    case ERR_INVALID_CLASS_FORMAT:
+        return "INVALID_CLASS_FORMAT";
+    case ERR_CIRCULAR_CLASS_DEFINITION:
+        return "CIRCULAR_CLASS_DEFINITION";
+    case ERR_FAILS_VERIFICATION:
+        return "FAILS_VERIFICATION";
+    case ERR_ADD_METHOD_NOT_IMPLEMENTED:
+        return "ADD_METHOD_NOT_IMPLEMENTED";
+    case ERR_SCHEMA_CHANGE_NOT_IMPLEMENTED:
+        return "SCHEMA_CHANGE_NOT_IMPLEMENTED";
+    case ERR_INVALID_TYPESTATE:
+        return "INVALID_TYPESTATE";
+    case ERR_HIERARCHY_CHANGE_NOT_IMPLEMENTED:
+        return "HIERARCHY_CHANGE_NOT_IMPLEMENTED";
+    case ERR_DELETE_METHOD_NOT_IMPLEMENTED:
+        return "DELETE_METHOD_NOT_IMPLEMENTED";
+    case ERR_UNSUPPORTED_VERSION:
+        return "UNSUPPORTED_VERSION";
+    case ERR_NAMES_DONT_MATCH:
+        return "NAMES_DONT_MATCH";
+    case ERR_CLASS_MODIFIERS_CHANGE_NOT_IMPLEMENTED:
+        return "CLASS_MODIFIERS_CHANGE_NOT_IMPLEMENTED";
+    case ERR_METHOD_MODIFIERS_CHANGE_NOT_IMPLEMENTED:
+        return "METHOD_MODIFIERS_CHANGE_NOT_IMPLEMENTED";
+    case ERR_NOT_IMPLEMENTED:
+        return "NOT_IMPLEMENTED";
+    case ERR_NULL_POINTER:
+        return "NULL_POINTER";
+    case ERR_ABSENT_INFORMATION:
+        return "ABSENT_INFORMATION";
+    case ERR_INVALID_EVENT_TYPE:
+        return "INVALID_EVENT_TYPE";
+    case ERR_ILLEGAL_ARGUMENT:
+        return "ILLEGAL_ARGUMENT";
+    case ERR_OUT_OF_MEMORY:
+        return "OUT_OF_MEMORY";
+    case ERR_ACCESS_DENIED:
+        return "ACCESS_DENIED";
+    case ERR_VM_DEAD:
+        return "VM_DEAD";
+    case ERR_INTERNAL:
+        return "INTERNAL";
+    case ERR_UNATTACHED_THREAD:
+        return "UNATTACHED_THREAD";
+    case ERR_INVALID_TAG:
+        return "INVALID_TAG";
+    case ERR_ALREADY_INVOKING:
+        return "ALREADY_INVOKING";
+    case ERR_INVALID_INDEX:
+        return "INVALID_INDEX";
+    case ERR_INVALID_LENGTH:
+        return "INVALID_LENGTH";
+    case ERR_INVALID_STRING:
+        return "INVALID_STRING";
+    case ERR_INVALID_CLASS_LOADER:
+        return "INVALID_CLASS_LOADER";
+    case ERR_INVALID_ARRAY:
+        return "INVALID_ARRAY";
+    case ERR_TRANSPORT_LOAD:
+        return "TRANSPORT_LOAD";
+    case ERR_TRANSPORT_INIT:
+        return "TRANSPORT_INIT";
+    case ERR_NATIVE_METHOD:
+        return "NATIVE_METHOD";
+    case ERR_INVALID_COUNT:
+        return "INVALID_COUNT";
+    default:
+        return "?UNKNOWN?";
+    }
+}
+
+/*
+ * Return a string for the EventKind.
+ *
+ * EK_THREAD_END and EK_VM_START alias other enumerators (see
+ * JdwpConstants.h), so their cases are commented out to avoid
+ * duplicate-case compile errors.
+ */
+const char* dvmJdwpEventKindStr(enum JdwpEventKind kind)
+{
+    switch (kind) {
+    case EK_SINGLE_STEP:        return "SINGLE_STEP";
+    case EK_BREAKPOINT:         return "BREAKPOINT";
+    case EK_FRAME_POP:          return "FRAME_POP";
+    case EK_EXCEPTION:          return "EXCEPTION";
+    case EK_USER_DEFINED:       return "USER_DEFINED";
+    case EK_THREAD_START:       return "THREAD_START";
+    /*case EK_THREAD_END:         return "THREAD_END";*/
+    case EK_CLASS_PREPARE:      return "CLASS_PREPARE";
+    case EK_CLASS_UNLOAD:       return "CLASS_UNLOAD";
+    case EK_CLASS_LOAD:         return "CLASS_LOAD";
+    case EK_FIELD_ACCESS:       return "FIELD_ACCESS";
+    case EK_FIELD_MODIFICATION: return "FIELD_MODIFICATION";
+    case EK_EXCEPTION_CATCH:    return "EXCEPTION_CATCH";
+    case EK_METHOD_ENTRY:       return "METHOD_ENTRY";
+    case EK_METHOD_EXIT:        return "METHOD_EXIT";
+    case EK_VM_INIT:            return "VM_INIT";
+    case EK_VM_DEATH:           return "VM_DEATH";
+    case EK_VM_DISCONNECTED:    return "VM_DISCONNECTED";
+    /*case EK_VM_START:           return "VM_START";*/
+    case EK_THREAD_DEATH:       return "THREAD_DEATH";
+    default:                    return "?UNKNOWN?";
+    }
+}
+
+/*
+ * Map a StepDepth value to a human-readable name.
+ */
+const char* dvmJdwpStepDepthStr(enum JdwpStepDepth depth)
+{
+    const char* name;
+
+    switch (depth) {
+    case SD_INTO:   name = "INTO";      break;
+    case SD_OVER:   name = "OVER";      break;
+    case SD_OUT:    name = "OUT";       break;
+    default:        name = "?UNKNOWN?"; break;
+    }
+    return name;
+}
+
+/*
+ * Map a StepSize value to a human-readable name.
+ */
+const char* dvmJdwpStepSizeStr(enum JdwpStepSize size)
+{
+    const char* name;
+
+    switch (size) {
+    case SS_MIN:    name = "MIN";       break;
+    case SS_LINE:   name = "LINE";      break;
+    default:        name = "?UNKNOWN?"; break;
+    }
+    return name;
+}
+
+/*
+ * Return a string for the SuspendPolicy.
+ */
+const char* dvmJdwpSuspendPolicyStr(enum JdwpSuspendPolicy policy)
+{
+    switch (policy) {
+    case SP_NONE:               return "NONE";
+    case SP_EVENT_THREAD:       return "EVENT_THREAD";
+    case SP_ALL:                return "ALL";
+    default:                    return "?UNKNOWN?";
+    }
+}
+
+/*
+ * Return a string for the SuspendStatus.
+ *
+ * Zero has no enumerator; it simply means "not suspended".
+ */
+const char* dvmJdwpSuspendStatusStr(enum JdwpSuspendStatus status)
+{
+    switch (status) {
+    case 0:                         return "Not SUSPENDED";
+    case SUSPEND_STATUS_SUSPENDED:  return "SUSPENDED";
+    default:                        return "?UNKNOWN?";
+    }
+}
+
+/*
+ * Return a string for the ThreadStatus.
+ */
+const char* dvmJdwpThreadStatusStr(enum JdwpThreadStatus status)
+{
+    switch (status) {
+    case TS_ZOMBIE:             return "ZOMBIE";
+    case TS_RUNNING:            return "RUNNING";
+    case TS_SLEEPING:           return "SLEEPING";
+    case TS_MONITOR:            return "MONITOR";
+    case TS_WAIT:               return "WAIT";
+    default:                    return "?UNKNOWN?";
+    }
+}
+
diff --git a/vm/jdwp/JdwpConstants.h b/vm/jdwp/JdwpConstants.h
new file mode 100644
index 0000000..922dbcd
--- /dev/null
+++ b/vm/jdwp/JdwpConstants.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * These come out of the JDWP documentation.
+ */
+#ifndef _DALVIK_JDWP_JDWPCONSTANTS
+#define _DALVIK_JDWP_JDWPCONSTANTS
+
+/*
+ * Error constants.
+ *
+ * Values and grouping follow the JDWP specification's Error Constants
+ * table; gaps in the numbering are intentional.
+ */
+enum JdwpError {
+    ERR_NONE                                        = 0,
+    ERR_INVALID_THREAD                              = 10,
+    ERR_INVALID_THREAD_GROUP                        = 11,
+    ERR_INVALID_PRIORITY                            = 12,
+    ERR_THREAD_NOT_SUSPENDED                        = 13,
+    ERR_THREAD_SUSPENDED                            = 14,
+    ERR_INVALID_OBJECT                              = 20,
+    ERR_INVALID_CLASS                               = 21,
+    ERR_CLASS_NOT_PREPARED                          = 22,
+    ERR_INVALID_METHODID                            = 23,
+    ERR_INVALID_LOCATION                            = 24,
+    ERR_INVALID_FIELDID                             = 25,
+    ERR_INVALID_FRAMEID                             = 30,
+    ERR_NO_MORE_FRAMES                              = 31,
+    ERR_OPAQUE_FRAME                                = 32,
+    ERR_NOT_CURRENT_FRAME                           = 33,
+    ERR_TYPE_MISMATCH                               = 34,
+    ERR_INVALID_SLOT                                = 35,
+    ERR_DUPLICATE                                   = 40,
+    ERR_NOT_FOUND                                   = 41,
+    ERR_INVALID_MONITOR                             = 50,
+    ERR_NOT_MONITOR_OWNER                           = 51,
+    ERR_INTERRUPT                                   = 52,
+    ERR_INVALID_CLASS_FORMAT                        = 60,
+    ERR_CIRCULAR_CLASS_DEFINITION                   = 61,
+    ERR_FAILS_VERIFICATION                          = 62,
+    ERR_ADD_METHOD_NOT_IMPLEMENTED                  = 63,
+    ERR_SCHEMA_CHANGE_NOT_IMPLEMENTED               = 64,
+    ERR_INVALID_TYPESTATE                           = 65,
+    ERR_HIERARCHY_CHANGE_NOT_IMPLEMENTED            = 66,
+    ERR_DELETE_METHOD_NOT_IMPLEMENTED               = 67,
+    ERR_UNSUPPORTED_VERSION                         = 68,
+    ERR_NAMES_DONT_MATCH                            = 69,
+    ERR_CLASS_MODIFIERS_CHANGE_NOT_IMPLEMENTED      = 70,
+    ERR_METHOD_MODIFIERS_CHANGE_NOT_IMPLEMENTED     = 71,
+    ERR_NOT_IMPLEMENTED                             = 99,
+    ERR_NULL_POINTER                                = 100,
+    ERR_ABSENT_INFORMATION                          = 101,
+    ERR_INVALID_EVENT_TYPE                          = 102,
+    ERR_ILLEGAL_ARGUMENT                            = 103,
+    ERR_OUT_OF_MEMORY                               = 110,
+    ERR_ACCESS_DENIED                               = 111,
+    ERR_VM_DEAD                                     = 112,
+    ERR_INTERNAL                                    = 113,
+    ERR_UNATTACHED_THREAD                           = 115,
+    ERR_INVALID_TAG                                 = 500,
+    ERR_ALREADY_INVOKING                            = 502,
+    ERR_INVALID_INDEX                               = 503,
+    ERR_INVALID_LENGTH                              = 504,
+    ERR_INVALID_STRING                              = 506,
+    ERR_INVALID_CLASS_LOADER                        = 507,
+    ERR_INVALID_ARRAY                               = 508,
+    ERR_TRANSPORT_LOAD                              = 509,
+    ERR_TRANSPORT_INIT                              = 510,
+    ERR_NATIVE_METHOD                               = 511,
+    ERR_INVALID_COUNT                               = 512,
+};
+typedef enum JdwpError JdwpError;
+const char* dvmJdwpErrorStr(enum JdwpError error);
+
+
+/*
+ * ClassStatus constants.  These are bit flags that can be ORed together.
+ */
+enum JdwpClassStatus {
+    CS_VERIFIED             = 0x01,
+    CS_PREPARED             = 0x02,
+    CS_INITIALIZED          = 0x04,
+    CS_ERROR                = 0x08,
+};
+
+/*
+ * EventKind constants.
+ *
+ * The last two entries are aliases, which is why the string function
+ * can't list all names.
+ */
+enum JdwpEventKind {
+    EK_SINGLE_STEP          = 1,
+    EK_BREAKPOINT           = 2,
+    EK_FRAME_POP            = 3,
+    EK_EXCEPTION            = 4,
+    EK_USER_DEFINED         = 5,
+    EK_THREAD_START         = 6,
+    EK_THREAD_END           = 7,
+    EK_CLASS_PREPARE        = 8,
+    EK_CLASS_UNLOAD         = 9,
+    EK_CLASS_LOAD           = 10,
+    EK_FIELD_ACCESS         = 20,
+    EK_FIELD_MODIFICATION   = 21,
+    EK_EXCEPTION_CATCH      = 30,
+    EK_METHOD_ENTRY         = 40,
+    EK_METHOD_EXIT          = 41,
+    EK_VM_INIT              = 90,
+    EK_VM_DEATH             = 99,
+    EK_VM_DISCONNECTED      = 100,  /* never sent across JDWP */
+    EK_VM_START             = EK_VM_INIT,
+    EK_THREAD_DEATH         = EK_THREAD_END,
+};
+const char* dvmJdwpEventKindStr(enum JdwpEventKind kind);
+
+/*
+ * Values for "modKind" in EventRequest.Set.
+ */
+enum JdwpModKind {
+    MK_COUNT                = 1,
+    MK_CONDITIONAL          = 2,
+    MK_THREAD_ONLY          = 3,
+    MK_CLASS_ONLY           = 4,
+    MK_CLASS_MATCH          = 5,
+    MK_CLASS_EXCLUDE        = 6,
+    MK_LOCATION_ONLY        = 7,
+    MK_EXCEPTION_ONLY       = 8,
+    MK_FIELD_ONLY           = 9,
+    MK_STEP                 = 10,
+    MK_INSTANCE_ONLY        = 11,
+};
+
+/*
+ * InvokeOptions constants (bit flags).
+ */
+enum JdwpInvokeOptions {
+    INVOKE_SINGLE_THREADED  = 0x01,
+    INVOKE_NONVIRTUAL       = 0x02,
+};
+
+/*
+ * StepDepth constants.
+ */
+enum JdwpStepDepth {
+    SD_INTO                 = 0,    /* step into method calls */
+    SD_OVER                 = 1,    /* step over method calls */
+    SD_OUT                  = 2,    /* step out of current method */
+};
+const char* dvmJdwpStepDepthStr(enum JdwpStepDepth depth);
+
+/*
+ * StepSize constants.
+ */
+enum JdwpStepSize {
+    SS_MIN                  = 0,    /* step by minimum (e.g. 1 bytecode inst) */
+    SS_LINE                 = 1,    /* if possible, step to next line */
+};
+const char* dvmJdwpStepSizeStr(enum JdwpStepSize size);
+
+/*
+ * SuspendPolicy constants.
+ */
+enum JdwpSuspendPolicy {
+    SP_NONE                 = 0,    /* suspend no threads */
+    SP_EVENT_THREAD         = 1,    /* suspend event thread */
+    SP_ALL                  = 2,    /* suspend all threads */
+};
+const char* dvmJdwpSuspendPolicyStr(enum JdwpSuspendPolicy policy);
+
+/*
+ * SuspendStatus constants.  (Zero means "not suspended".)
+ */
+enum JdwpSuspendStatus {
+    SUSPEND_STATUS_SUSPENDED = 1,
+};
+const char* dvmJdwpSuspendStatusStr(enum JdwpSuspendStatus status);
+
+/*
+ * ThreadStatus constants.
+ */
+enum JdwpThreadStatus {
+    TS_ZOMBIE               = 0,
+    TS_RUNNING              = 1,        // RUNNING
+    TS_SLEEPING             = 2,        // (in Thread.sleep())
+    TS_MONITOR              = 3,        // WAITING (monitor wait)
+    TS_WAIT                 = 4,        // (in Object.wait())
+};
+const char* dvmJdwpThreadStatusStr(enum JdwpThreadStatus status);
+
+/*
+ * TypeTag constants.
+ */
+enum JdwpTypeTag {
+    TT_CLASS                = 1,
+    TT_INTERFACE            = 2,
+    TT_ARRAY                = 3,
+};
+
+/*
+ * Tag constants.  Values are the JDWP signature characters.
+ */
+enum JdwpType {
+    JT_ARRAY                 = '[',
+    JT_BYTE                  = 'B',
+    JT_CHAR                  = 'C',
+    JT_OBJECT                = 'L',
+    JT_FLOAT                 = 'F',
+    JT_DOUBLE                = 'D',
+    JT_INT                   = 'I',
+    JT_LONG                  = 'J',
+    JT_SHORT                 = 'S',
+    JT_VOID                  = 'V',
+    JT_BOOLEAN               = 'Z',
+    JT_STRING                = 's',
+    JT_THREAD                = 't',
+    JT_THREAD_GROUP          = 'g',
+    JT_CLASS_LOADER          = 'l',
+    JT_CLASS_OBJECT          = 'c',
+};
+
+#endif /*_DALVIK_JDWP_JDWPCONSTANTS*/
diff --git a/vm/jdwp/JdwpEvent.c b/vm/jdwp/JdwpEvent.c
new file mode 100644
index 0000000..a3ff05a
--- /dev/null
+++ b/vm/jdwp/JdwpEvent.c
@@ -0,0 +1,1290 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Send events to the debugger.
+ */
+#include "jdwp/JdwpPriv.h"
+#include "jdwp/JdwpConstants.h"
+#include "jdwp/JdwpHandler.h"
+#include "jdwp/JdwpEvent.h"
+#include "jdwp/ExpandBuf.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <stddef.h>     /* for offsetof() */
+#include <unistd.h>
+
+/*
+General notes:
+
+The event add/remove stuff usually happens from the debugger thread,
+in response to requests from the debugger, but can also happen as the
+result of an event in an arbitrary thread (e.g. an event with a "count"
+mod expires).  It's important to keep the event list locked when processing
+events.
+
+Event posting can happen from any thread.  The JDWP thread will not usually
+post anything but VM start/death, but if a JDWP request causes a class
+to be loaded, the ClassPrepare event will come from the JDWP thread.
+
+
+We can have serialization issues when we post an event to the debugger.
+For example, a thread could send an "I hit a breakpoint and am suspending
+myself" message to the debugger.  Before it manages to suspend itself, the
+debugger's response ("not interested, resume thread") arrives and is
+processed.  We try to resume a thread that hasn't yet suspended.
+
+This means that, after posting an event to the debugger, we need to wait
+for the event thread to suspend itself (and, potentially, all other threads)
+before processing any additional requests from the debugger.  While doing
+so we need to be aware that multiple threads may be hitting breakpoints
+or other events simultaneously, so we either need to wait for all of them
+or serialize the events with each other.
+
+The current mechanism works like this:
+  Event thread:
+   - If I'm going to suspend, grab the "I am posting an event" token.  Wait
+     for it if it's not currently available.
+   - Post the event to the debugger.
+   - If appropriate, suspend others and then myself.  As part of suspending
+     myself, release the "I am posting" token.
+  JDWP thread:
+   - When an event arrives, see if somebody is posting an event.  If so,
+     sleep until we can acquire the "I am posting an event" token.  Release
+     it immediately and continue processing -- the event we have already
+     received should not interfere with other events that haven't yet
+     been posted.
+
+Some care must be taken to avoid deadlock:
+
+ - thread A and thread B exit near-simultaneously, and post thread-death
+   events with a "suspend all" clause
+ - thread A gets the event token, thread B sits and waits for it
+ - thread A wants to suspend all other threads, but thread B is waiting
+   for the token and can't be suspended
+
+So we need to mark thread B in such a way that thread A doesn't wait for it.
+
+If we just bracket the "grab event token" call with a change to VMWAIT
+before sleeping, the switch back to RUNNING state when we get the token
+will cause thread B to suspend (remember, thread A's global suspend is
+still in force, even after it releases the token).  Suspending while
+holding the event token is very bad, because it prevents the JDWP thread
+from processing incoming messages.
+
+We need to change to VMWAIT state at the *start* of posting an event,
+and stay there until we either finish posting the event or decide to
+put ourselves to sleep.  That way we don't interfere with anyone else and
+don't allow anyone else to interfere with us.
+*/
+
+
+#define kJdwpEventCommandSet    64
+#define kJdwpCompositeCommand   100
+
+/*
+ * Stuff to compare against when deciding if a mod matches.  Only the
+ * values for mods valid for the event being evaluated will be filled in.
+ * The rest will be zeroed.
+ */
+typedef struct ModBasket {
+    const JdwpLocation* pLoc;           /* LocationOnly */
+    const char*         className;      /* ClassMatch/ClassExclude */
+    ObjectId            threadId;       /* ThreadOnly */
+    RefTypeId           classId;        /* ClassOnly */
+    RefTypeId           excepClassId;   /* ExceptionOnly */
+    bool                caught;         /* ExceptionOnly */
+    FieldId             field;          /* FieldOnly */
+    ObjectId            thisPtr;        /* InstanceOnly */
+    /* nothing for StepOnly -- handled differently */
+} ModBasket;
+
+/*
+ * Get the next "request" serial number.  We use this when sending
+ * packets to the debugger.
+ */
+u4 dvmJdwpNextRequestSerial(JdwpState* state)
+{
+    /* the counter is shared between threads, so bump it under the lock */
+    dvmDbgLockMutex(&state->serialLock);
+    u4 serial = state->requestSerial++;
+    dvmDbgUnlockMutex(&state->serialLock);
+
+    return serial;
+}
+
+/*
+ * Get the next "event" serial number.  We use this in the response to
+ * message type EventRequest.Set.
+ */
+u4 dvmJdwpNextEventSerial(JdwpState* state)
+{
+    /* the counter is shared between threads, so bump it under the lock */
+    dvmDbgLockMutex(&state->serialLock);
+    u4 serial = state->eventSerial++;
+    dvmDbgUnlockMutex(&state->serialLock);
+
+    return serial;
+}
+
+/*
+ * Lock the "event" mutex, which guards the list of registered events.
+ */
+static void lockEventMutex(JdwpState* state)
+{
+    /* NOTE(review): the thread-state transition around the blocking lock
+     * is currently disabled -- confirm callers that may block have already
+     * moved out of RUNNING state */
+    //dvmDbgThreadWaiting();
+    dvmDbgLockMutex(&state->eventLock);
+    //dvmDbgThreadRunning();
+}
+
+/*
+ * Unlock the "event" mutex.
+ */
+static void unlockEventMutex(JdwpState* state)
+{
+    /* release the lock taken by lockEventMutex() */
+    dvmDbgUnlockMutex(&state->eventLock);
+}
+
+/*
+ * Add an event to the list.  Ordering is not important.
+ *
+ * If something prevents the event from being registered, e.g. it's a
+ * single-step request on a thread that doesn't exist, the event will
+ * not be added to the list, and an appropriate error will be returned.
+ */
+JdwpError dvmJdwpRegisterEvent(JdwpState* state, JdwpEvent* pEvent)
+{
+    int i;
+
+    /* validate arguments before dereferencing them (the old code locked
+     * through "state" before asserting it was non-NULL) */
+    assert(state != NULL);
+    assert(pEvent != NULL);
+    assert(pEvent->prev == NULL);
+    assert(pEvent->next == NULL);
+
+    lockEventMutex(state);
+
+    /*
+     * If one or more LocationOnly mods are used, register them with
+     * the interpreter.
+     */
+    for (i = 0; i < pEvent->modCount; i++) {
+        JdwpEventMod* pMod = &pEvent->mods[i];
+        if (pMod->modKind == MK_LOCATION_ONLY) {
+            /* should only be for Breakpoint, Step, and Exception */
+            dvmDbgWatchLocation(&pMod->locationOnly.loc);
+        }
+        if (pMod->modKind == MK_STEP) {
+            /* should only be for EK_SINGLE_STEP; should only be one */
+            dvmDbgConfigureStep(pMod->step.threadId, pMod->step.size,
+                pMod->step.depth);
+        }
+    }
+
+    /*
+     * Add to the head of the list; ordering is not important.
+     */
+    if (state->eventList != NULL) {
+        pEvent->next = state->eventList;
+        state->eventList->prev = pEvent;
+    }
+    state->eventList = pEvent;
+    state->numEvents++;
+
+    unlockEventMutex(state);
+
+    /* nothing above can fail yet; the unused "bail" label and error
+     * variable from the original have been removed */
+    return ERR_NONE;
+}
+
+/*
+ * Remove an event from the list.  This will also remove the event from
+ * any optimization tables, e.g. breakpoints.
+ *
+ * Does not free the JdwpEvent.
+ *
+ * Grab the eventLock before calling here.
+ */
+static void unregisterEvent(JdwpState* state, JdwpEvent* pEvent)
+{
+    int i;
+
+    /* unlink from the doubly-linked event list */
+    if (pEvent->prev == NULL) {
+        /* head of the list */
+        assert(state->eventList == pEvent);
+
+        state->eventList = pEvent->next;
+    } else {
+        pEvent->prev->next = pEvent->next;
+    }
+
+    if (pEvent->next != NULL) {
+        pEvent->next->prev = pEvent->prev;
+        pEvent->next = NULL;
+    }
+    /* clear both links so dvmJdwpEventFree's asserts pass */
+    pEvent->prev = NULL;
+
+    /*
+     * Unhook us from the interpreter, if necessary.
+     */
+    for (i = 0; i < pEvent->modCount; i++) {
+        JdwpEventMod* pMod = &pEvent->mods[i];
+        if (pMod->modKind == MK_LOCATION_ONLY) {
+            /* should only be for Breakpoint, Step, and Exception */
+            dvmDbgUnwatchLocation(&pMod->locationOnly.loc);
+        }
+        if (pMod->modKind == MK_STEP) {
+            /* should only be for EK_SINGLE_STEP; should only be one */
+            dvmDbgUnconfigureStep(pMod->step.threadId);
+        }
+    }
+
+    state->numEvents--;
+    /* count and list emptiness must agree */
+    assert(state->numEvents != 0 || state->eventList == NULL);
+}
+
+/*
+ * Remove the event with the given ID from the list.
+ *
+ * Failure to find the event isn't really an error, but it is a little
+ * weird.  (It looks like Eclipse will try to be extra careful and will
+ * explicitly remove one-off single-step events.)
+ */
+void dvmJdwpUnregisterEventById(JdwpState* state, u4 requestId)
+{
+    JdwpEvent* pEvent;
+
+    lockEventMutex(state);
+
+    /* request IDs are unique, so we can stop at the first match */
+    for (pEvent = state->eventList; pEvent != NULL; pEvent = pEvent->next) {
+        if (pEvent->requestId == requestId) {
+            unregisterEvent(state, pEvent);
+            dvmJdwpEventFree(pEvent);
+            break;
+        }
+    }
+
+    /* not finding a match is legal, if a little odd */
+    //LOGD("Odd: no match when removing event reqId=0x%04x\n", requestId);
+
+    unlockEventMutex(state);
+}
+
+/*
+ * Remove all entries from the event list.
+ */
+void dvmJdwpUnregisterAll(JdwpState* state)
+{
+    lockEventMutex(state);
+
+    /* walk the list, saving each "next" link before freeing the node */
+    JdwpEvent* pEvent = state->eventList;
+    while (pEvent != NULL) {
+        JdwpEvent* pNext = pEvent->next;
+
+        unregisterEvent(state, pEvent);
+        dvmJdwpEventFree(pEvent);
+        pEvent = pNext;
+    }
+
+    state->eventList = NULL;
+
+    unlockEventMutex(state);
+}
+
+
+
+/*
+ * Allocate a JdwpEvent struct with enough space to hold the specified
+ * number of mod records.
+ */
+JdwpEvent* dvmJdwpEventAlloc(int numMods)
+{
+    JdwpEvent* newEvent;
+    int allocSize = offsetof(JdwpEvent, mods) +
+                    numMods * sizeof(newEvent->mods[0]);
+
+    /*
+     * Use calloc so the struct arrives zero-filled; the original code
+     * memset a malloc result without checking it for NULL first, which
+     * would crash on allocation failure.  Callers must still check for
+     * a NULL return.
+     */
+    newEvent = (JdwpEvent*) calloc(1, allocSize);
+    return newEvent;
+}
+
+/*
+ * Free a JdwpEvent.
+ *
+ * Do not call this until the event has been removed from the list.
+ */
+void dvmJdwpEventFree(JdwpEvent* pEvent)
+{
+    int i;
+
+    /* free(NULL)-style tolerance */
+    if (pEvent == NULL)
+        return;
+
+    /* make sure it was removed from the list */
+    assert(pEvent->prev == NULL);
+    assert(pEvent->next == NULL);
+    /* want to assert state->eventList != pEvent */
+
+    /*
+     * Free any hairy bits in the mods.  Only the class pattern mods own
+     * heap-allocated data (the pattern strings).
+     */
+    for (i = 0; i < pEvent->modCount; i++) {
+        if (pEvent->mods[i].modKind == MK_CLASS_MATCH) {
+            free(pEvent->mods[i].classMatch.classPattern);
+            pEvent->mods[i].classMatch.classPattern = NULL;
+        }
+        if (pEvent->mods[i].modKind == MK_CLASS_EXCLUDE) {
+            free(pEvent->mods[i].classExclude.classPattern);
+            pEvent->mods[i].classExclude.classPattern = NULL;
+        }
+    }
+
+    free(pEvent);
+}
+
+/*
+ * Allocate storage for matching events.  To keep things simple we
+ * use an array with enough storage for the entire list.
+ *
+ * The state->eventLock should be held before calling.
+ */
+static JdwpEvent** allocMatchList(JdwpState* state)
+{
+    /* NOTE(review): callers do not NULL-check this -- confirm the OOM policy */
+    return (JdwpEvent**) malloc(sizeof(JdwpEvent*) * state->numEvents);
+}
+
+/*
+ * Run through the list and remove any entries with an expired "count" mod
+ * from the event list, then free the match list.
+ */
+static void cleanupMatchList(JdwpState* state, JdwpEvent** matchList,
+    int matchCount)
+{
+    JdwpEvent** ppEvent = matchList;
+
+    while (matchCount--) {
+        JdwpEvent* pEvent = *ppEvent;
+        int i;
+
+        /* a Count mod that has reached zero means the event has expired;
+         * unregister and free it (modsMatch decremented the counter) */
+        for (i = 0; i < pEvent->modCount; i++) {
+            if (pEvent->mods[i].modKind == MK_COUNT &&
+                pEvent->mods[i].count.count == 0)
+            {
+                LOGV("##### Removing expired event\n");
+                unregisterEvent(state, pEvent);
+                dvmJdwpEventFree(pEvent);
+                break;
+            }
+        }
+
+        ppEvent++;
+    }
+
+    /* the match list itself always gets freed, even when empty */
+    free(matchList);
+}
+
+/*
+ * Match a string against a "restricted regular expression", which is just
+ * a string that may start or end with '*' (e.g. "*.Foo" or "java.*").
+ *
+ * ("Restricted name globbing" might have been a better term.)
+ */
+/*
+ * Returns "true" if "target" matches "pattern", where the pattern may
+ * begin or end with a single '*' wildcard (but not both, and not in
+ * the middle).  An empty pattern matches only an empty target.
+ */
+static bool patternMatch(const char* pattern, const char* target)
+{
+    int patLen = strlen(pattern);
+
+    /* guard against patLen==0 before indexing pattern[patLen-1] below */
+    if (patLen > 0 && pattern[0] == '*') {
+        /* leading wildcard: compare the pattern tail to the target tail */
+        int targetLen = strlen(target);
+        patLen--;
+
+        /* check the length BEFORE forming target+(targetLen-patLen);
+         * with targetLen < patLen that pointer would be out of range */
+        if (targetLen < patLen)
+            return false;
+
+        // TODO: remove printf when we find a test case to verify this
+        LOGE(">>> comparing '%s' to '%s'\n",
+            pattern+1, target + (targetLen-patLen));
+
+        return strcmp(pattern+1, target + (targetLen-patLen)) == 0;
+    } else if (patLen > 0 && pattern[patLen-1] == '*') {
+        /* trailing wildcard: prefix comparison */
+        return strncmp(pattern, target, patLen-1) == 0;
+    } else {
+        /* no wildcard: exact match */
+        return strcmp(pattern, target) == 0;
+    }
+}
+
+/*
+ * See if two locations are equal.
+ *
+ * It's tempting to do a bitwise compare ("struct ==" or memcmp), but if
+ * the storage wasn't zeroed out there could be undefined values in the
+ * padding.  Besides, the odds of "idx" being equal while the others aren't
+ * is very small, so this is usually just a simple integer comparison.
+ */
+static inline bool locationMatch(const JdwpLocation* pLoc1,
+    const JdwpLocation* pLoc2)
+{
+    /* compare field-by-field; "idx" differs most often, so test it first */
+    if (pLoc1->idx != pLoc2->idx)
+        return false;
+    if (pLoc1->methodId != pLoc2->methodId)
+        return false;
+    if (pLoc1->classId != pLoc2->classId)
+        return false;
+    return pLoc1->typeTag == pLoc2->typeTag;
+}
+
+/*
+ * See if the event's mods match up with the contents of "basket".
+ *
+ * If we find a Count mod before rejecting an event, we decrement it.  We
+ * need to do this even if later mods cause us to ignore the event.
+ */
+static bool modsMatch(JdwpState* state, JdwpEvent* pEvent, ModBasket* basket)
+{
+    JdwpEventMod* pMod = pEvent->mods;
+    int i;
+
+    for (i = pEvent->modCount; i > 0; i--, pMod++) {
+        switch (pMod->modKind) {
+        case MK_COUNT:
+            /* decrement but don't reject; expired events are pruned later
+             * by cleanupMatchList() */
+            assert(pMod->count.count > 0);
+            pMod->count.count--;
+            break;
+        case MK_CONDITIONAL:
+            assert(false);  // should not be getting these
+            break;
+        case MK_THREAD_ONLY:
+            if (pMod->threadOnly.threadId != basket->threadId)
+                return false;
+            break;
+        case MK_CLASS_ONLY:
+            if (!dvmDbgMatchType(basket->classId,
+                    pMod->classOnly.referenceTypeId))
+                return false;
+            break;
+        case MK_CLASS_MATCH:
+            if (!patternMatch(pMod->classMatch.classPattern,
+                    basket->className))
+                return false;
+            break;
+        case MK_CLASS_EXCLUDE:
+            /* fixed: read the pattern through the classExclude union
+             * member, not classMatch (worked only by union aliasing) */
+            if (patternMatch(pMod->classExclude.classPattern,
+                    basket->className))
+                return false;
+            break;
+        case MK_LOCATION_ONLY:
+            if (!locationMatch(&pMod->locationOnly.loc, basket->pLoc))
+                return false;
+            break;
+        case MK_EXCEPTION_ONLY:
+            /* refTypeId==0 means "any exception class" */
+            if (pMod->exceptionOnly.refTypeId != 0 &&
+                !dvmDbgMatchType(basket->excepClassId,
+                                 pMod->exceptionOnly.refTypeId))
+                return false;
+            if ((basket->caught && !pMod->exceptionOnly.caught) ||
+                (!basket->caught && !pMod->exceptionOnly.uncaught))
+                return false;
+            break;
+        case MK_FIELD_ONLY:
+            // TODO
+            break;
+        case MK_STEP:
+            if (pMod->step.threadId != basket->threadId)
+                return false;
+            break;
+        case MK_INSTANCE_ONLY:
+            if (pMod->instanceOnly.objectId != basket->thisPtr)
+                return false;
+            break;
+        default:
+            LOGE("unhandled mod kind %d\n", pMod->modKind);
+            assert(false);
+            break;
+        }
+    }
+
+    return true;
+}
+
+/*
+ * Find all events of type "eventKind" with mods that match up with the
+ * rest of the arguments.
+ *
+ * Found events are appended to "matchList", and "*pMatchCount" is advanced,
+ * so this may be called multiple times for grouped events.
+ *
+ * DO NOT call this multiple times for the same eventKind, as Count mods are
+ * decremented during the scan.
+ */
+static void findMatchingEvents(JdwpState* state, enum JdwpEventKind eventKind,
+    ModBasket* basket, JdwpEvent** matchList, int* pMatchCount)
+{
+    JdwpEvent* pEvent;
+
+    /* start after the existing entries */
+    matchList += *pMatchCount;
+
+    /* linear scan of the registered-event list; caller holds eventLock */
+    pEvent = state->eventList;
+    while (pEvent != NULL) {
+        /* modsMatch() may decrement Count mods even on later rejection */
+        if (pEvent->eventKind == eventKind && modsMatch(state, pEvent, basket))
+        {
+            *matchList++ = pEvent;
+            (*pMatchCount)++;
+        }
+
+        pEvent = pEvent->next;
+    }
+}
+
+/*
+ * Scan through the list of matches and determine the most severe
+ * suspension policy.
+ */
+static enum JdwpSuspendPolicy scanSuspendPolicy(JdwpEvent** matchList,
+    int matchCount)
+{
+    enum JdwpSuspendPolicy worst = SP_NONE;
+    int i;
+
+    /* policies are numerically ordered by severity (SP_NONE < SP_ALL) */
+    for (i = 0; i < matchCount; i++) {
+        if (matchList[i]->suspendPolicy > worst)
+            worst = matchList[i]->suspendPolicy;
+    }
+
+    return worst;
+}
+
+/*
+ * Three possibilities:
+ *  SP_NONE - do nothing
+ *  SP_EVENT_THREAD - suspend ourselves
+ *  SP_ALL - suspend everybody except JDWP support thread
+ */
+static void suspendByPolicy(JdwpState* state,
+    enum JdwpSuspendPolicy suspendPolicy)
+{
+    if (suspendPolicy == SP_NONE)
+        return;
+
+    if (suspendPolicy == SP_ALL) {
+        dvmDbgSuspendVM(true);
+    } else {
+        assert(suspendPolicy == SP_EVENT_THREAD);
+    }
+
+    /* this is rare but possible -- see CLASS_PREPARE handling */
+    if (dvmDbgGetThreadSelfId() == state->debugThreadId) {
+        LOGI("NOTE: suspendByPolicy not suspending JDWP thread\n");
+        return;
+    }
+
+    /*
+     * Loop: each time we resume we may have been handed a method-invoke
+     * request from the debugger; execute it and suspend again until the
+     * debugger resumes us without a pending invoke.
+     */
+    DebugInvokeReq* pReq = dvmDbgGetInvokeReq();
+    while (true) {
+        pReq->ready = true;
+        dvmDbgSuspendSelf();
+        pReq->ready = false;
+
+        /*
+         * The JDWP thread has told us (and possibly all other threads) to
+         * resume.  See if it has left anything in our DebugInvokeReq mailbox.
+         */
+        if (!pReq->invokeNeeded) {
+            /*LOGD("suspendByPolicy: no invoke needed\n");*/
+            break;
+        }
+
+        /* grab this before posting/suspending again */
+        dvmJdwpSetWaitForEventThread(state, dvmDbgGetThreadSelfId());
+
+        /* leave pReq->invokeNeeded raised so we can check reentrancy */
+        LOGV("invoking method...\n");
+        dvmDbgExecuteMethod(pReq);
+
+        pReq->err = ERR_NONE;
+
+        /* clear this before signaling */
+        pReq->invokeNeeded = false;
+
+        /* wake the JDWP thread waiting on pReq->cv for the invoke result */
+        LOGV("invoke complete, signaling and self-suspending\n");
+        dvmDbgLockMutex(&pReq->lock);
+        dvmDbgCondSignal(&pReq->cv);
+        dvmDbgUnlockMutex(&pReq->lock);
+    }
+}
+
+/*
+ * Determine if there is a method invocation in progress in the current
+ * thread.
+ *
+ * We look at the "invokeNeeded" flag in the per-thread DebugInvokeReq
+ * state.  If set, we're in the process of invoking a method.
+ */
+static bool invokeInProgress(JdwpState* state)
+{
+    /* per-thread mailbox; "invokeNeeded" stays raised for the duration
+     * of a debugger-requested method invocation */
+    return dvmDbgGetInvokeReq()->invokeNeeded;
+}
+
+/*
+ * We need the JDWP thread to hold off on doing stuff while we post an
+ * event and then suspend ourselves.
+ *
+ * Call this with a threadId of zero if you just want to wait for the
+ * current thread operation to complete.
+ *
+ * This could go to sleep waiting for another thread, so it's important
+ * that the thread be marked as VMWAIT before calling here.
+ */
+void dvmJdwpSetWaitForEventThread(JdwpState* state, ObjectId threadId)
+{
+    bool waited = false;
+
+    /* this is held for very brief periods; contention is unlikely */
+    dvmDbgLockMutex(&state->eventThreadLock);
+
+    /*
+     * If another thread is already doing stuff, wait for it.  This can
+     * go to sleep indefinitely.
+     */
+    while (state->eventThreadId != 0) {
+        LOGV("event in progress (0x%llx), 0x%llx sleeping\n",
+            state->eventThreadId, threadId);
+        waited = true;
+        dvmDbgCondWait(&state->eventThreadCond, &state->eventThreadLock);
+    }
+
+    if (waited || threadId != 0)
+        LOGV("event token grabbed (0x%llx)\n", threadId);
+    /* threadId==0 means "just wait for the current holder"; only a real
+     * thread ID takes ownership of the token */
+    if (threadId != 0)
+        state->eventThreadId = threadId;
+
+    dvmDbgUnlockMutex(&state->eventThreadLock);
+}
+
+/*
+ * Clear the threadId and signal anybody waiting.
+ */
+void dvmJdwpClearWaitForEventThread(JdwpState* state)
+{
+    /*
+     * Grab the mutex.  Don't try to go in/out of VMWAIT mode, as this
+     * function is called by dvmSuspendSelf(), and the transition back
+     * to RUNNING would confuse it.
+     */
+    dvmDbgLockMutex(&state->eventThreadLock);
+
+    /* token must be held when this is called */
+    assert(state->eventThreadId != 0);
+    LOGV("cleared event token (0x%llx)\n", state->eventThreadId);
+
+    state->eventThreadId = 0;
+
+    /* wake one waiter in dvmJdwpSetWaitForEventThread() */
+    dvmDbgCondSignal(&state->eventThreadCond);
+
+    dvmDbgUnlockMutex(&state->eventThreadLock);
+}
+
+
+/*
+ * Prep an event.  Allocates storage for the message and leaves space for
+ * the header.
+ */
+static ExpandBuf* eventPrep(void)
+{
+    /* reserve room for the JDWP header; eventFinish() fills it in */
+    ExpandBuf* pBuf = expandBufAlloc();
+    expandBufAddSpace(pBuf, kJDWPHeaderLen);
+    return pBuf;
+}
+
+/*
+ * Write the header into the buffer and send the packet off to the debugger.
+ *
+ * Takes ownership of "pReq" (currently discards it).
+ */
+static void eventFinish(JdwpState* state, ExpandBuf* pReq)
+{
+    u1* buf = expandBufGetBuffer(pReq);
+
+    /* JDWP packet header: length, serial, flags, command set, command */
+    set4BE(buf, expandBufGetLength(pReq));
+    set4BE(buf+4, dvmJdwpNextRequestSerial(state));
+    set1(buf+8, 0);     /* flags */
+    set1(buf+9, kJdwpEventCommandSet);
+    set1(buf+10, kJdwpCompositeCommand);
+
+    dvmJdwpSendRequest(state, pReq);
+
+    /* "takes ownership": buffer is always freed here */
+    expandBufFree(pReq);
+}
+
+
+/*
+ * Tell the debugger that we have finished initializing.  This is always
+ * sent, even if the debugger hasn't requested it.
+ *
+ * This should be sent "before the main thread is started and before
+ * any application code has been executed".  The thread ID in the message
+ * must be for the main thread.
+ */
+bool dvmJdwpPostVMStart(JdwpState* state, bool suspend)
+{
+    enum JdwpSuspendPolicy suspendPolicy;
+    ObjectId threadId = dvmDbgGetThreadSelfId();
+
+    if (suspend)
+        suspendPolicy = SP_ALL;
+    else
+        suspendPolicy = SP_NONE;
+
+    /* probably don't need this here */
+    lockEventMutex(state);
+
+    LOGV("EVENT: %s\n", dvmJdwpEventKindStr(EK_VM_START));
+    LOGV("  suspendPolicy=%s\n", dvmJdwpSuspendPolicyStr(suspendPolicy));
+
+    /*
+     * Build a composite event with a single EK_VM_START entry.  The
+     * original code wrapped this in "if (true)"; that dead scaffolding
+     * (and the always-true pReq != NULL check) has been removed.
+     */
+    ExpandBuf* pReq = eventPrep();
+    expandBufAdd1(pReq, suspendPolicy);
+    expandBufAdd4BE(pReq, 1);
+
+    expandBufAdd1(pReq, EK_VM_START);
+    expandBufAdd4BE(pReq, 0);       /* requestId */
+    expandBufAdd8BE(pReq, threadId);
+
+    unlockEventMutex(state);
+
+    /* send request and possibly suspend ourselves */
+    int oldStatus = dvmDbgThreadWaiting();
+    if (suspendPolicy != SP_NONE)
+        dvmJdwpSetWaitForEventThread(state, threadId);
+
+    eventFinish(state, pReq);
+
+    suspendByPolicy(state, suspendPolicy);
+    dvmDbgThreadContinuing(oldStatus);
+
+    return true;
+}
+
+/*
+ * A location of interest has been reached.  This handles:
+ *   Breakpoint
+ *   SingleStep
+ *   MethodEntry
+ *   MethodExit
+ * These four types must be grouped together in a single response.  The
+ * "eventFlags" indicates the type of event(s) that have happened.
+ *
+ * Valid mods:
+ *   Count, ThreadOnly, ClassOnly, ClassMatch, ClassExclude, InstanceOnly
+ *   LocationOnly (for breakpoint/step only)
+ *   Step (for step only)
+ *
+ * Interesting test cases:
+ *  - Put a breakpoint on a native method.  Eclipse creates METHOD_ENTRY
+ *    and METHOD_EXIT events with a ClassOnly mod on the method's class.
+ *  - Use "run to line".  Eclipse creates a BREAKPOINT with Count=1.
+ *  - Single-step to a line with a breakpoint.  Should get a single
+ *    event message with both events in it.
+ */
+bool dvmJdwpPostLocationEvent(JdwpState* state, const JdwpLocation* pLoc,
+    ObjectId thisPtr, int eventFlags)
+{
+    enum JdwpSuspendPolicy suspendPolicy = SP_NONE;
+    ModBasket basket;
+    JdwpEvent** matchList;
+    int matchCount;
+    char* nameAlloc = NULL;    /* owns the className string; freed on all exits */
+
+    memset(&basket, 0, sizeof(basket));
+    basket.pLoc = pLoc;
+    basket.classId = pLoc->classId;
+    basket.thisPtr = thisPtr;
+    basket.threadId = dvmDbgGetThreadSelfId();
+    basket.className = nameAlloc =
+        dvmDescriptorToName(dvmDbgGetClassDescriptor(pLoc->classId));
+
+    /*
+     * On rare occasions we may need to execute interpreted code in the VM
+     * while handling a request from the debugger.  Don't fire breakpoints
+     * while doing so.  (I don't think we currently do this at all, so
+     * this is mostly paranoia.)
+     */
+    if (basket.threadId == state->debugThreadId) {
+        LOGV("Ignoring location event in JDWP thread\n");
+        free(nameAlloc);
+        return false;
+    }
+
+    /*
+     * The debugger variable display tab may invoke the interpreter to format
+     * complex objects.  We want to ignore breakpoints and method entry/exit
+     * traps while working on behalf of the debugger.
+     *
+     * If we don't ignore them, the VM will get hung up, because we'll
+     * suspend on a breakpoint while the debugger is still waiting for its
+     * method invocation to complete.
+     */
+    if (invokeInProgress(state)) {
+        LOGV("Not checking breakpoints during invoke (%s)\n", basket.className);
+        free(nameAlloc);
+        return false;
+    }
+
+    /* don't allow the list to be updated while we scan it */
+    lockEventMutex(state);
+
+    matchList = allocMatchList(state);
+    matchCount = 0;
+
+    /* collect matches for all four grouped event kinds into one list */
+    if ((eventFlags & DBG_BREAKPOINT) != 0)
+        findMatchingEvents(state, EK_BREAKPOINT, &basket, matchList,
+            &matchCount);
+    if ((eventFlags & DBG_SINGLE_STEP) != 0)
+        findMatchingEvents(state, EK_SINGLE_STEP, &basket, matchList,
+            &matchCount);
+    if ((eventFlags & DBG_METHOD_ENTRY) != 0)
+        findMatchingEvents(state, EK_METHOD_ENTRY, &basket, matchList,
+            &matchCount);
+    if ((eventFlags & DBG_METHOD_EXIT) != 0)
+        findMatchingEvents(state, EK_METHOD_EXIT, &basket, matchList,
+            &matchCount);
+
+    ExpandBuf* pReq = NULL;
+    if (matchCount != 0) {
+        int i;
+
+        LOGV("EVENT: %s(%d total) %s.%s thread=%llx code=%llx)\n",
+            dvmJdwpEventKindStr(matchList[0]->eventKind), matchCount,
+            basket.className,
+            dvmDbgGetMethodName(pLoc->classId, pLoc->methodId),
+            basket.threadId, pLoc->idx);
+
+        suspendPolicy = scanSuspendPolicy(matchList, matchCount);
+        LOGV("  suspendPolicy=%s\n",
+            dvmJdwpSuspendPolicyStr(suspendPolicy));
+
+        /* one composite packet carrying every matched event */
+        pReq = eventPrep();
+        expandBufAdd1(pReq, suspendPolicy);
+        expandBufAdd4BE(pReq, matchCount);
+
+        for (i = 0; i < matchCount; i++) {
+            expandBufAdd1(pReq, matchList[i]->eventKind);
+            expandBufAdd4BE(pReq, matchList[i]->requestId);
+            expandBufAdd8BE(pReq, basket.threadId);
+            dvmJdwpAddLocation(pReq, pLoc);
+        }
+    }
+
+    /* prune expired Count-mod events and free the scratch list */
+    cleanupMatchList(state, matchList, matchCount);
+    unlockEventMutex(state);
+
+    /* send request and possibly suspend ourselves */
+    if (pReq != NULL) {
+        int oldStatus = dvmDbgThreadWaiting();
+        if (suspendPolicy != SP_NONE)
+            dvmJdwpSetWaitForEventThread(state, basket.threadId);
+
+        eventFinish(state, pReq);
+
+        suspendByPolicy(state, suspendPolicy);
+        dvmDbgThreadContinuing(oldStatus);
+    }
+
+    free(nameAlloc);
+    return matchCount != 0;
+}
+
+/*
+ * A thread is starting or stopping.
+ *
+ * Valid mods:
+ *  Count, ThreadOnly
+ */
+bool dvmJdwpPostThreadChange(JdwpState* state, ObjectId threadId, bool start)
+{
+    enum JdwpSuspendPolicy suspendPolicy = SP_NONE;
+    ModBasket basket;
+    JdwpEvent** matchList;
+    int matchCount;
+
+    /*
+     * Fixed: the original used "=" instead of "==", which always passed
+     * and silently overwrote "threadId" in assert-enabled builds.
+     */
+    assert(threadId == dvmDbgGetThreadSelfId());
+
+    /*
+     * I don't think this can happen.
+     */
+    if (invokeInProgress(state)) {
+        LOGW("Not posting thread change during invoke\n");
+        return false;
+    }
+
+    memset(&basket, 0, sizeof(basket));
+    basket.threadId = threadId;
+
+    /* don't allow the list to be updated while we scan it */
+    lockEventMutex(state);
+
+    matchList = allocMatchList(state);
+    matchCount = 0;
+
+    if (start)
+        findMatchingEvents(state, EK_THREAD_START, &basket, matchList,
+            &matchCount);
+    else
+        findMatchingEvents(state, EK_THREAD_DEATH, &basket, matchList,
+            &matchCount);
+
+    ExpandBuf* pReq = NULL;
+    if (matchCount != 0) {
+        int i;
+
+        LOGV("EVENT: %s(%d total) thread=%llx)\n",
+            dvmJdwpEventKindStr(matchList[0]->eventKind), matchCount,
+            basket.threadId);
+
+        suspendPolicy = scanSuspendPolicy(matchList, matchCount);
+        LOGV("  suspendPolicy=%s\n",
+            dvmJdwpSuspendPolicyStr(suspendPolicy));
+
+        /* one composite packet carrying every matched event */
+        pReq = eventPrep();
+        expandBufAdd1(pReq, suspendPolicy);
+        expandBufAdd4BE(pReq, matchCount);
+
+        for (i = 0; i < matchCount; i++) {
+            expandBufAdd1(pReq, matchList[i]->eventKind);
+            expandBufAdd4BE(pReq, matchList[i]->requestId);
+            expandBufAdd8BE(pReq, basket.threadId);
+        }
+    }
+
+    /* prune expired Count-mod events and free the scratch list */
+    cleanupMatchList(state, matchList, matchCount);
+    unlockEventMutex(state);
+
+    /* send request and possibly suspend ourselves */
+    if (pReq != NULL) {
+        int oldStatus = dvmDbgThreadWaiting();
+        if (suspendPolicy != SP_NONE)
+            dvmJdwpSetWaitForEventThread(state, basket.threadId);
+
+        eventFinish(state, pReq);
+
+        suspendByPolicy(state, suspendPolicy);
+        dvmDbgThreadContinuing(oldStatus);
+    }
+
+    return matchCount != 0;
+}
+
+/*
+ * Send a polite "VM is dying" message to the debugger.
+ *
+ * Skips the usual "event token" stuff.
+ */
+bool dvmJdwpPostVMDeath(JdwpState* state)
+{
+    LOGV("EVENT: %s\n", dvmJdwpEventKindStr(EK_VM_DEATH));
+
+    /* composite event: no suspend, one EK_VM_DEATH entry, requestId 0 */
+    ExpandBuf* pReq = eventPrep();
+    expandBufAdd1(pReq, SP_NONE);
+    expandBufAdd4BE(pReq, 1);
+
+    expandBufAdd1(pReq, EK_VM_DEATH);
+    expandBufAdd4BE(pReq, 0);
+
+    eventFinish(state, pReq);
+    return true;
+}
+
+
+/*
+ * An exception has been thrown.  It may or may not have been caught.
+ *
+ * Valid mods:
+ *  Count, ThreadOnly, ClassOnly, ClassMatch, ClassExclude, LocationOnly,
+ *    ExceptionOnly, InstanceOnly
+ */
+bool dvmJdwpPostException(JdwpState* state, const JdwpLocation* pThrowLoc,
+    ObjectId exceptionId, RefTypeId exceptionClassId,
+    const JdwpLocation* pCatchLoc, ObjectId thisPtr)
+{
+    enum JdwpSuspendPolicy suspendPolicy = SP_NONE;
+    ModBasket basket;
+    JdwpEvent** matchList;
+    int matchCount;
+    char* nameAlloc = NULL;
+
+    /* populate the attribute "basket" that event modifiers match against */
+    memset(&basket, 0, sizeof(basket));
+    basket.pLoc = pThrowLoc;
+    basket.classId = pThrowLoc->classId;
+    basket.threadId = dvmDbgGetThreadSelfId();
+    /* keep the pointer so the converted name is freed on every exit path */
+    basket.className = nameAlloc =
+        dvmDescriptorToName(dvmDbgGetClassDescriptor(basket.classId));
+    basket.excepClassId = exceptionClassId;
+    /* a zero catch-location classId means the exception was not caught */
+    basket.caught = (pCatchLoc->classId != 0);
+    basket.thisPtr = thisPtr;
+
+    /* don't try to post an exception caused by the debugger */
+    if (invokeInProgress(state)) {
+        LOGV("Not posting exception hit during invoke (%s)\n",basket.className);
+        free(nameAlloc);
+        return false;
+    }
+
+    /* don't allow the list to be updated while we scan it */
+    lockEventMutex(state);
+
+    matchList = allocMatchList(state);
+    matchCount = 0;
+
+    findMatchingEvents(state, EK_EXCEPTION, &basket, matchList, &matchCount);
+
+    /* only build a composite event if at least one registration matched */
+    ExpandBuf* pReq = NULL;
+    if (matchCount != 0) {
+        int i;
+
+        LOGV("EVENT: %s(%d total) thread=%llx exceptId=%llx caught=%d)\n",
+            dvmJdwpEventKindStr(matchList[0]->eventKind), matchCount,
+            basket.threadId, exceptionId, basket.caught);
+        LOGV("  throw: %d %llx %x %lld (%s.%s)\n", pThrowLoc->typeTag,
+            pThrowLoc->classId, pThrowLoc->methodId, pThrowLoc->idx,
+            dvmDbgGetClassDescriptor(pThrowLoc->classId),
+            dvmDbgGetMethodName(pThrowLoc->classId, pThrowLoc->methodId));
+        if (pCatchLoc->classId == 0) {
+            LOGV("  catch: (not caught)\n");
+        } else {
+            LOGV("  catch: %d %llx %x %lld (%s.%s)\n", pCatchLoc->typeTag,
+                pCatchLoc->classId, pCatchLoc->methodId, pCatchLoc->idx,
+                dvmDbgGetClassDescriptor(pCatchLoc->classId),
+                dvmDbgGetMethodName(pCatchLoc->classId, pCatchLoc->methodId));
+        }
+
+        /* the strictest policy among the matched registrations wins */
+        suspendPolicy = scanSuspendPolicy(matchList, matchCount);
+        LOGV("  suspendPolicy=%s\n",
+            dvmJdwpSuspendPolicyStr(suspendPolicy));
+
+        pReq = eventPrep();
+        expandBufAdd1(pReq, suspendPolicy);
+        expandBufAdd4BE(pReq, matchCount);
+
+        /* one entry per matched registration: kind, requestId, thread,
+         * throw location, exception object, catch location */
+        for (i = 0; i < matchCount; i++) {
+            expandBufAdd1(pReq, matchList[i]->eventKind);
+            expandBufAdd4BE(pReq, matchList[i]->requestId);
+            expandBufAdd8BE(pReq, basket.threadId);
+
+            dvmJdwpAddLocation(pReq, pThrowLoc);
+            expandBufAdd1(pReq, JT_OBJECT);
+            expandBufAdd8BE(pReq, exceptionId);
+            dvmJdwpAddLocation(pReq, pCatchLoc);
+        }
+    }
+
+    cleanupMatchList(state, matchList, matchCount);
+    unlockEventMutex(state);
+
+    /* send request and possibly suspend ourselves */
+    if (pReq != NULL) {
+        int oldStatus = dvmDbgThreadWaiting();
+        if (suspendPolicy != SP_NONE)
+            dvmJdwpSetWaitForEventThread(state, basket.threadId);
+
+        eventFinish(state, pReq);
+
+        suspendByPolicy(state, suspendPolicy);
+        dvmDbgThreadContinuing(oldStatus);
+    }
+
+    /* release the class-name buffer from dvmDescriptorToName() */
+    free(nameAlloc);
+    return matchCount != 0;
+}
+
+/*
+ * Announce that a class has been loaded.
+ *
+ * Valid mods:
+ *  Count, ThreadOnly, ClassOnly, ClassMatch, ClassExclude
+ */
+bool dvmJdwpPostClassPrepare(JdwpState* state, int tag, RefTypeId refTypeId,
+    const char* signature, int status)
+{
+    enum JdwpSuspendPolicy suspendPolicy = SP_NONE;
+    ModBasket basket;
+    JdwpEvent** matchList;
+    int matchCount;
+    char* nameAlloc = NULL;
+
+    /* populate the attribute "basket" that event modifiers match against */
+    memset(&basket, 0, sizeof(basket));
+    basket.classId = refTypeId;
+    basket.threadId = dvmDbgGetThreadSelfId();
+    /* keep the pointer so the converted name is freed on every exit path */
+    basket.className = nameAlloc =
+        dvmDescriptorToName(dvmDbgGetClassDescriptor(basket.classId));
+
+    /* suppress class prep caused by debugger */
+    if (invokeInProgress(state)) {
+        LOGV("Not posting class prep caused by invoke (%s)\n",basket.className);
+        free(nameAlloc);
+        return false;
+    }
+
+    /* don't allow the list to be updated while we scan it */
+    lockEventMutex(state);
+
+    matchList = allocMatchList(state);
+    matchCount = 0;
+
+    findMatchingEvents(state, EK_CLASS_PREPARE, &basket, matchList,
+        &matchCount);
+
+    /* only build a composite event if at least one registration matched */
+    ExpandBuf* pReq = NULL;
+    if (matchCount != 0) {
+        int i;
+
+        LOGV("EVENT: %s(%d total) thread=%llx)\n",
+            dvmJdwpEventKindStr(matchList[0]->eventKind), matchCount,
+            basket.threadId);
+
+        /* the strictest policy among the matched registrations wins */
+        suspendPolicy = scanSuspendPolicy(matchList, matchCount);
+        LOGV("  suspendPolicy=%s\n",
+            dvmJdwpSuspendPolicyStr(suspendPolicy));
+
+        if (basket.threadId == state->debugThreadId) {
+            /*
+             * JDWP says that, for a class prep in the debugger thread, we
+             * should set threadId to null and if any threads were supposed
+             * to be suspended then we suspend all other threads.
+             */
+            LOGV("  NOTE: class prepare in debugger thread!\n");
+            basket.threadId = 0;
+            if (suspendPolicy == SP_EVENT_THREAD)
+                suspendPolicy = SP_ALL;
+        }
+
+        pReq = eventPrep();
+        expandBufAdd1(pReq, suspendPolicy);
+        expandBufAdd4BE(pReq, matchCount);
+
+        /* one entry per matched registration: kind, requestId, thread,
+         * then the prepared class (tag, id, signature, status) */
+        for (i = 0; i < matchCount; i++) {
+            expandBufAdd1(pReq, matchList[i]->eventKind);
+            expandBufAdd4BE(pReq, matchList[i]->requestId);
+            expandBufAdd8BE(pReq, basket.threadId);
+
+            expandBufAdd1(pReq, tag);
+            expandBufAdd8BE(pReq, refTypeId);
+            expandBufAddUtf8String(pReq, (const u1*) signature);
+            expandBufAdd4BE(pReq, status);
+        }
+    }
+
+    cleanupMatchList(state, matchList, matchCount);
+
+    unlockEventMutex(state);
+
+    /* send request and possibly suspend ourselves */
+    if (pReq != NULL) {
+        int oldStatus = dvmDbgThreadWaiting();
+        if (suspendPolicy != SP_NONE)
+            dvmJdwpSetWaitForEventThread(state, basket.threadId);
+
+        eventFinish(state, pReq);
+
+        suspendByPolicy(state, suspendPolicy);
+        dvmDbgThreadContinuing(oldStatus);
+    }
+
+    /* release the class-name buffer from dvmDescriptorToName() */
+    free(nameAlloc);
+    return matchCount != 0;
+}
+
+/*
+ * Unload a class.
+ *
+ * Valid mods:
+ *  Count, ClassMatch, ClassExclude
+ */
+bool dvmJdwpPostClassUnload(JdwpState* state, RefTypeId refTypeId)
+{
+    /* the VM doesn't support class unloading yet, so this should be
+     * unreachable; trip an assert if we somehow get here */
+    assert(false);      // TODO
+    return false;
+}
+
+/*
+ * Get or set a field.
+ *
+ * Valid mods:
+ *  Count, ThreadOnly, ClassOnly, ClassMatch, ClassExclude, FieldOnly,
+ *    InstanceOnly
+ */
+bool dvmJdwpPostFieldAccess(JdwpState* state, int STUFF, ObjectId thisPtr,
+    bool modified)
+{
+    /* field watchpoints aren't supported (the canWatchFieldAccess /
+     * canWatchFieldModification capabilities report false); the "STUFF"
+     * parameter is a placeholder for the eventual field description */
+    assert(false);      // TODO
+    return false;
+}
+
+/*
+ * Send up a chunk of DDM data.
+ *
+ * While this takes the form of a JDWP "event", it doesn't interact with
+ * other debugger traffic, and can't suspend the VM, so we skip all of
+ * the fun event token gymnastics.
+ */
+void dvmJdwpDdmSendChunk(JdwpState* state, int type, int len, const u1* buf)
+{
+    ExpandBuf* pReq;
+    u1* outBuf;
+
+    /*
+     * Write the chunk header and data into the ExpandBuf.
+     */
+    pReq = expandBufAlloc();
+    /* reserve space for the JDWP header; it's back-filled below */
+    expandBufAddSpace(pReq, kJDWPHeaderLen);
+    expandBufAdd4BE(pReq, type);
+    expandBufAdd4BE(pReq, len);
+    if (len > 0) {
+        outBuf = expandBufAddSpace(pReq, len);
+        memcpy(outBuf, buf, len);
+    }
+
+    /*
+     * Go back and write the JDWP header.
+     */
+    outBuf = expandBufGetBuffer(pReq);
+
+    set4BE(outBuf, expandBufGetLength(pReq));           /* total length */
+    set4BE(outBuf+4, dvmJdwpNextRequestSerial(state));  /* request serial# */
+    set1(outBuf+8, 0);     /* flags */
+    set1(outBuf+9, kJDWPDdmCmdSet);
+    set1(outBuf+10, kJDWPDdmCmd);
+
+    /*
+     * Send it up.
+     */
+    //LOGD("Sending chunk (type=0x%08x len=%d)\n", type, len);
+    dvmJdwpSendRequest(state, pReq);
+
+    expandBufFree(pReq);
+}
+
diff --git a/vm/jdwp/JdwpEvent.h b/vm/jdwp/JdwpEvent.h
new file mode 100644
index 0000000..1a6a2c7
--- /dev/null
+++ b/vm/jdwp/JdwpEvent.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Handle registration of events, and debugger event notification.
+ */
+#ifndef _DALVIK_JDWP_JDWPEVENT
+#define _DALVIK_JDWP_JDWPEVENT
+
+#include "JdwpConstants.h"
+#include "ExpandBuf.h"
+
+/*
+ * Event modifiers.  A JdwpEvent may have zero or more of these.
+ */
+typedef union JdwpEventMod {
+    /* every arm below begins with "modKind", so the discriminator can be
+     * read through this first member regardless of which arm is active */
+    u1      modKind;                /* JdwpModKind */
+    struct {
+        u1          modKind;
+        int         count;
+    } count;
+    struct {
+        u1          modKind;
+        u4          exprId;
+    } conditional;
+    struct {
+        u1          modKind;
+        ObjectId    threadId;
+    } threadOnly;
+    struct {
+        u1          modKind;
+        RefTypeId   referenceTypeId;
+    } classOnly;
+    struct {
+        u1          modKind;
+        char*       classPattern;   /* heap-allocated pattern string */
+    } classMatch;
+    struct {
+        u1          modKind;
+        char*       classPattern;   /* heap-allocated pattern string */
+    } classExclude;
+    struct {
+        u1          modKind;
+        JdwpLocation loc;
+    } locationOnly;
+    struct {
+        u1          modKind;
+        u1          caught;
+        u1          uncaught;
+        RefTypeId   refTypeId;
+    } exceptionOnly;
+    struct {
+        u1          modKind;
+        RefTypeId   refTypeId;
+        FieldId     fieldId;
+    } fieldOnly;
+    struct {
+        u1          modKind;
+        ObjectId    threadId;
+        int         size;           /* JdwpStepSize */
+        int         depth;          /* JdwpStepDepth */
+    } step;
+    struct {
+        u1          modKind;
+        ObjectId    objectId;
+    } instanceOnly;
+} JdwpEventMod;
+
+/*
+ * One of these for every registered event.
+ *
+ * We over-allocate the struct to hold the modifiers.
+ */
+typedef struct JdwpEvent {
+    struct JdwpEvent*       prev;           /* linked list */
+    struct JdwpEvent*       next;
+
+    enum JdwpEventKind      eventKind;      /* what kind of event is this? */
+    enum JdwpSuspendPolicy  suspendPolicy;  /* suspend all, none, or self? */
+    int                     modCount;       /* #of entries in mods[] */
+    u4                      requestId;      /* serial#, reported to debugger */
+
+    /* the struct is over-allocated (see dvmJdwpEventAlloc) so that
+     * modCount entries fit here; nothing may follow this member */
+    JdwpEventMod            mods[1];        /* MUST be last field in struct */
+} JdwpEvent;
+
+/*
+ * Allocate an event structure with enough space.
+ */
+JdwpEvent* dvmJdwpEventAlloc(int numMods);
+void dvmJdwpEventFree(JdwpEvent* pEvent);
+
+/*
+ * Register an event by adding it to the event list.
+ *
+ * "*pEvent" must be storage allocated with jdwpEventAlloc().  The caller
+ * may discard its pointer after calling this.
+ */
+JdwpError dvmJdwpRegisterEvent(JdwpState* state, JdwpEvent* pEvent);
+
+/*
+ * Unregister an event, given the requestId.
+ */
+void dvmJdwpUnregisterEventById(JdwpState* state, u4 requestId);
+
+/*
+ * Unregister all events.
+ */
+void dvmJdwpUnregisterAll(JdwpState* state);
+
+/*
+ * Send an event, formatted into "pReq", to the debugger.
+ *
+ * (Messages are sent asynchronously, and do not receive a reply.)
+ */
+bool dvmJdwpSendRequest(JdwpState* state, ExpandBuf* pReq);
+
+#endif /*_DALVIK_JDWP_JDWPEVENT*/
diff --git a/vm/jdwp/JdwpHandler.c b/vm/jdwp/JdwpHandler.c
new file mode 100644
index 0000000..e4b6f26
--- /dev/null
+++ b/vm/jdwp/JdwpHandler.c
@@ -0,0 +1,2152 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Handle messages from debugger.
+ *
+ * GENERAL NOTE: we're not currently testing the message length for
+ * correctness.  This is usually a bad idea, but here we can probably
+ * get away with it so long as the debugger isn't broken.  We can
+ * change the "read" macros to use "dataLen" to avoid wandering into
+ * bad territory, and have a single "is dataLen correct" check at the
+ * end of each function.  Not needed at this time.
+ */
+#include "jdwp/JdwpPriv.h"
+#include "jdwp/JdwpHandler.h"
+#include "jdwp/JdwpEvent.h"
+#include "jdwp/JdwpConstants.h"
+#include "jdwp/ExpandBuf.h"
+
+#include "Bits.h"
+#include "Atomic.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#if 0
+#include <time.h>
+#include <sys/time.h>
+static void showTime(const char* label)
+{
+    struct timeval tv;
+    int min, sec, msec;
+
+    gettimeofday(&tv, NULL);
+    /* minutes/seconds within the current hour, millisecond precision */
+    min = (tv.tv_sec / 60) % 60;
+    sec = tv.tv_sec % 60;
+    msec = tv.tv_usec / 1000;
+
+    LOGI("%02d:%02d.%03d %s\n", min, sec, msec, label);
+}
+#endif
+
+/*
+ * Helper function: read a "location" from an input buffer.
+ */
+static void jdwpReadLocation(const u1** pBuf, JdwpLocation* pLoc)
+{
+    memset(pLoc, 0, sizeof(*pLoc));     /* allows memcmp() later */
+    /* each read advances *pBuf past the consumed bytes */
+    pLoc->typeTag = read1(pBuf);
+    pLoc->classId = dvmReadObjectId(pBuf);
+    pLoc->methodId = dvmReadMethodId(pBuf);
+    pLoc->idx = read8BE(pBuf);
+}
+
+/*
+ * Helper function: write a "location" into the reply buffer.
+ */
+/* non-static: also used by the event code to serialize event locations */
+void dvmJdwpAddLocation(ExpandBuf* pReply, const JdwpLocation* pLoc)
+{
+    /* field order mirrors jdwpReadLocation() above */
+    expandBufAdd1(pReply, pLoc->typeTag);
+    expandBufAddObjectId(pReply, pLoc->classId);
+    expandBufAddMethodId(pReply, pLoc->methodId);
+    expandBufAdd8BE(pReply, pLoc->idx);
+}
+
+/*
+ * Helper function: read a variable-width value from the input buffer.
+ */
+static u8 jdwpReadValue(const u1** pBuf, int width)
+{
+    /* "width" comes from dvmDbgGetTagWidth(); anything but 1/2/4/8
+     * indicates a broken caller */
+    switch (width) {
+    case 1:     return read1(pBuf);
+    case 2:     return read2BE(pBuf);
+    case 4:     return read4BE(pBuf);
+    case 8:     return read8BE(pBuf);
+    default:
+        assert(false);
+        return (u8) -1;
+    }
+}
+
+/*
+ * Helper function: write a variable-width value into the output buffer.
+ */
+static void jdwpWriteValue(ExpandBuf* pReply, int width, u8 value)
+{
+    /* append "value" to the reply using exactly "width" bytes, big-endian */
+    if (width == 1)
+        expandBufAdd1(pReply, value);
+    else if (width == 2)
+        expandBufAdd2BE(pReply, value);
+    else if (width == 4)
+        expandBufAdd4BE(pReply, value);
+    else if (width == 8)
+        expandBufAdd8BE(pReply, value);
+    else
+        assert(false);      /* unsupported width */
+}
+
+/*
+ * Common code for *_InvokeMethod requests.
+ */
+/*
+ * Reads the argument list and invoke options from "buf", performs the
+ * method call, and writes the result value plus any pending exception
+ * object into "pReply".  Returns ERR_NONE on success.
+ */
+static JdwpError finishInvoke(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply,
+    ObjectId threadId, ObjectId objectId, RefTypeId classId, MethodId methodId)
+{
+    JdwpError err = ERR_NONE;
+    u8* argArray = NULL;
+    u4 numArgs;
+    u4 options;     /* enum InvokeOptions bit flags */
+    int i;
+
+    numArgs = read4BE(&buf);
+
+    LOGV("    --> threadId=%llx objectId=%llx\n", threadId, objectId);
+    LOGV("        classId=%llx methodId=%x %s.%s\n",
+        classId, methodId,
+        dvmDbgGetClassDescriptor(classId),
+        dvmDbgGetMethodName(classId, methodId));
+    LOGV("        %d args:\n", numArgs);
+
+    if (numArgs > 0) {
+        /* was unchecked; a failed malloc would be dereferenced below */
+        argArray = malloc(sizeof(*argArray) * numArgs);
+        if (argArray == NULL) {
+            err = ERR_OUT_OF_MEMORY;
+            goto bail;
+        }
+    }
+
+    /* each argument is a tag byte followed by a tag-width value */
+    for (i = 0; i < (int) numArgs; i++) {
+        u1 typeTag;
+        u8 value;
+        int width;
+
+        typeTag = read1(&buf);
+        width = dvmDbgGetTagWidth(typeTag);
+        value = jdwpReadValue(&buf, width);
+
+        LOGV("          '%c'(%d): 0x%llx\n", typeTag, width, value);
+        argArray[i] = value;
+    }
+
+    options = read4BE(&buf);
+    LOGV("        options=0x%04x%s%s\n", options,
+        (options & INVOKE_SINGLE_THREADED) ? " (SINGLE_THREADED)" : "",
+        (options & INVOKE_NONVIRTUAL) ? " (NONVIRTUAL)" : "");
+
+
+    u1 resultTag;
+    u8 resultValue;
+    ObjectId exceptObjId;
+
+    err = dvmDbgInvokeMethod(threadId, objectId, classId, methodId,
+            numArgs, argArray, options,
+            &resultTag, &resultValue, &exceptObjId);
+    if (err != ERR_NONE)
+        goto bail;
+
+    /* write the result tag/value and exception object into the reply
+     * (the redundant "err == ERR_NONE" re-check was removed) */
+    {
+        int width = dvmDbgGetTagWidth(resultTag);
+
+        expandBufAdd1(pReply, resultTag);
+        if (width != 0)
+            jdwpWriteValue(pReply, width, resultValue);
+        expandBufAdd1(pReply, JT_OBJECT);
+        expandBufAddObjectId(pReply, exceptObjId);
+
+        LOGV("  --> returned '%c' 0x%llx (except=%08llx)\n",
+            resultTag, resultValue, exceptObjId);
+
+        /* show detailed debug output */
+        if (resultTag == JT_STRING && exceptObjId == 0) {
+            if (resultValue != 0) {
+                char* str = dvmDbgStringToUtf8(resultValue);
+                LOGV("      string '%s'\n", str);
+                free(str);
+            } else {
+                LOGV("      string (null)\n");
+            }
+        }
+    }
+
+bail:
+    free(argArray);
+    return err;
+}
+
+
+/*
+ * Request for version info.
+ */
+static JdwpError handleVM_Version(JdwpState* state, const u1* buf,
+    int dataLen, ExpandBuf* pReply)
+{
+    /* reply layout: description, jdwpMajor, jdwpMinor, vmVersion, vmName */
+    /* text information on VM version */
+    expandBufAddUtf8String(pReply, (const u1*) "Android DalvikVM 0.0.1");
+    /* JDWP version numbers */
+    expandBufAdd4BE(pReply, 1);        // major
+    expandBufAdd4BE(pReply, 5);        // minor
+    /* VM JRE version */
+    expandBufAddUtf8String(pReply, (const u1*) "1.5.0");  /* e.g. 1.5.0_04 */
+    /* target VM name */
+    expandBufAddUtf8String(pReply, (const u1*) "DalvikVM");
+
+    return ERR_NONE;
+}
+
+/*
+ * Given a class JNI signature (e.g. "Ljava/lang/Error;"), return the
+ * referenceTypeID.  We need to send back more than one if the class has
+ * been loaded by multiple class loaders.
+ */
+static JdwpError handleVM_ClassesBySignature(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    char* classDescriptor = NULL;
+    u4 numClasses;
+    size_t strLen;
+    RefTypeId refTypeId;
+
+    /* readNewUtf8String() returns a heap copy; freed below
+     * (the unused loop index "i" was removed) */
+    classDescriptor = readNewUtf8String(&buf, &strLen);
+    LOGV("  Req for class by signature '%s'\n", classDescriptor);
+
+    /*
+     * TODO: if a class with the same name has been loaded multiple times
+     * (by different class loaders), we're supposed to return each of them.
+     *
+     * NOTE: this may mangle "className".
+     */
+    if (!dvmDbgFindLoadedClassBySignature(classDescriptor, &refTypeId)) {
+        /* not currently loaded */
+        LOGV("    --> no match!\n");
+        numClasses = 0;
+    } else {
+        /* just the one */
+        numClasses = 1;
+    }
+
+    expandBufAdd4BE(pReply, numClasses);
+
+    if (numClasses > 0) {
+        u1 typeTag;
+        u4 status;
+
+        /* get class vs. interface and status flags */
+        dvmDbgGetClassInfo(refTypeId, &typeTag, &status, NULL);
+
+        expandBufAdd1(pReply, typeTag);
+        expandBufAddRefTypeId(pReply, refTypeId);
+        expandBufAdd4BE(pReply, status);
+    }
+
+    free(classDescriptor);
+
+    return ERR_NONE;
+}
+
+/*
+ * Handle request for the thread IDs of all running threads.
+ *
+ * We exclude ourselves from the list, because we don't allow ourselves
+ * to be suspended, and that violates some JDWP expectations.
+ */
+static JdwpError handleVM_AllThreads(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId* threadIds;
+    u4 threadCount;
+    u4 idx;
+
+    /* the VM hands back a heap array of thread object IDs; we own it */
+    dvmDbgGetAllThreads(&threadIds, &threadCount);
+
+    expandBufAdd4BE(pReply, threadCount);
+    for (idx = 0; idx < threadCount; idx++)
+        expandBufAddObjectId(pReply, threadIds[idx]);
+
+    free(threadIds);
+
+    return ERR_NONE;
+}
+
+/*
+ * List all thread groups that do not have a parent.
+ */
+static JdwpError handleVM_TopLevelThreadGroups(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    /*
+     * TODO: maintain a list of parentless thread groups in the VM.
+     *
+     * For now, just return "system".  Application threads are created
+     * in "main", which is a child of "system".
+     */
+    expandBufAdd4BE(pReply, 1);     /* exactly one top-level group */
+    expandBufAddObjectId(pReply, dvmDbgGetSystemThreadGroupId());
+
+    return ERR_NONE;
+}
+
+/*
+ * Respond with the sizes of the basic debugger types.
+ *
+ * All IDs are 8 bytes.
+ */
+static JdwpError handleVM_IDSizes(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    /* reply order: fieldID, methodID, objectID, referenceTypeID, frameID */
+    expandBufAdd4BE(pReply, sizeof(FieldId));
+    expandBufAdd4BE(pReply, sizeof(MethodId));
+    expandBufAdd4BE(pReply, sizeof(ObjectId));
+    expandBufAdd4BE(pReply, sizeof(RefTypeId));
+    expandBufAdd4BE(pReply, sizeof(FrameId));
+    return ERR_NONE;
+}
+
+/*
+ * The debugger is politely asking to disconnect.  We're good with that.
+ *
+ * We could resume threads and clean up pinned references, but we can do
+ * that when the TCP connection drops.
+ */
+static JdwpError handleVM_Dispose(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    /* deliberately a no-op; see the comment above about cleanup
+     * happening when the TCP connection drops */
+    return ERR_NONE;
+}
+
+/*
+ * Suspend the execution of the application running in the VM (i.e. suspend
+ * all threads).
+ *
+ * This needs to increment the "suspend count" on all threads.
+ */
+static JdwpError handleVM_Suspend(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    /* NOTE(review): the meaning of the "false" argument isn't visible
+     * here — confirm against dvmDbgSuspendVM()'s declaration */
+    dvmDbgSuspendVM(false);
+    return ERR_NONE;
+}
+
+/*
+ * Resume execution.  Decrements the "suspend count" of all threads.
+ */
+static JdwpError handleVM_Resume(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    /* counterpart to handleVM_Suspend(); decrements suspend counts */
+    dvmDbgResumeVM();
+    return ERR_NONE;
+}
+
+/*
+ * The debugger wants the entire VM to exit.
+ */
+static JdwpError handleVM_Exit(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    u4 exitCode;
+
+    exitCode = get4BE(buf);
+
+    /* u4 is unsigned, so log with %u (was %d, a signedness mismatch) */
+    LOGW("Debugger is telling the VM to exit with code=%u\n", exitCode);
+
+    /* does not return */
+    dvmDbgExit(exitCode);
+    return ERR_NOT_IMPLEMENTED;     // shouldn't get here
+}
+
+/*
+ * Create a new string in the VM and return its ID.
+ *
+ * (Ctrl-Shift-I in Eclipse on an array of objects causes it to create the
+ * string "java.util.Arrays".)
+ */
+static JdwpError handleVM_CreateString(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    char* str;
+    size_t strLen;
+    ObjectId stringId;
+
+    /* readNewUtf8String() returns a heap copy that we own */
+    str = readNewUtf8String(&buf, &strLen);
+
+    LOGV("  Req to create string '%s'\n", str);
+
+    stringId = dvmDbgCreateString(str);
+    expandBufAddObjectId(pReply, stringId);
+
+    free(str);      /* was leaked on every request */
+
+    return ERR_NONE;
+}
+
+/*
+ * Tell the debugger what we are capable of.
+ */
+static JdwpError handleVM_Capabilities(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    /* seven fixed capability booleans, none supported yet
+     * (the unused local "i" was removed) */
+    expandBufAdd1(pReply, false);   /* canWatchFieldModification */
+    expandBufAdd1(pReply, false);   /* canWatchFieldAccess */
+    expandBufAdd1(pReply, false);   /* canGetBytecodes */
+    expandBufAdd1(pReply, false);   /* canGetSyntheticAttribute */
+    expandBufAdd1(pReply, false);   /* canGetOwnedMonitorInfo */
+    expandBufAdd1(pReply, false);   /* canGetCurrentContendedMonitor */
+    expandBufAdd1(pReply, false);   /* canGetMonitorInfo */
+    return ERR_NONE;
+}
+
+/*
+ * Return classpath and bootclasspath.
+ */
+static JdwpError handleVM_ClassPaths(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    char baseDir[2] = "/";
+    u4 classPaths;
+    u4 bootClassPaths;
+    int i;
+
+    /*
+     * TODO: make this real.  Not important for remote debugging, but
+     * might be useful for local debugging.
+     */
+    classPaths = 1;
+    bootClassPaths = 0;
+
+    expandBufAddUtf8String(pReply, (const u1*) baseDir);
+    expandBufAdd4BE(pReply, classPaths);
+    for (i = 0; i < (int) classPaths; i++) {
+        expandBufAddUtf8String(pReply, (const u1*) ".");
+    }
+
+    expandBufAdd4BE(pReply, bootClassPaths);
+    /* BUGFIX: this loop iterated over classPaths, which would desync the
+     * reply from its count if bootClassPaths ever became nonzero */
+    for (i = 0; i < (int) bootClassPaths; i++) {
+        /* add bootclasspath components as strings */
+    }
+
+    return ERR_NONE;
+}
+
+/*
+ * Release a list of object IDs.  (Seen in jdb.)
+ *
+ * Currently does nothing.
+ */
+static JdwpError HandleVM_DisposeObjects(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    /* deliberately a no-op: we don't reference-count object IDs.
+     * NOTE(review): name is capitalized "HandleVM_..." unlike every other
+     * "handleVM_..." handler — confirm the dispatch table matches before
+     * renaming */
+    return ERR_NONE;
+}
+
+/*
+ * Tell the debugger what we are capable of.
+ */
+static JdwpError handleVM_CapabilitiesNew(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    int i;
+
+    /* 21 explicit capability flags plus 11 reserved slots == 32 total */
+    expandBufAdd1(pReply, false);   /* canWatchFieldModification */
+    expandBufAdd1(pReply, false);   /* canWatchFieldAccess */
+    expandBufAdd1(pReply, false);   /* canGetBytecodes */
+    expandBufAdd1(pReply, false);   /* canGetSyntheticAttribute */
+    expandBufAdd1(pReply, false);   /* canGetOwnedMonitorInfo */
+    expandBufAdd1(pReply, false);   /* canGetCurrentContendedMonitor */
+    expandBufAdd1(pReply, false);   /* canGetMonitorInfo */
+    expandBufAdd1(pReply, false);   /* canRedefineClasses */
+    expandBufAdd1(pReply, false);   /* canAddMethod */
+    expandBufAdd1(pReply, false);   /* canUnrestrictedlyRedefineClasses */
+    expandBufAdd1(pReply, false);   /* canPopFrames */
+    expandBufAdd1(pReply, false);   /* canUseInstanceFilters */
+    expandBufAdd1(pReply, false);   /* canGetSourceDebugExtension */
+    expandBufAdd1(pReply, false);   /* canRequestVMDeathEvent */
+    expandBufAdd1(pReply, false);   /* canSetDefaultStratum */
+    expandBufAdd1(pReply, false);   /* 1.6: canGetInstanceInfo */
+    expandBufAdd1(pReply, false);   /* 1.6: canRequestMonitorEvents */
+    expandBufAdd1(pReply, false);   /* 1.6: canGetMonitorFrameInfo */
+    expandBufAdd1(pReply, false);   /* 1.6: canUseSourceNameFilters */
+    expandBufAdd1(pReply, false);   /* 1.6: canGetConstantPool */
+    expandBufAdd1(pReply, false);   /* 1.6: canForceEarlyReturn */
+
+    /* fill in reserved22 through reserved32; note count started at 1 */
+    for (i = 22; i <= 32; i++)
+        expandBufAdd1(pReply, false);   /* reservedN */
+    return ERR_NONE;
+}
+
+/*
+ * Cough up the complete list of classes.
+ */
+static JdwpError handleVM_AllClassesWithGeneric(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    u4 numClasses = 0;
+    RefTypeId* classRefBuf = NULL;
+    int i;
+
+    /* the VM hands back a heap array of class IDs; we own it */
+    dvmDbgGetClassList(&numClasses, &classRefBuf);
+
+    expandBufAdd4BE(pReply, numClasses);
+
+    for (i = 0; i < (int) numClasses; i++) {
+        /* no generic-signature info is available; send an empty string */
+        static const u1 genericSignature[1] = "";
+        u1 refTypeTag;
+        char* signature;
+        u4 status;
+
+        /* "signature" is heap-allocated; freed at the end of the loop */
+        dvmDbgGetClassInfo(classRefBuf[i], &refTypeTag, &status, &signature);
+
+        expandBufAdd1(pReply, refTypeTag);
+        expandBufAddRefTypeId(pReply, classRefBuf[i]);
+        expandBufAddUtf8String(pReply, (const u1*) signature);
+        expandBufAddUtf8String(pReply, genericSignature);
+        expandBufAdd4BE(pReply, status);
+
+        free(signature);
+    }
+
+    free(classRefBuf);
+
+    return ERR_NONE;
+}
+
+/*
+ * Given a referenceTypeID, return a string with the JNI reference type
+ * signature (e.g. "Ljava/lang/Error;").
+ */
+static JdwpError handleRT_Signature(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    char* signature;
+    RefTypeId refTypeId;
+
+    refTypeId = dvmReadRefTypeId(&buf);
+
+    LOGV("  Req for signature of refTypeId=0x%llx\n", refTypeId);
+    signature = dvmDbgGetSignature(refTypeId);
+    /* guard against a missing signature, matching the NULL handling in
+     * handleRT_SignatureWithGeneric() below */
+    if (signature != NULL)
+        expandBufAddUtf8String(pReply, (const u1*) signature);
+    else
+        expandBufAddUtf8String(pReply, (const u1*) "Lunknown;");  /* native? */
+    free(signature);    /* free(NULL) is a no-op */
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the modifiers (a/k/a access flags) for a reference type.
+ */
+static JdwpError handleRT_Modifiers(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId refTypeId = dvmReadRefTypeId(&buf);
+
+    /* reply is just the 32-bit access-flags word */
+    expandBufAdd4BE(pReply, dvmDbgGetAccessFlags(refTypeId));
+
+    return ERR_NONE;
+}
+
+/*
+ * Get values from static fields in a reference type.
+ */
+static JdwpError handleRT_GetValues(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId refTypeId;
+    u4 numFields;
+    int i;
+
+    refTypeId = dvmReadRefTypeId(&buf);
+    numFields = read4BE(&buf);
+
+    /* reply: count, then (tag byte + tag-width raw value) per field */
+    expandBufAdd4BE(pReply, numFields);
+    for (i = 0; i < (int) numFields; i++) {
+        FieldId fieldId;
+        u1 fieldTag;
+        int width;
+        u1* ptr;
+
+        fieldId = dvmReadFieldId(&buf);
+        fieldTag = dvmDbgGetFieldTag(refTypeId, fieldId);
+        width = dvmDbgGetTagWidth(fieldTag);
+
+        expandBufAdd1(pReply, fieldTag);
+        /* reserve "width" bytes in the reply and let the VM fill them in */
+        ptr = expandBufAddSpace(pReply, width);
+        dvmDbgGetStaticFieldValue(refTypeId, fieldId, ptr, width);
+    }
+
+    return ERR_NONE;
+}
+
+/*
+ * Get the name of the source file in which a reference type was declared.
+ */
+static JdwpError handleRT_SourceFile(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId refTypeId = dvmReadRefTypeId(&buf);
+    const char* fileName = dvmDbgGetSourceFile(refTypeId);
+
+    /* the class may have been compiled without source-file info */
+    if (fileName == NULL)
+        return ERR_ABSENT_INFORMATION;
+
+    expandBufAddUtf8String(pReply, (const u1*) fileName);
+    return ERR_NONE;
+}
+
+/*
+ * Return the current status of the reference type.
+ */
+static JdwpError handleRT_Status(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId refTypeId = dvmReadRefTypeId(&buf);
+    u1 typeTag;
+    u4 status;
+
+    /* only the status word is reported; the type tag is discarded */
+    dvmDbgGetClassInfo(refTypeId, &typeTag, &status, NULL);
+    expandBufAdd4BE(pReply, status);
+
+    return ERR_NONE;
+}
+
+/*
+ * Return interfaces implemented directly by this class.
+ */
+static JdwpError handleRT_Interfaces(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId refTypeId;
+
+    /* unused locals "numInterfaces" and "i" were removed */
+    refTypeId = dvmReadRefTypeId(&buf);
+
+    LOGV("  Req for interfaces in %llx (%s)\n", refTypeId,
+        dvmDbgGetClassDescriptor(refTypeId));
+
+    /* the helper writes the count and interface IDs into pReply itself */
+    dvmDbgOutputAllInterfaces(refTypeId, pReply);
+
+    return ERR_NONE;
+}
+
+/*
+ * Returns the value of the SourceDebugExtension attribute.
+ *
+ * JDB seems interested, but DEX files don't currently support this.
+ */
+static JdwpError handleRT_SourceDebugExtension(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    /* referenceTypeId in, string out */
+    /* DEX files don't carry this attribute, so always report "absent" */
+    return ERR_ABSENT_INFORMATION;
+}
+
+/*
+ * Like RT_Signature but with the possibility of a "generic signature".
+ */
+static JdwpError handleRT_SignatureWithGeneric(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    /* no generic-signature info is available; always send empty string */
+    static const u1 genericSignature[1] = "";
+    char* signature;
+    RefTypeId refTypeId;
+
+    refTypeId = dvmReadRefTypeId(&buf);
+
+    LOGV("  Req for signature of refTypeId=0x%llx\n", refTypeId);
+    signature = dvmDbgGetSignature(refTypeId);
+    if (signature != NULL)
+        expandBufAddUtf8String(pReply, (const u1*) signature);
+    else
+        expandBufAddUtf8String(pReply, (const u1*) "Lunknown;");  /* native? */
+    expandBufAddUtf8String(pReply, genericSignature);
+    free(signature);    /* free(NULL) is a no-op */
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the instance of java.lang.ClassLoader that loaded the specified
+ * reference type, or null if it was loaded by the system loader.
+ */
+static JdwpError handleRT_ClassLoader(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId refTypeId;
+    ObjectId classLoaderId;
+
+    refTypeId = dvmReadRefTypeId(&buf);
+
+    expandBufAddObjectId(pReply, dvmDbgGetClassLoader(refTypeId));
+
+    return ERR_NONE;
+}
+
+/*
+ * Given a referenceTypeId, return a block of stuff that describes the
+ * fields declared by a class.
+ */
+static JdwpError handleRT_FieldsWithGeneric(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId refTypeId;
+    int i, numFields;
+
+    refTypeId = dvmReadRefTypeId(&buf);
+    LOGV("  Req for fields in refTypeId=0x%llx\n", refTypeId);
+    {
+        char* tmp = dvmDbgGetSignature(refTypeId);
+        LOGV("  --> '%s'\n", tmp);
+        free(tmp);
+    }
+
+    dvmDbgOutputAllFields(refTypeId, true, pReply);
+
+    return ERR_NONE;
+}
+
+/*
+ * Given a referenceTypeID, return a block of goodies describing the
+ * methods declared by a class.
+ */
+static JdwpError handleRT_MethodsWithGeneric(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId refTypeId;
+    int i;
+
+    refTypeId = dvmReadRefTypeId(&buf);
+
+    LOGV("  Req for methods in refTypeId=0x%llx\n", refTypeId);
+    {
+        char* tmp = dvmDbgGetSignature(refTypeId);
+        LOGV("  --> '%s'\n", tmp);
+        free(tmp);
+    }
+
+    dvmDbgOutputAllMethods(refTypeId, true, pReply);
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the immediate superclass of a class.
+ */
+static JdwpError handleCT_Superclass(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId classId;
+    RefTypeId superClassId;
+
+    classId = dvmReadRefTypeId(&buf);
+
+    superClassId = dvmDbgGetSuperclass(classId);
+
+    expandBufAddRefTypeId(pReply, superClassId);
+
+    return ERR_NONE;
+}
+
+/*
+ * Set static class values.
+ */
+static JdwpError handleCT_SetValues(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId classId;
+    u4 values;
+    int i;
+
+    classId = dvmReadRefTypeId(&buf);
+    values = read4BE(&buf);
+
+    LOGV("  Req to set %d values in classId=%llx\n", values, classId);
+
+    for (i = 0; i < (int) values; i++) {
+        FieldId fieldId;
+        u1 fieldTag;
+        u8 value;
+        int width;
+
+        fieldId = dvmReadFieldId(&buf);
+        fieldTag = dvmDbgGetStaticFieldTag(classId, fieldId);
+        width = dvmDbgGetTagWidth(fieldTag);
+        value = jdwpReadValue(&buf, width);
+
+        LOGV("    --> field=%x tag=%c -> %lld\n", fieldId, fieldTag, value);
+        dvmDbgSetStaticFieldValue(classId, fieldId, value, width);
+    }
+
+    return ERR_NONE;
+}
+
+/*
+ * Invoke a static method.
+ *
+ * Example: Eclipse sometimes uses java/lang/Class.forName(String s) on
+ * values in the "variables" display.
+ */
+static JdwpError handleCT_InvokeMethod(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId classId;
+    ObjectId threadId;
+    MethodId methodId;
+
+    classId = dvmReadRefTypeId(&buf);
+    threadId = dvmReadObjectId(&buf);
+    methodId = dvmReadMethodId(&buf);
+
+    return finishInvoke(state, buf, dataLen, pReply,
+            threadId, 0, classId, methodId);
+}
+
+/*
+ * Return line number information for the method, if present.
+ */
+static JdwpError handleM_LineTable(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId refTypeId;
+    MethodId methodId;
+
+    refTypeId = dvmReadRefTypeId(&buf);
+    methodId = dvmReadMethodId(&buf);
+
+    LOGV("  Req for line table in %s.%s\n",
+        dvmDbgGetClassDescriptor(refTypeId),
+        dvmDbgGetMethodName(refTypeId,methodId));
+
+    dvmDbgOutputLineTable(refTypeId, methodId, pReply);
+
+    return ERR_NONE;
+}
+
+/*
+ * Pull out the LocalVariableTable goodies.
+ */
+static JdwpError handleM_VariableTableWithGeneric(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId classId;
+    MethodId methodId;
+
+    classId = dvmReadRefTypeId(&buf);
+    methodId = dvmReadMethodId(&buf);
+
+    LOGV("  Req for LocalVarTab in class=%s method=%s\n",
+        dvmDbgGetClassDescriptor(classId),
+        dvmDbgGetMethodName(classId, methodId));
+
+    /*
+     * We could return ERR_ABSENT_INFORMATION here if the DEX file was
+     * built without local variable information.  That will cause Eclipse
+     * to make a best-effort attempt at displaying local variables
+     * anonymously.  However, the attempt isn't very good, so we're probably
+     * better off just not showing anything.
+     */
+    dvmDbgOutputVariableTable(classId, methodId, true, pReply);
+    return ERR_NONE;
+}
+
+/*
+ * Given an object reference, return the runtime type of the object
+ * (class or array).
+ *
+ * This can get called on different things, e.g. threadId gets
+ * passed in here.
+ */
+static JdwpError handleOR_ReferenceType(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId objectId;
+    u1 refTypeTag;
+    RefTypeId typeId;
+
+    objectId = dvmReadObjectId(&buf);
+    LOGV("  Req for type of objectId=0x%llx\n", objectId);
+
+    dvmDbgGetObjectType(objectId, &refTypeTag, &typeId);
+
+    expandBufAdd1(pReply, refTypeTag);
+    expandBufAddRefTypeId(pReply, typeId);
+
+    return ERR_NONE;
+}
+
+/*
+ * Get values from the fields of an object.
+ */
+static JdwpError handleOR_GetValues(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId objectId;
+    u4 numFields;
+    int i;
+
+    objectId = dvmReadObjectId(&buf);
+    numFields = read4BE(&buf);
+
+    LOGV("  Req for %d fields from objectId=0x%llx\n", numFields, objectId);
+
+    expandBufAdd4BE(pReply, numFields);
+
+    for (i = 0; i < (int) numFields; i++) {
+        FieldId fieldId;
+        u1 fieldTag;
+        int width;
+        u1* ptr;
+        const char* fieldName;
+
+        fieldId = dvmReadFieldId(&buf);
+
+        fieldTag = dvmDbgGetFieldTag(objectId, fieldId);
+        width = dvmDbgGetTagWidth(fieldTag);
+
+        LOGV("    --> fieldId %x --> tag '%c'(%d)\n",
+            fieldId, fieldTag, width);
+
+        expandBufAdd1(pReply, fieldTag);
+        ptr = expandBufAddSpace(pReply, width);
+        dvmDbgGetFieldValue(objectId, fieldId, ptr, width);
+    }
+
+    return ERR_NONE;
+}
+
+/*
+ * Set values in the fields of an object.
+ */
+static JdwpError handleOR_SetValues(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId objectId;
+    u4 numFields;
+    int i;
+
+    objectId = dvmReadObjectId(&buf);
+    numFields = read4BE(&buf);
+
+    LOGV("  Req to set %d fields in objectId=0x%llx\n", numFields, objectId);
+
+    for (i = 0; i < (int) numFields; i++) {
+        FieldId fieldId;
+        u1 fieldTag;
+        int width;
+        u8 value;
+
+        fieldId = dvmReadFieldId(&buf);
+
+        fieldTag = dvmDbgGetFieldTag(objectId, fieldId);
+        width = dvmDbgGetTagWidth(fieldTag);
+        value = jdwpReadValue(&buf, width);
+
+        LOGV("    --> fieldId=%x tag='%c'(%d) value=%lld\n",
+            fieldId, fieldTag, width, value);
+
+        dvmDbgSetFieldValue(objectId, fieldId, value, width);
+    }
+
+    return ERR_NONE;
+}
+
+/*
+ * Invoke an instance method.  The invocation must occur in the specified
+ * thread, which must have been suspended by an event.
+ *
+ * The call is synchronous.  All threads in the VM are resumed, unless the
+ * SINGLE_THREADED flag is set.
+ *
+ * If you ask Eclipse to "inspect" an object (or ask JDB to "print" an
+ * object), it will try to invoke the object's toString() function.  This
+ * feature becomes crucial when examining ArrayLists with Eclipse.
+ */
+static JdwpError handleOR_InvokeMethod(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId objectId;
+    ObjectId threadId;
+    RefTypeId classId;
+    MethodId methodId;
+
+    objectId = dvmReadObjectId(&buf);
+    threadId = dvmReadObjectId(&buf);
+    classId = dvmReadRefTypeId(&buf);
+    methodId = dvmReadMethodId(&buf);
+
+    return finishInvoke(state, buf, dataLen, pReply,
+            threadId, objectId, classId, methodId);
+}
+
+/*
+ * Disable garbage collection of the specified object.
+ */
+static JdwpError handleOR_DisableCollection(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    // this is currently a no-op
+    return ERR_NONE;
+}
+
+/*
+ * Enable garbage collection of the specified object.
+ */
+static JdwpError handleOR_EnableCollection(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    // this is currently a no-op
+    return ERR_NONE;
+}
+
+/*
+ * Determine whether an object has been garbage collected.
+ */
+static JdwpError handleOR_IsCollected(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId objectId;
+
+    objectId = dvmReadObjectId(&buf);
+
+    LOGV("  Req IsCollected(0x%llx)\n", objectId);
+
+    // TODO: currently returning false; must integrate with GC
+    expandBufAdd1(pReply, 0);
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the string value in a string object.
+ */
+static JdwpError handleSR_Value(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId stringObject;
+    char* str;
+
+    stringObject = dvmReadObjectId(&buf);
+    str = dvmDbgStringToUtf8(stringObject);
+
+    LOGV("  Req for str %llx --> '%s'\n", stringObject, str);
+
+    expandBufAddUtf8String(pReply, (u1*) str);
+    free(str);
+
+    return ERR_NONE;
+}
+
+/*
+ * Return a thread's name.
+ */
+static JdwpError handleTR_Name(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadId;
+    char* name;
+
+    threadId = dvmReadObjectId(&buf);
+
+    LOGV("  Req for name of thread 0x%llx\n", threadId);
+    name = dvmDbgGetThreadName(threadId);
+    if (name == NULL)
+        return ERR_INVALID_THREAD;
+
+    expandBufAddUtf8String(pReply, (u1*) name);
+    free(name);
+
+    return ERR_NONE;
+}
+
+/*
+ * Suspend the specified thread.
+ *
+ * It's supposed to remain suspended even if interpreted code wants to
+ * resume it; only the JDI is allowed to resume it.
+ */
+static JdwpError handleTR_Suspend(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadId;
+
+    threadId = dvmReadObjectId(&buf);
+
+    if (threadId == dvmDbgGetThreadSelfId()) {
+        LOGI("  Warning: ignoring request to suspend self\n");
+        return ERR_THREAD_NOT_SUSPENDED;
+    }
+    LOGV("  Req to suspend thread 0x%llx\n", threadId);
+
+    dvmDbgSuspendThread(threadId);
+
+    return ERR_NONE;
+}
+
+/*
+ * Resume the specified thread.
+ */
+static JdwpError handleTR_Resume(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadId;
+
+    threadId = dvmReadObjectId(&buf);
+
+    if (threadId == dvmDbgGetThreadSelfId()) {
+        LOGI("  Warning: ignoring request to resume self\n");
+        return ERR_NONE;
+    }
+    LOGV("  Req to resume thread 0x%llx\n", threadId);
+
+    dvmDbgResumeThread(threadId);
+
+    return ERR_NONE;
+}
+
+/*
+ * Return status of specified thread.
+ */
+static JdwpError handleTR_Status(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadId;
+    u4 threadStatus;
+    u4 suspendStatus;
+
+    threadId = dvmReadObjectId(&buf);
+
+    LOGV("  Req for status of thread 0x%llx\n", threadId);
+
+    if (!dvmDbgGetThreadStatus(threadId, &threadStatus, &suspendStatus))
+        return ERR_INVALID_THREAD;
+
+    LOGV("    --> %s, %s\n", dvmJdwpThreadStatusStr(threadStatus),
+        dvmJdwpSuspendStatusStr(suspendStatus));
+
+    expandBufAdd4BE(pReply, threadStatus);
+    expandBufAdd4BE(pReply, suspendStatus);
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the thread group that the specified thread is a member of.
+ */
+static JdwpError handleTR_ThreadGroup(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadId;
+    ObjectId threadGroupId;
+
+    threadId = dvmReadObjectId(&buf);
+
+    /* currently not handling these */
+    threadGroupId = dvmDbgGetThreadGroup(threadId);
+    expandBufAddObjectId(pReply, threadGroupId);
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the current call stack of a suspended thread.
+ *
+ * If the thread isn't suspended, the error code isn't defined, but should
+ * be THREAD_NOT_SUSPENDED.
+ */
+static JdwpError handleTR_Frames(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadId;
+    u4 startFrame, length, frames;
+    int i, frameCount;
+
+    threadId = dvmReadObjectId(&buf);
+    startFrame = read4BE(&buf);
+    length = read4BE(&buf);
+
+    if (!dvmDbgThreadExists(threadId))
+        return ERR_INVALID_THREAD;
+    if (!dvmDbgIsSuspended(threadId)) {
+        LOGV("  Rejecting req for frames in running thread '%s' (%llx)\n",
+            dvmDbgGetThreadName(threadId), threadId);
+        return ERR_THREAD_NOT_SUSPENDED;
+    }
+
+    frameCount = dvmDbgGetThreadFrameCount(threadId);
+
+    LOGV("  Request for frames: threadId=%llx start=%d length=%d [count=%d]\n",
+        threadId, startFrame, length, frameCount);
+    if (frameCount <= 0)
+        return ERR_THREAD_NOT_SUSPENDED;    /* == 0 means 100% native */
+
+    if (length == (u4) -1)
+        length = frameCount;
+    assert((int) startFrame >= 0 && (int) startFrame < frameCount);
+    assert((int) (startFrame + length) <= frameCount);
+
+    frames = length;
+    expandBufAdd4BE(pReply, frames);
+    for (i = startFrame; i < (int) (startFrame+length); i++) {
+        FrameId frameId;
+        JdwpLocation loc;
+
+        dvmDbgGetThreadFrame(threadId, i, &frameId, &loc);
+
+        expandBufAdd8BE(pReply, frameId);
+        dvmJdwpAddLocation(pReply, &loc);
+
+        LOGVV("    Frame %d: id=%llx loc={type=%d cls=%llx mth=%x loc=%llx}\n",
+            i, frameId, loc.typeTag, loc.classId, loc.methodId, loc.idx);
+    }
+
+    return ERR_NONE;
+}
+
+/*
+ * Returns the #of frames on the specified thread, which must be suspended.
+ */
+static JdwpError handleTR_FrameCount(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadId;
+    int frameCount;
+
+    threadId = dvmReadObjectId(&buf);
+
+    if (!dvmDbgThreadExists(threadId))
+        return ERR_INVALID_THREAD;
+    if (!dvmDbgIsSuspended(threadId)) {
+        LOGV("  Rejecting req for frames in running thread '%s' (%llx)\n",
+            dvmDbgGetThreadName(threadId), threadId);
+        return ERR_THREAD_NOT_SUSPENDED;
+    }
+
+    frameCount = dvmDbgGetThreadFrameCount(threadId);
+    if (frameCount < 0)
+        return ERR_INVALID_THREAD;
+    expandBufAdd4BE(pReply, (u4)frameCount);
+
+    return ERR_NONE;
+}
+
+/*
+ * Get the monitor that the thread is waiting on.
+ */
+static JdwpError handleTR_CurrentContendedMonitor(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadId;
+
+    threadId = dvmReadObjectId(&buf);
+
+    // TODO: create an Object to represent the monitor (we're currently
+    // just using a raw Monitor struct in the VM)
+
+    return ERR_NOT_IMPLEMENTED;
+}
+
+/*
+ * Return the suspend count for the specified thread.
+ *
+ * (The thread *might* still be running -- it might not have examined
+ * its suspend count recently.)
+ */
+static JdwpError handleTR_SuspendCount(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadId;
+    u4 suspendCount;
+
+    threadId = dvmReadObjectId(&buf);
+
+    suspendCount = dvmDbgGetThreadSuspendCount(threadId);
+    expandBufAdd4BE(pReply, suspendCount);
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the name of a thread group.
+ *
+ * The Eclipse debugger recognizes "main" and "system" as special.
+ */
+static JdwpError handleTGR_Name(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadGroupId;
+    char* name = NULL;
+
+    threadGroupId = dvmReadObjectId(&buf);
+    LOGV("  Req for name of threadGroupId=0x%llx\n", threadGroupId);
+
+    name = dvmDbgGetThreadGroupName(threadGroupId);
+    if (name != NULL)
+        expandBufAddUtf8String(pReply, (u1*) name);
+    else {
+        expandBufAddUtf8String(pReply, (u1*) "BAD-GROUP-ID");
+        LOGW("bad thread group ID\n");
+    }
+
+    free(name);
+
+    return ERR_NONE;
+}
+
+/*
+ * Returns the thread group -- if any -- that contains the specified
+ * thread group.
+ */
+static JdwpError handleTGR_Parent(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId groupId;
+    ObjectId parentGroup;
+
+    groupId = dvmReadObjectId(&buf);
+
+    parentGroup = dvmDbgGetThreadGroupParent(groupId);
+    expandBufAddObjectId(pReply, parentGroup);
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the active threads and thread groups that are part of the
+ * specified thread group.
+ */
+static JdwpError handleTGR_Children(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadGroupId;
+    u4 threadCount;
+    ObjectId threadId;
+    ObjectId* pThreadIds;
+    ObjectId* walker;
+    int i;
+
+    threadGroupId = dvmReadObjectId(&buf);
+    LOGV("  Req for threads in threadGroupId=0x%llx\n", threadGroupId);
+
+    dvmDbgGetThreadGroupThreads(threadGroupId, &pThreadIds, &threadCount);
+
+    expandBufAdd4BE(pReply, threadCount);
+
+    walker = pThreadIds;
+    for (i = 0; i < (int) threadCount; i++)
+        expandBufAddObjectId(pReply, pThreadIds[i]);
+    free(pThreadIds);
+
+    /*
+     * TODO: finish support for child groups
+     *
+     * For now, just show that "main" is a child of "system".
+     */
+    if (threadGroupId == dvmDbgGetSystemThreadGroupId()) {
+        expandBufAdd4BE(pReply, 1);
+        expandBufAddObjectId(pReply, dvmDbgGetMainThreadGroupId());
+    } else {
+        expandBufAdd4BE(pReply, 0);
+    }
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the #of components in the array.
+ */
+static JdwpError handleAR_Length(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId arrayId;
+    u4 arrayLength;
+
+    arrayId = dvmReadObjectId(&buf);
+    LOGV("  Req for length of array 0x%llx\n", arrayId);
+
+    arrayLength = dvmDbgGetArrayLength(arrayId);
+
+    LOGV("    --> %d\n", arrayLength);
+
+    expandBufAdd4BE(pReply, arrayLength);
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the values from an array.
+ */
+static JdwpError handleAR_GetValues(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId arrayId;
+    u4 firstIndex;
+    u4 length;
+    u1 tag;
+
+    arrayId = dvmReadObjectId(&buf);
+    firstIndex = read4BE(&buf);
+    length = read4BE(&buf);
+
+    tag = dvmDbgGetArrayElementTag(arrayId);
+    LOGV("  Req for array values 0x%llx first=%d len=%d (elem tag=%c)\n",
+        arrayId, firstIndex, length, tag);
+
+    expandBufAdd1(pReply, tag);
+    expandBufAdd4BE(pReply, length);
+
+    if (!dvmDbgOutputArray(arrayId, firstIndex, length, pReply))
+        return ERR_INVALID_LENGTH;
+
+    return ERR_NONE;
+}
+
+/*
+ * Set values in an array.
+ */
+static JdwpError handleAR_SetValues(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId arrayId;
+    u4 firstIndex;
+    u4 values;
+    u1 tag;
+    int i;
+
+    arrayId = dvmReadObjectId(&buf);
+    firstIndex = read4BE(&buf);
+    values = read4BE(&buf);
+
+    LOGV("  Req to set array values 0x%llx first=%d count=%d\n",
+        arrayId, firstIndex, values);
+
+    if (!dvmDbgSetArrayElements(arrayId, firstIndex, values, buf))
+        return ERR_INVALID_LENGTH;
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the set of classes visible to a class loader.  All classes which
+ * have the class loader as a defining or initiating loader are returned.
+ */
+static JdwpError handleCLR_VisibleClasses(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId classLoaderObject;
+    u4 numClasses = 0;
+    RefTypeId* classRefBuf = NULL;
+    int i;
+
+    classLoaderObject = dvmReadObjectId(&buf);
+
+    dvmDbgGetVisibleClassList(classLoaderObject, &numClasses, &classRefBuf);
+
+    expandBufAdd4BE(pReply, numClasses);
+    for (i = 0; i < (int) numClasses; i++) {
+        u1 refTypeTag;
+
+        refTypeTag = dvmDbgGetClassObjectType(classRefBuf[i]);
+
+        expandBufAdd1(pReply, refTypeTag);
+        expandBufAddRefTypeId(pReply, classRefBuf[i]);
+    }
+
+    return ERR_NONE;
+}
+
+/*
+ * Set an event trigger.
+ *
+ * Reply with a requestID.
+ */
+static JdwpError handleER_Set(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    JdwpEvent* pEvent;
+    JdwpError err;
+    const u1* origBuf = buf;
+    /*int origDataLen = dataLen;*/
+    u1 eventKind;
+    u1 suspendPolicy;
+    u4 modifierCount;
+    u4 requestId;
+    int idx;
+
+    eventKind = read1(&buf);
+    suspendPolicy = read1(&buf);
+    modifierCount = read4BE(&buf);
+
+    LOGVV("  Set(kind=%s(%u) suspend=%s(%u) mods=%u)\n",
+        dvmJdwpEventKindStr(eventKind), eventKind,
+        dvmJdwpSuspendPolicyStr(suspendPolicy), suspendPolicy,
+        modifierCount);
+
+    assert(modifierCount < 256);    /* reasonableness check */
+
+    pEvent = dvmJdwpEventAlloc(modifierCount);
+    pEvent->eventKind = eventKind;
+    pEvent->suspendPolicy = suspendPolicy;
+    pEvent->modCount = modifierCount;
+
+    /*
+     * Read modifiers.  Ordering may be significant (see explanation of Count
+     * mods in JDWP doc).
+     */
+    for (idx = 0; idx < (int) modifierCount; idx++) {
+        u1 modKind;
+
+        modKind = read1(&buf);
+
+        pEvent->mods[idx].modKind = modKind;
+
+        switch (modKind) {
+        case MK_COUNT:          /* report once, when "--count" reaches 0 */
+            {
+                u4 count = read4BE(&buf);
+                LOGVV("    Count: %u\n", count);
+                if (count == 0)
+                    return ERR_INVALID_COUNT;
+                pEvent->mods[idx].count.count = count;
+            }
+            break;
+        case MK_CONDITIONAL:    /* conditional on expression) */
+            {
+                u4 exprId = read4BE(&buf);
+                LOGVV("    Conditional: %d\n", exprId);
+                pEvent->mods[idx].conditional.exprId = exprId;
+            }
+            break;
+        case MK_THREAD_ONLY:    /* only report events in specified thread */
+            {
+                ObjectId threadId = dvmReadObjectId(&buf);
+                LOGVV("    ThreadOnly: %llx\n", threadId);
+                pEvent->mods[idx].threadOnly.threadId = threadId;
+            }
+            break;
+        case MK_CLASS_ONLY:     /* for ClassPrepare, MethodEntry */
+            {
+                RefTypeId clazzId = dvmReadRefTypeId(&buf);
+                LOGVV("    ClassOnly: %llx (%s)\n",
+                    clazzId, dvmDbgGetClassDescriptor(clazzId));
+                pEvent->mods[idx].classOnly.referenceTypeId = clazzId;
+            }
+            break;
+        case MK_CLASS_MATCH:    /* restrict events to matching classes */
+            {
+                char* pattern;
+                size_t strLen;
+
+                pattern = readNewUtf8String(&buf, &strLen);
+                LOGVV("    ClassMatch: '%s'\n", pattern);
+                /* pattern is "java.foo.*", we want "java/foo/ *" */
+                pEvent->mods[idx].classMatch.classPattern =
+                    dvmDotToSlash(pattern);
+                free(pattern);
+            }
+            break;
+        case MK_CLASS_EXCLUDE:  /* restrict events to non-matching classes */
+            {
+                char* pattern;
+                size_t strLen;
+
+                pattern = readNewUtf8String(&buf, &strLen);
+                LOGVV("    ClassExclude: '%s'\n", pattern);
+                pEvent->mods[idx].classExclude.classPattern =
+                    dvmDotToSlash(pattern);
+                free(pattern);
+            }
+            break;
+        case MK_LOCATION_ONLY:  /* restrict certain events based on loc */
+            {
+                JdwpLocation loc;
+
+                jdwpReadLocation(&buf, &loc);
+                LOGVV("    LocationOnly: typeTag=%d classId=%llx methodId=%x idx=%llx\n",
+                    loc.typeTag, loc.classId, loc.methodId, loc.idx);
+                pEvent->mods[idx].locationOnly.loc = loc;
+            }
+            break;
+        case MK_EXCEPTION_ONLY: /* modifies EK_EXCEPTION events */
+            {
+                RefTypeId exceptionOrNull;      /* null == all exceptions */
+                u1 caught, uncaught;
+
+                exceptionOrNull = dvmReadRefTypeId(&buf);
+                caught = read1(&buf);
+                uncaught = read1(&buf);
+                LOGVV("    ExceptionOnly: type=%llx(%s) caught=%d uncaught=%d\n",
+                    exceptionOrNull, (exceptionOrNull == 0) ? "null"
+                        : dvmDbgGetClassDescriptor(exceptionOrNull),
+                    caught, uncaught);
+
+                pEvent->mods[idx].exceptionOnly.refTypeId = exceptionOrNull;
+                pEvent->mods[idx].exceptionOnly.caught = caught;
+                pEvent->mods[idx].exceptionOnly.uncaught = uncaught;
+            }
+            break;
+        case MK_FIELD_ONLY:     /* for field access/mod events */
+            {
+                RefTypeId declaring = dvmReadRefTypeId(&buf);
+                FieldId fieldId = dvmReadFieldId(&buf);
+                LOGVV("    FieldOnly: %llx %x\n", declaring, fieldId);
+                pEvent->mods[idx].fieldOnly.refTypeId = declaring;
+                pEvent->mods[idx].fieldOnly.fieldId = fieldId;;
+            }
+            break;
+        case MK_STEP:           /* for use with EK_SINGLE_STEP */
+            {
+                ObjectId threadId;
+                u4 size, depth;
+
+                threadId = dvmReadObjectId(&buf);
+                size = read4BE(&buf);
+                depth = read4BE(&buf);
+                LOGVV("    Step: thread=%llx size=%s depth=%s\n",
+                    threadId, dvmJdwpStepSizeStr(size),
+                    dvmJdwpStepDepthStr(depth));
+
+                pEvent->mods[idx].step.threadId = threadId;
+                pEvent->mods[idx].step.size = size;
+                pEvent->mods[idx].step.depth = depth;
+            }
+            break;
+        case MK_INSTANCE_ONLY:  /* report events related to a specific obj */
+            {
+                ObjectId instance = dvmReadObjectId(&buf);
+                LOGVV("    InstanceOnly: %llx\n", instance);
+                pEvent->mods[idx].instanceOnly.objectId = instance;
+            }
+            break;
+        default:
+            LOGW("GLITCH: unsupported modKind=%d\n", modKind);
+            break;
+        }
+    }
+
+    /*
+     * Make sure we consumed all data.  It is possible that the remote side
+     * has sent us bad stuff, but for now we blame ourselves.
+     */
+    if (buf != origBuf + dataLen) {
+        LOGW("GLITCH: dataLen is %d, we have consumed %d\n", dataLen,
+            (int) (buf - origBuf));
+    }
+
+    /*
+     * We reply with an integer "requestID".
+     */
+    requestId = dvmJdwpNextEventSerial(state);
+    expandBufAdd4BE(pReply, requestId);
+
+    pEvent->requestId = requestId;
+
+    LOGV("    --> event requestId=0x%x\n", requestId);
+
+    /* add it to the list */
+    err = dvmJdwpRegisterEvent(state, pEvent);
+    if (err != ERR_NONE) {
+        /* registration failed, probably because event is bogus */
+        dvmJdwpEventFree(pEvent);
+        LOGW("WARNING: event request rejected\n");
+    }
+    return err;
+}
+
+/*
+ * Clear an event.  Failure to find an event with a matching ID is a no-op
+ * and does not return an error.
+ */
+static JdwpError handleER_Clear(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    u1 eventKind;
+    u4 requestId;
+
+    eventKind = read1(&buf);
+    requestId = read4BE(&buf);
+
+    LOGV("  Req to clear eventKind=%d requestId=0x%08x\n", eventKind,requestId);
+
+    dvmJdwpUnregisterEventById(state, requestId);
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the values of arguments and local variables.
+ */
+static JdwpError handleSF_GetValues(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadId;
+    FrameId frameId;
+    u4 slots;
+    int i;
+
+    threadId = dvmReadObjectId(&buf);
+    frameId = dvmReadFrameId(&buf);
+    slots = read4BE(&buf);
+
+    LOGV("  Req for %d slots in threadId=%llx frameId=%llx\n",
+        slots, threadId, frameId);
+
+    expandBufAdd4BE(pReply, slots);     /* "int values" */
+    for (i = 0; i < (int) slots; i++) {
+        u4 slot;
+        u1 reqSigByte;
+        int width;
+        u1* ptr;
+
+        slot = read4BE(&buf);
+        reqSigByte = read1(&buf);
+
+        LOGV("    --> slot %d '%c'\n", slot, reqSigByte);
+
+        width = dvmDbgGetTagWidth(reqSigByte);
+        ptr = expandBufAddSpace(pReply, width+1);
+        dvmDbgGetLocalValue(threadId, frameId, slot, reqSigByte, ptr, width);
+    }
+
+    return ERR_NONE;
+}
+
+/*
+ * Set the values of arguments and local variables.
+ */
+static JdwpError handleSF_SetValues(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadId;
+    FrameId frameId;
+    u4 slots;
+    int i;
+
+    threadId = dvmReadObjectId(&buf);
+    frameId = dvmReadFrameId(&buf);
+    slots = read4BE(&buf);
+
+    LOGV("  Req to set %d slots in threadId=%llx frameId=%llx\n",
+        slots, threadId, frameId);
+
+    for (i = 0; i < (int) slots; i++) {
+        u4 slot;
+        u1 sigByte;
+        u8 value;
+        int width;
+
+        slot = read4BE(&buf);
+        sigByte = read1(&buf);
+        width = dvmDbgGetTagWidth(sigByte);
+        value = jdwpReadValue(&buf, width);
+
+        LOGV("    --> slot %d '%c' %llx\n", slot, sigByte, value);
+        dvmDbgSetLocalValue(threadId, frameId, slot, sigByte, value, width);
+    }
+
+    return ERR_NONE;
+}
+
+/*
+ * Returns the value of "this" for the specified frame.
+ */
+static JdwpError handleSF_ThisObject(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    ObjectId threadId;
+    FrameId frameId;
+    u1 objectTag;
+    ObjectId objectId;
+    char* typeName;
+
+    threadId = dvmReadObjectId(&buf);
+    frameId = dvmReadFrameId(&buf);
+
+    if (!dvmDbgGetThisObject(threadId, frameId, &objectId))
+        return ERR_INVALID_FRAMEID;
+
+    if (objectId == 0) {
+        typeName = strdup("null");
+        objectTag = 0;
+    } else {
+        typeName = dvmDbgGetObjectTypeName(objectId);
+        objectTag = dvmDbgGetObjectTag(objectId, typeName);
+    }
+    LOGV("  Req for 'this' in thread=%llx frame=%llx --> %llx %s '%c'\n",
+        threadId, frameId, objectId, typeName, (char)objectTag);
+    free(typeName);
+
+    expandBufAdd1(pReply, objectTag);
+    expandBufAddObjectId(pReply, objectId);
+
+    return ERR_NONE;
+}
+
+/*
+ * Return the reference type reflected by this class object.
+ *
+ * This appears to be required because ReferenceTypeId values are NEVER
+ * reused, whereas ClassIds can be recycled like any other object.  (Either
+ * that, or I have no idea what this is for.)
+ */
+static JdwpError handleCOR_ReflectedType(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    RefTypeId classObjectId = dvmReadRefTypeId(&buf);
+
+    LOGV("  Req for refTypeId for class=%llx (%s)\n",
+        classObjectId, dvmDbgGetClassDescriptor(classObjectId));
+
+    /* just hand the type back to them */
+    expandBufAdd1(pReply,
+        dvmDbgIsInterface(classObjectId) ? TT_INTERFACE : TT_CLASS);
+    expandBufAddRefTypeId(pReply, classObjectId);
+
+    return ERR_NONE;
+}
+
+/*
+ * Handle a DDM packet with a single chunk in it.
+ */
+static JdwpError handleDDM_Chunk(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    u1* replyBuf = NULL;
+    int replyLen = -1;
+
+    /* first four bytes of the chunk are printable type characters */
+    LOGV("  Handling DDM packet (%.4s)\n", buf);
+
+    /*
+     * On first DDM packet, notify all handlers that DDM is running.
+     */
+    if (!state->ddmActive) {
+        state->ddmActive = true;
+        dvmDbgDdmConnected();
+    }
+
+    /*
+     * If they want to send something back, we copy it into the buffer.
+     * A no-copy approach would be nicer.
+     *
+     * TODO: consider altering the JDWP stuff to hold the packet header
+     * in a separate buffer.  That would allow us to writev() DDM traffic
+     * instead of copying it into the expanding buffer.  The reduction in
+     * heap requirements is probably more valuable than the efficiency.
+     */
+    if (dvmDbgDdmHandlePacket(buf, dataLen, &replyBuf, &replyLen)) {
+        /* sanity bound: replies must be non-empty and under 1MB */
+        assert(replyLen > 0 && replyLen < 1*1024*1024);
+        memcpy(expandBufAddSpace(pReply, replyLen), replyBuf, replyLen);
+        /* replyBuf was allocated by the packet handler; we own it now */
+        free(replyBuf);
+    }
+    return ERR_NONE;
+}
+
+/*
+ * Handler map decl.
+ */
+typedef JdwpError (*JdwpRequestHandler)(JdwpState* state,
+    const u1* buf, int dataLen, ExpandBuf* reply);
+
+/* one entry in the command dispatch table */
+typedef struct {
+    u1  cmdSet;                 /* JDWP command set number */
+    u1  cmd;                    /* command number within the set */
+    JdwpRequestHandler  func;   /* handler to invoke */
+    const char* descr;          /* human-readable name, for logging */
+} JdwpHandlerMap;
+
+/*
+ * Map commands to functions.
+ *
+ * Command sets 0-63 are incoming requests, 64-127 are outbound requests,
+ * and 128-256 are vendor-defined.
+ */
+static const JdwpHandlerMap gHandlerMap[] = {
+    /* commented-out entries are commands we don't currently implement */
+    /* VirtualMachine command set (1) */
+    { 1,    1,  handleVM_Version,       "VirtualMachine.Version" },
+    { 1,    2,  handleVM_ClassesBySignature,
+                                        "VirtualMachine.ClassesBySignature" },
+    //1,    3,  VirtualMachine.AllClasses
+    { 1,    4,  handleVM_AllThreads,    "VirtualMachine.AllThreads" },
+    { 1,    5,  handleVM_TopLevelThreadGroups,
+                                        "VirtualMachine.TopLevelThreadGroups" },
+    { 1,    6,  handleVM_Dispose,       "VirtualMachine.Dispose" },
+    { 1,    7,  handleVM_IDSizes,       "VirtualMachine.IDSizes" },
+    { 1,    8,  handleVM_Suspend,       "VirtualMachine.Suspend" },
+    { 1,    9,  handleVM_Resume,        "VirtualMachine.Resume" },
+    { 1,    10, handleVM_Exit,          "VirtualMachine.Exit" },
+    { 1,    11, handleVM_CreateString,  "VirtualMachine.CreateString" },
+    { 1,    12, handleVM_Capabilities,  "VirtualMachine.Capabilities" },
+    { 1,    13, handleVM_ClassPaths,    "VirtualMachine.ClassPaths" },
+    /* NOTE(review): "HandleVM_" capitalization differs from the other
+     * handlers -- confirm the function name is intended */
+    { 1,    14, HandleVM_DisposeObjects, "VirtualMachine.DisposeObjects" },
+    //1,    15, HoldEvents
+    //1,    16, ReleaseEvents
+    { 1,    17, handleVM_CapabilitiesNew,
+                                        "VirtualMachine.CapabilitiesNew" },
+    //1,    18, RedefineClasses
+    //1,    19, SetDefaultStratum
+    { 1,    20, handleVM_AllClassesWithGeneric,
+                                        "VirtualMachine.AllClassesWithGeneric"},
+    //1,    21, InstanceCounts
+
+    /* ReferenceType command set (2) */
+    { 2,    1,  handleRT_Signature,     "ReferenceType.Signature" },
+    { 2,    2,  handleRT_ClassLoader,   "ReferenceType.ClassLoader" },
+    { 2,    3,  handleRT_Modifiers,     "ReferenceType.Modifiers" },
+    //2,    4,  Fields
+    //2,    5,  Methods
+    { 2,    6,  handleRT_GetValues,     "ReferenceType.GetValues" },
+    { 2,    7,  handleRT_SourceFile,    "ReferenceType.SourceFile" },
+    //2,    8,  NestedTypes
+    { 2,    9,  handleRT_Status,        "ReferenceType.Status" },
+    { 2,    10, handleRT_Interfaces,    "ReferenceType.Interfaces" },
+    //2,    11, ClassObject
+    { 2,    12, handleRT_SourceDebugExtension,
+                                        "ReferenceType.SourceDebugExtension" },
+    { 2,    13, handleRT_SignatureWithGeneric,
+                                        "ReferenceType.SignatureWithGeneric" },
+    { 2,    14, handleRT_FieldsWithGeneric,
+                                        "ReferenceType.FieldsWithGeneric" },
+    { 2,    15, handleRT_MethodsWithGeneric,
+                                        "ReferenceType.MethodsWithGeneric" },
+    //2,    16, Instances
+    //2,    17, ClassFileVersion
+    //2,    18, ConstantPool
+
+    /* ClassType command set (3) */
+    { 3,    1,  handleCT_Superclass,    "ClassType.Superclass" },
+    { 3,    2,  handleCT_SetValues,     "ClassType.SetValues" },
+    { 3,    3,  handleCT_InvokeMethod,  "ClassType.InvokeMethod" },
+    //3,    4,  NewInstance
+
+    /* ArrayType command set (4) */
+    //4,    1,  NewInstance
+
+    /* InterfaceType command set (5) */
+
+    /* Method command set (6) */
+    { 6,    1,  handleM_LineTable,      "Method.LineTable" },
+    //6,    2,  VariableTable
+    //6,    3,  Bytecodes
+    //6,    4,  IsObsolete
+    { 6,    5,  handleM_VariableTableWithGeneric,
+                                        "Method.VariableTableWithGeneric" },
+
+    /* Field command set (8) */
+
+    /* ObjectReference command set (9) */
+    { 9,    1,  handleOR_ReferenceType, "ObjectReference.ReferenceType" },
+    { 9,    2,  handleOR_GetValues,     "ObjectReference.GetValues" },
+    { 9,    3,  handleOR_SetValues,     "ObjectReference.SetValues" },
+    //9,    4,  (not defined)
+    //9,    5,  MonitorInfo
+    { 9,    6,  handleOR_InvokeMethod,  "ObjectReference.InvokeMethod" },
+    { 9,    7,  handleOR_DisableCollection,
+                                        "ObjectReference.DisableCollection" },
+    { 9,    8,  handleOR_EnableCollection,
+                                        "ObjectReference.EnableCollection" },
+    { 9,    9,  handleOR_IsCollected,   "ObjectReference.IsCollected" },
+    //9,    10, ReferringObjects
+
+    /* StringReference command set (10) */
+    { 10,   1,  handleSR_Value,         "StringReference.Value" },
+
+    /* ThreadReference command set (11) */
+    { 11,   1,  handleTR_Name,          "ThreadReference.Name" },
+    { 11,   2,  handleTR_Suspend,       "ThreadReference.Suspend" },
+    { 11,   3,  handleTR_Resume,        "ThreadReference.Resume" },
+    { 11,   4,  handleTR_Status,        "ThreadReference.Status" },
+    { 11,   5,  handleTR_ThreadGroup,   "ThreadReference.ThreadGroup" },
+    { 11,   6,  handleTR_Frames,        "ThreadReference.Frames" },
+    { 11,   7,  handleTR_FrameCount,    "ThreadReference.FrameCount" },
+    //11,   8,  OwnedMonitors
+    { 11,   9,  handleTR_CurrentContendedMonitor,
+                                    "ThreadReference.CurrentContendedMonitor" },
+    //11,   10, Stop
+    //11,   11, Interrupt
+    { 11,   12, handleTR_SuspendCount,  "ThreadReference.SuspendCount" },
+    //11,   13, OwnedMonitorsStackDepthInfo
+    //11,   14, ForceEarlyReturn
+
+    /* ThreadGroupReference command set (12) */
+    { 12,   1,  handleTGR_Name,         "ThreadGroupReference.Name" },
+    { 12,   2,  handleTGR_Parent,       "ThreadGroupReference.Parent" },
+    { 12,   3,  handleTGR_Children,     "ThreadGroupReference.Children" },
+
+    /* ArrayReference command set (13) */
+    { 13,   1,  handleAR_Length,        "ArrayReference.Length" },
+    { 13,   2,  handleAR_GetValues,     "ArrayReference.GetValues" },
+    { 13,   3,  handleAR_SetValues,     "ArrayReference.SetValues" },
+
+    /* ClassLoaderReference command set (14) */
+    { 14,   1,  handleCLR_VisibleClasses,
+                                        "ClassLoaderReference.VisibleClasses" },
+
+    /* EventRequest command set (15) */
+    { 15,   1,  handleER_Set,           "EventRequest.Set" },
+    { 15,   2,  handleER_Clear,         "EventRequest.Clear" },
+    //15,   3,  ClearAllBreakpoints
+
+    /* StackFrame command set (16) */
+    { 16,   1,  handleSF_GetValues,     "StackFrame.GetValues" },
+    { 16,   2,  handleSF_SetValues,     "StackFrame.SetValues" },
+    { 16,   3,  handleSF_ThisObject,    "StackFrame.ThisObject" },
+    //16,   4,  PopFrames
+
+    /* ClassObjectReference command set (17) */
+    { 17,   1,  handleCOR_ReflectedType,"ClassObjectReference.ReflectedType" },
+
+    /* Event command set (64) */
+    //64,  100, Composite   <-- sent from VM to debugger, never received by VM
+
+    /* DDM vendor extension (kJDWPDdmCmdSet) */
+    { 199,  1,  handleDDM_Chunk,        "DDM.Chunk" },
+};
+
+
+/*
+ * Process a request from the debugger.
+ *
+ * On entry, the JDWP thread is in VMWAIT.
+ */
+void dvmJdwpProcessRequest(JdwpState* state, const JdwpReqHeader* pHeader,
+    const u1* buf, int dataLen, ExpandBuf* pReply)
+{
+    JdwpError result = ERR_NONE;
+    int i, respLen;
+
+    /*
+     * Activity from a debugger, not merely ddms.  Mark us as having an
+     * active debugger session, and zero out the last-activity timestamp.
+     */
+    if (pHeader->cmdSet != kJDWPDdmCmdSet) {
+        dvmDbgActive();
+
+        /* zeroed "sec" means "busy" to dvmJdwpLastDebuggerActivity() */
+        state->lastActivitySec = 0;
+        MEM_BARRIER();
+    }
+
+    /*
+     * If a debugger event has fired in another thread, wait until the
+     * initiating thread has suspended itself before processing messages
+     * from the debugger.  Otherwise we (the JDWP thread) could be told to
+     * resume the thread before it has suspended.
+     *
+     * We call with an argument of zero to wait for the current event
+     * thread to finish, and then clear the block.  Depending on the thread
+     * suspend policy, this may allow events in other threads to fire,
+     * but those events have no bearing on what the debugger has sent us
+     * in the current request.
+     *
+     * Note that we MUST clear the event token before waking the event
+     * thread up, or risk waiting for the thread to suspend after we've
+     * told it to resume.
+     */
+    dvmJdwpSetWaitForEventThread(state, 0);
+
+    /*
+     * Tell the VM that we're running and shouldn't be interrupted by GC.
+     * Do this after anything that can stall indefinitely.
+     */
+    dvmDbgThreadRunning();
+
+    /* reserve room for the reply header; it's filled in after dispatch */
+    expandBufAddSpace(pReply, kJDWPHeaderLen);
+
+    /* linear scan of the dispatch table for (cmdSet, cmd) */
+    for (i = 0; i < (int) NELEM(gHandlerMap); i++) {
+        if (gHandlerMap[i].cmdSet == pHeader->cmdSet &&
+            gHandlerMap[i].cmd == pHeader->cmd)
+        {
+            LOGV("REQ: %s (cmd=%d/%d dataLen=%d id=0x%06x)\n",
+                gHandlerMap[i].descr, pHeader->cmdSet, pHeader->cmd,
+                dataLen, pHeader->id);
+            result = (*gHandlerMap[i].func)(state, buf, dataLen, pReply);
+            break;
+        }
+    }
+    if (i == NELEM(gHandlerMap)) {
+        LOGE("REQ: UNSUPPORTED (cmd=%d/%d dataLen=%d id=0x%06x)\n",
+            pHeader->cmdSet, pHeader->cmd, dataLen, pHeader->id);
+        if (dataLen > 0)
+            dvmPrintHexDumpDbg(buf, dataLen, LOG_TAG);
+        assert(!"command not implemented");      // make it *really* obvious
+        result = ERR_NOT_IMPLEMENTED;
+    }
+
+    /*
+     * Set up the reply header: length(4) id(4) flags(1) errorCode(2).
+     *
+     * If we encountered an error, only send the header back.
+     */
+    u1* replyBuf = expandBufGetBuffer(pReply);
+    set4BE(replyBuf + 4, pHeader->id);
+    set1(replyBuf + 8, kJDWPFlagReply);
+    set2BE(replyBuf + 9, result);
+    if (result == ERR_NONE)
+        set4BE(replyBuf + 0, expandBufGetLength(pReply));
+    else
+        set4BE(replyBuf + 0, kJDWPHeaderLen);
+
+    respLen = expandBufGetLength(pReply) - kJDWPHeaderLen;
+    IF_LOG(LOG_VERBOSE, LOG_TAG) {
+        LOGV("reply: dataLen=%d err=%s(%d)%s\n", respLen,
+            dvmJdwpErrorStr(result), result,
+            result != ERR_NONE ? " **FAILED**" : "");
+        if (respLen > 0)
+            dvmPrintHexDumpDbg(expandBufGetBuffer(pReply) + kJDWPHeaderLen,
+                respLen, LOG_TAG);
+    }
+
+    /*
+     * Update last-activity timestamp.  We really only need this during
+     * the initial setup.  Only update if this is a non-DDMS packet.
+     */
+    if (pHeader->cmdSet != kJDWPDdmCmdSet) {
+        long lastSec, lastMsec;
+
+        /* write msec first, then sec; readers latch on a nonzero sec */
+        dvmJdwpGetNowMsec(&lastSec, &lastMsec);
+        state->lastActivityMsec = lastMsec;
+        MEM_BARRIER();      // updating a 64-bit value
+        state->lastActivitySec = lastSec;
+    }
+
+    /* tell the VM that GC is okay again */
+    dvmDbgThreadWaiting();
+}
+
diff --git a/vm/jdwp/JdwpHandler.h b/vm/jdwp/JdwpHandler.h
new file mode 100644
index 0000000..3a7a98c
--- /dev/null
+++ b/vm/jdwp/JdwpHandler.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Handle requests.
+ */
+#ifndef _DALVIK_JDWP_JDWPHANDLER
+#define _DALVIK_JDWP_JDWPHANDLER
+
+#include "Common.h"
+#include "ExpandBuf.h"
+
+/*
+ * JDWP message header for a request.
+ */
+typedef struct JdwpReqHeader {
+    u4  length;     /* total packet length, including the header */
+    u4  id;         /* serial number; echoed in the reply header */
+    u1  cmdSet;     /* JDWP command set */
+    u1  cmd;        /* command within the set */
+} JdwpReqHeader;
+
+/*
+ * Process a request from the debugger.
+ *
+ * "buf" points past the header, to the content of the message.  "dataLen"
+ * can therefore be zero.
+ *
+ * The reply (including its JDWP header) is appended to "pReply".
+ */
+void dvmJdwpProcessRequest(JdwpState* state, const JdwpReqHeader* pHeader,
+    const u1* buf, int dataLen, ExpandBuf* pReply);
+
+/* helper function: append a JDWP location (type tag, class, method, pc) */
+void dvmJdwpAddLocation(ExpandBuf* pReply, const JdwpLocation* pLoc);
+
+#endif /*_DALVIK_JDWP_JDWPHANDLER*/
diff --git a/vm/jdwp/JdwpMain.c b/vm/jdwp/JdwpMain.c
new file mode 100644
index 0000000..4166c67
--- /dev/null
+++ b/vm/jdwp/JdwpMain.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * JDWP initialization.
+ */
+#include "jdwp/JdwpPriv.h"
+#include "Dalvik.h"
+#include "Atomic.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/time.h>
+#include <time.h>
+#include <errno.h>
+
+
+static void* jdwpThreadStart(void* arg);
+
+
+/*
+ * Initialize JDWP.
+ *
+ * Does not return until JDWP thread is running, but may return before
+ * the thread is accepting network connections.
+ */
+JdwpState* dvmJdwpStartup(const JdwpStartupParams* pParams)
+{
+    JdwpState* state = NULL;
+
+    /* comment this out when debugging JDWP itself */
+    android_setMinPriority(LOG_TAG, ANDROID_LOG_DEBUG);
+
+    /* check the allocation -- the original dereferenced a possible NULL */
+    state = (JdwpState*) calloc(1, sizeof(JdwpState));
+    if (state == NULL) {
+        LOGE("JDWP init: unable to allocate state\n");
+        return NULL;
+    }
+
+    state->params = *pParams;
+
+    /* start the two serial-number spaces at distinct values */
+    state->requestSerial = 0x10000000;
+    state->eventSerial = 0x20000000;
+    dvmDbgInitMutex(&state->threadStartLock);
+    dvmDbgInitMutex(&state->attachLock);
+    dvmDbgInitMutex(&state->serialLock);
+    dvmDbgInitMutex(&state->eventLock);
+    state->eventThreadId = 0;
+    dvmDbgInitMutex(&state->eventThreadLock);
+    dvmDbgInitCond(&state->threadStartCond);
+    dvmDbgInitCond(&state->attachCond);
+    dvmDbgInitCond(&state->eventThreadCond);
+
+    /* pick the transport's function table */
+    switch (pParams->transport) {
+    case kJdwpTransportSocket:
+        // LOGD("prepping for JDWP over TCP\n");
+        state->transport = dvmJdwpSocketTransport();
+        break;
+    case kJdwpTransportAndroidAdb:
+        // LOGD("prepping for JDWP over ADB\n");
+        state->transport = dvmJdwpAndroidAdbTransport();
+        /* TODO */
+        break;
+    default:
+        LOGE("Unknown transport %d\n", pParams->transport);
+        assert(false);
+        goto fail;
+    }
+
+    if (!dvmJdwpNetStartup(state, pParams))
+        goto fail;
+
+    /*
+     * Grab a mutex or two before starting the thread.  This ensures they
+     * won't signal the cond var before we're waiting.
+     */
+    dvmDbgLockMutex(&state->threadStartLock);
+    if (pParams->suspend)
+        dvmDbgLockMutex(&state->attachLock);
+
+    /*
+     * We have bound to a port, or are trying to connect outbound to a
+     * debugger.  Create the JDWP thread and let it continue the mission.
+     */
+    if (!dvmCreateInternalThread(&state->debugThreadHandle, "JDWP",
+            jdwpThreadStart, state))
+    {
+        /* state is getting tossed, but unlock these anyway for cleanliness */
+        dvmDbgUnlockMutex(&state->threadStartLock);
+        if (pParams->suspend)
+            dvmDbgUnlockMutex(&state->attachLock);
+        goto fail;
+    }
+
+    /*
+     * Wait until the thread finishes basic initialization.
+     * TODO: cond vars should be waited upon in a loop
+     */
+    dvmDbgCondWait(&state->threadStartCond, &state->threadStartLock);
+    dvmDbgUnlockMutex(&state->threadStartLock);
+
+
+    /*
+     * For suspend=y, wait for the debugger to connect to us or for us to
+     * connect to the debugger.
+     *
+     * The JDWP thread will signal us when it connects successfully or
+     * times out (for timeout=xxx), so we have to check to see what happened
+     * when we wake up.
+     */
+    if (pParams->suspend) {
+        dvmChangeStatus(NULL, THREAD_VMWAIT);
+        dvmDbgCondWait(&state->attachCond, &state->attachLock);
+        dvmDbgUnlockMutex(&state->attachLock);
+        dvmChangeStatus(NULL, THREAD_RUNNING);
+
+        if (!dvmJdwpIsActive(state)) {
+            LOGE("JDWP connection failed\n");
+            goto fail;
+        }
+
+        LOGI("JDWP connected\n");
+
+        /*
+         * Ordinarily we would pause briefly to allow the debugger to set
+         * breakpoints and so on, but for "suspend=y" the VM init code will
+         * pause the VM when it sends the VM_START message.
+         */
+    }
+
+    return state;
+
+fail:
+    dvmJdwpShutdown(state);     // frees state
+    return NULL;
+}
+
+/*
+ * Reset all session-related state.  There should not be an active connection
+ * to the client at this point (we may be listening for a new one though).
+ *
+ * This includes freeing up the debugger event list.
+ */
+void dvmJdwpResetState(JdwpState* state)
+{
+    /* could reset the serial numbers, but no need to */
+
+    /* drop all debugger-requested events */
+    dvmJdwpUnregisterAll(state);
+    assert(state->eventList == NULL);
+
+    /*
+     * Should not have one of these in progress.  If the debugger went away
+     * mid-request, though, we could see this.
+     */
+    bool eventInProgress = (state->eventThreadId != 0);
+    if (eventInProgress) {
+        LOGW("WARNING: resetting state while event in progress\n");
+        assert(false);
+    }
+}
+
+/*
+ * Tell the JDWP thread to shut down.  Frees "state".
+ */
+void dvmJdwpShutdown(JdwpState* state)
+{
+    void* threadReturn;
+
+    /* NULL is tolerated so the startup failure path can call us */
+    if (state == NULL)
+        return;
+
+    if (dvmJdwpIsTransportDefined(state)) {
+        if (dvmJdwpIsConnected(state))
+            dvmJdwpPostVMDeath(state);
+
+        /*
+         * Close down the network to inspire the thread to halt.
+         */
+        LOGD("JDWP shutting down net...\n");
+        dvmJdwpNetShutdown(state);
+
+        if (state->debugThreadStarted) {
+            /* clearing "run" makes the JDWP thread exit its outer loop */
+            state->run = false;
+            if (pthread_join(state->debugThreadHandle, &threadReturn) != 0) {
+                LOGW("JDWP thread join failed\n");
+            }
+        }
+
+        LOGV("JDWP freeing netstate...\n");
+        dvmJdwpNetFree(state);
+        state->netState = NULL;
+    }
+    assert(state->netState == NULL);
+
+    dvmJdwpResetState(state);
+    free(state);
+}
+
+/*
+ * Are we talking to a debugger?
+ */ 
+bool dvmJdwpIsActive(JdwpState* state)
+{
+    /* "active" currently just means a debugger connection exists */
+    return dvmJdwpIsConnected(state);
+}
+
+/*
+ * Entry point for JDWP thread.  The thread was created through the VM
+ * mechanisms, so there is a java/lang/Thread associated with us.
+ */
+static void* jdwpThreadStart(void* arg)
+{
+    JdwpState* state = (JdwpState*) arg;
+
+    LOGV("JDWP: thread running\n");
+
+    /*
+     * Finish initializing "state", then notify the creating thread that
+     * we're running.
+     */
+    state->debugThreadHandle = dvmThreadSelf()->handle;
+    state->run = true;
+    MEM_BARRIER();
+    state->debugThreadStarted = true;       // touch this last
+
+    dvmDbgLockMutex(&state->threadStartLock);
+    dvmDbgCondBroadcast(&state->threadStartCond);
+    dvmDbgUnlockMutex(&state->threadStartLock);
+
+    /* set the thread state to VMWAIT so GCs don't wait for us */
+    dvmDbgThreadWaiting();
+
+    /*
+     * Loop forever if we're in server mode, processing connections.  In
+     * non-server mode, we bail out of the thread when the debugger drops
+     * us.
+     *
+     * We broadcast a notification when a debugger attaches, after we
+     * successfully process the handshake.
+     */
+    while (state->run) {
+        /* true until we've seen the first complete post-handshake packet */
+        bool first;
+
+        if (state->params.server) {
+            /*
+             * Block forever, waiting for a connection.  To support the
+             * "timeout=xxx" option we'll need to tweak this.
+             */
+            if (!dvmJdwpAcceptConnection(state))
+                break;
+        } else {
+            /*
+             * If we're not acting as a server, we need to connect out to the
+             * debugger.  To support the "timeout=xxx" option we need to
+             * have a timeout if the handshake reply isn't received in a
+             * reasonable amount of time.
+             */
+            if (!dvmJdwpEstablishConnection(state)) {
+                /* wake anybody who was waiting for us to succeed */
+                dvmDbgLockMutex(&state->attachLock);
+                dvmDbgCondBroadcast(&state->attachCond);
+                dvmDbgUnlockMutex(&state->attachLock);
+                break;
+            }
+        }
+
+        /* prep debug code to handle the new connection */
+        dvmDbgConnected();
+
+        /* process requests until the debugger drops */
+        first = true;
+        while (true) {
+            // sanity check -- shouldn't happen?
+            if (dvmThreadSelf()->status != THREAD_VMWAIT) {
+                LOGE("JDWP thread no longer in VMWAIT (now %d); resetting\n",
+                    dvmThreadSelf()->status);
+                dvmDbgThreadWaiting();
+            }
+
+            if (!dvmJdwpProcessIncoming(state))     /* blocking read */
+                break;
+
+            if (first && !dvmJdwpAwaitingHandshake(state)) {
+                /* handshake worked, tell the interpreter that we're active */
+                first = false;
+
+                /* set thread ID; requires object registry to be active */
+                state->debugThreadId = dvmDbgGetThreadSelfId();
+
+                /* wake anybody who's waiting for us */
+                dvmDbgLockMutex(&state->attachLock);
+                dvmDbgCondBroadcast(&state->attachCond);
+                dvmDbgUnlockMutex(&state->attachLock);
+            }
+        }
+
+        dvmJdwpCloseConnection(state);
+
+        if (state->ddmActive) {
+            state->ddmActive = false;
+
+            /* broadcast the disconnect; must be in RUNNING state */
+            dvmDbgThreadRunning();
+            dvmDbgDdmDisconnected();
+            dvmDbgThreadWaiting();
+        }
+
+        /* interpreter can ignore breakpoints */
+        dvmDbgDisconnected();
+
+        /* if we had stuff suspended, resume it now */
+        dvmUndoDebuggerSuspensions();
+
+        dvmJdwpResetState(state);
+
+        /* if we connected out, this was a one-shot deal */
+        if (!state->params.server)
+            state->run = false;
+    }
+
+    /* back to running, for thread shutdown */
+    dvmDbgThreadRunning();
+
+    LOGV("JDWP: thread exiting\n");
+    return NULL;
+}
+
+
+/*
+ * Return the thread handle, or (pthread_t)0 if the debugger isn't running.
+ */
+pthread_t dvmJdwpGetDebugThread(JdwpState* state)
+{
+    /* no state means the debug thread was never started */
+    return (state != NULL) ? state->debugThreadHandle : (pthread_t) 0;
+}
+
+#if 0
+/*
+ * Wait until the debugger attaches.  Returns immediately if the debugger
+ * is already attached.
+ *
+ * If we return the instant the debugger connects, we run the risk of
+ * executing code before the debugger has had a chance to configure
+ * breakpoints or issue suspend calls.  It would be nice to just sit in
+ * the suspended state, but most debuggers don't expect any threads to be
+ * suspended when they attach.
+ *
+ * There's no event we can post to tell the debugger "we've stopped, and
+ * we like it that way".  We could send a fake breakpoint, which should
+ * cause the debugger to immediately send a resume, but the debugger might
+ * send the resume immediately or might throw an exception of its own upon
+ * receiving a breakpoint event that it didn't ask for.
+ *
+ * What we really want is a "wait until the debugger is done configuring
+ * stuff" event.  We can get close with a "wait until the debugger has
+ * been idle for a brief period", and we can do a mild approximation with
+ * "just sleep for a second after it connects".
+ *
+ * We should be in THREAD_VMWAIT here, so we're not allowed to do anything
+ * with objects because a GC could be in progress.
+ *
+ * NOTE: this trips as soon as something connects to the socket.  This
+ * is no longer appropriate -- we don't want to return when DDMS connects.
+ * We could fix this by polling for the first debugger packet, but we have
+ * to watch out for disconnects.  If we're going to do polling, it's
+ * probably best to do it at a higher level.
+ */
+void dvmJdwpWaitForDebugger(JdwpState* state)
+{
+    // no more -- intentionally empty; disabled code, see comment above
+}
+#endif
+
+/*
+ * Get a notion of the current time, in milliseconds.  We leave it in
+ * two 32-bit pieces.
+ */
+void dvmJdwpGetNowMsec(long* pSec, long* pMsec)
+{
+#ifdef HAVE_POSIX_CLOCKS
+    /* monotonic clock: immune to wall-clock adjustments */
+    struct timespec now;
+    clock_gettime(CLOCK_MONOTONIC, &now);
+    *pSec = now.tv_sec;
+    *pMsec = now.tv_nsec / 1000000;
+#else
+    /* wall-clock fallback; the two branches use different epochs, so
+     * callers should only compare deltas (see dvmJdwpLastDebuggerActivity) */
+    struct timeval now;
+    gettimeofday(&now, NULL);
+    *pSec = now.tv_sec;
+    *pMsec = now.tv_usec / 1000;
+#endif
+}
+
+/*
+ * Return the time, in milliseconds, since the last debugger activity.
+ *
+ * Returns 0 if we're in the middle of processing a debugger request (or
+ * before any activity has been recorded).
+ *
+ * NOTE(review): the original comment also promised -1 when no debugger is
+ * attached, but this implementation never returns -1 -- callers must check
+ * attachment separately.  Confirm intended contract.
+ */
+s8 dvmJdwpLastDebuggerActivity(JdwpState* state)
+{
+    long lastSec, lastMsec;
+    long nowSec, nowMsec;
+
+    /* these are volatile; lastSec becomes 0 during update */
+    lastSec = state->lastActivitySec;
+    lastMsec = state->lastActivityMsec;
+
+    /* initializing or in the middle of something? */
+    /* re-read of lastActivitySec detects a concurrent update (seqlock-ish) */
+    if (lastSec == 0 || state->lastActivitySec != lastSec) {
+        //LOGI("+++ last=busy\n");
+        return 0;
+    }
+
+    /* get the current time *after* latching the "last" time */
+    dvmJdwpGetNowMsec(&nowSec, &nowMsec);
+
+    s8 last = (s8)lastSec * 1000 + lastMsec;
+    s8 now = (s8)nowSec * 1000 + nowMsec;
+
+    //LOGI("last is %ld.%ld --> %lld\n", lastSec, lastMsec, last);
+    //LOGI("now is  %ld.%ld --> %lld\n", nowSec, nowMsec, now);
+
+
+    //LOGI("+++ interval=%lld\n", now - last);
+    return now - last;
+}
+
diff --git a/vm/jdwp/JdwpPriv.h b/vm/jdwp/JdwpPriv.h
new file mode 100644
index 0000000..087b560
--- /dev/null
+++ b/vm/jdwp/JdwpPriv.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * JDWP internal interfaces.
+ */
+#ifndef _DALVIK_JDWP_JDWPPRIV
+#define _DALVIK_JDWP_JDWPPRIV
+
+#define LOG_TAG "jdwp"
+
+#include "jdwp/Jdwp.h"
+#include "jdwp/JdwpEvent.h"
+#include "Debugger.h"
+#include <pthread.h>
+
+/*
+ * JDWP constants.
+ */
+#define kJDWPHeaderLen  11      /* length(4) + id(4) + flags(1) + cmd/err(2) */
+#define kJDWPFlagReply  0x80
+
+/* DDM support */
+#define kJDWPDdmCmdSet  199     /* 0xc7, or 'G'+128 */
+#define kJDWPDdmCmd     1
+
+
+/*
+ * Transport-specific network status.
+ */
+struct JdwpNetState;
+typedef struct JdwpNetState JdwpNetState;
+
+struct JdwpState;
+
+/*
+ * Transport functions.
+ */
+/* per-transport function table; see the dvmJdwp*() inline wrappers below */
+typedef struct JdwpTransport {
+    bool (*startup)(struct JdwpState* state, const JdwpStartupParams* pParams);
+    bool (*accept)(struct JdwpState* state);
+    bool (*establish)(struct JdwpState* state);
+    void (*close)(struct JdwpState* state);
+    void (*shutdown)(struct JdwpState* state);
+    void (*free)(struct JdwpState* state);
+    bool (*isConnected)(struct JdwpState* state);
+    bool (*awaitingHandshake)(struct JdwpState* state);
+    bool (*processIncoming)(struct JdwpState* state);
+    bool (*sendRequest)(struct JdwpState* state, ExpandBuf* pReq);
+} JdwpTransport;
+
+/* factory functions returning the table for each transport */
+const JdwpTransport* dvmJdwpSocketTransport();
+const JdwpTransport* dvmJdwpAndroidAdbTransport();
+
+
+/*
+ * State for JDWP functions.
+ */
+struct JdwpState {
+    JdwpStartupParams   params;
+
+    /* wait for creation of the JDWP thread */
+    pthread_mutex_t threadStartLock;
+    pthread_cond_t  threadStartCond;
+
+    bool            debugThreadStarted; /* set last by the JDWP thread */
+    pthread_t       debugThreadHandle;
+    ObjectId        debugThreadId;
+    bool            run;                /* JDWP thread loops while true */
+
+    const JdwpTransport*    transport;  /* function table; never NULL once set */
+    JdwpNetState*   netState;           /* transport-private state */
+
+    /* for wait-for-debugger */
+    pthread_mutex_t attachLock;
+    pthread_cond_t  attachCond;
+
+    /* time of last debugger activity; "sec" zeroed while processing */
+    volatile long   lastActivitySec;
+    volatile long   lastActivityMsec;
+
+    /* global counters and a mutex to protect them */
+    u4              requestSerial;
+    u4              eventSerial;
+    pthread_mutex_t serialLock;
+
+    /*
+     * Events requested by the debugger (breakpoints, class prep, etc).
+     */
+    int             numEvents;      /* #of elements in eventList */
+    JdwpEvent*      eventList;      /* linked list of events */
+    pthread_mutex_t eventLock;      /* guards numEvents/eventList */
+
+    /*
+     * Synchronize suspension of event thread (to avoid receiving "resume"
+     * events before the thread has finished suspending itself).
+     */
+    pthread_mutex_t eventThreadLock;
+    pthread_cond_t  eventThreadCond;
+    ObjectId        eventThreadId;
+
+    /*
+     * DDM support.  Set on the first DDM packet; cleared on disconnect.
+     */
+    bool            ddmActive;
+};
+
+
+/* reset all session-specific data */
+void dvmJdwpResetState(JdwpState* state);
+
+/* atomic ops to get next serial number */
+u4 dvmJdwpNextRequestSerial(JdwpState* state);
+u4 dvmJdwpNextEventSerial(JdwpState* state);
+
+/* get current time, in msec */
+void dvmJdwpGetNowMsec(long* pSec, long* pMsec);
+
+
+/*
+ * Transport functions.  Thin inline wrappers around the JdwpTransport
+ * function table.
+ *
+ * NOTE: only dvmJdwpIsTransportDefined() and dvmJdwpIsConnected() tolerate
+ * a NULL "state"; the others assume state and state->transport are valid.
+ */
+INLINE bool dvmJdwpNetStartup(JdwpState* state,
+    const JdwpStartupParams* pParams)
+{
+    return (*state->transport->startup)(state, pParams);
+}
+INLINE bool dvmJdwpAcceptConnection(JdwpState* state) {
+    return (*state->transport->accept)(state);
+}
+INLINE bool dvmJdwpEstablishConnection(JdwpState* state) {
+    return (*state->transport->establish)(state);
+}
+INLINE void dvmJdwpCloseConnection(JdwpState* state) {
+    (*state->transport->close)(state);
+}
+INLINE void dvmJdwpNetShutdown(JdwpState* state) {
+    (*state->transport->shutdown)(state);
+}
+INLINE void dvmJdwpNetFree(JdwpState* state) {
+    (*state->transport->free)(state);
+}
+INLINE bool dvmJdwpIsTransportDefined(JdwpState* state) {
+    return state != NULL && state->transport != NULL;
+}
+INLINE bool dvmJdwpIsConnected(JdwpState* state) {
+    return state != NULL && (*state->transport->isConnected)(state);
+}
+INLINE bool dvmJdwpAwaitingHandshake(JdwpState* state) {
+    return (*state->transport->awaitingHandshake)(state);
+}
+INLINE bool dvmJdwpProcessIncoming(JdwpState* state) {
+    return (*state->transport->processIncoming)(state);
+}
+INLINE bool dvmJdwpSendRequest(JdwpState* state, ExpandBuf* pReq) {
+    return (*state->transport->sendRequest)(state, pReq);
+}
+#endif /*_DALVIK_JDWP_JDWPPRIV*/
diff --git a/vm/jdwp/JdwpSocket.c b/vm/jdwp/JdwpSocket.c
new file mode 100644
index 0000000..7b1ccfc
--- /dev/null
+++ b/vm/jdwp/JdwpSocket.c
@@ -0,0 +1,876 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * JDWP TCP socket network code.
+ */
+#include "jdwp/JdwpPriv.h"
+#include "jdwp/JdwpHandler.h"
+#include "Bits.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+
+#define kBasePort           8000
+#define kMaxPort            8040
+
+#define kInputBufferSize    8192
+
+#define kMagicHandshake     "JDWP-Handshake"
+#define kMagicHandshakeLen  (sizeof(kMagicHandshake)-1)
+
+// fwd
+static void netShutdown(JdwpNetState* state);
+static void netFree(JdwpNetState* state);
+
+
+/*
+ * JDWP network state.
+ *
+ * We only talk to one debugger at a time.
+ */
+struct JdwpNetState {
+    short   listenPort;         /* port we bound to (server mode) */
+    int     listenSock;         /* listen for connection from debugger */
+    int     clientSock;         /* active connection to debugger */
+    int     wakePipe[2];        /* break out of select */
+
+    struct in_addr remoteAddr;  /* debugger's address, for log messages */
+    unsigned short remotePort;  /* debugger's port, host byte order */
+
+    bool    awaitingHandshake;  /* waiting for "JDWP-Handshake" */
+
+    /* pending data from the network; would be more efficient as circular buf */
+    unsigned char  inputBuffer[kInputBufferSize];
+    int     inputCount;         /* #of valid bytes at start of inputBuffer */
+};
+
+static JdwpNetState* netStartup(short port);
+
+/*
+ * Set up some stuff for transport=dt_socket.
+ */
+/*
+ * Set up some stuff for transport=dt_socket.
+ *
+ * Server mode binds a listen socket (a specific port, or the first free
+ * one in [kBasePort, kMaxPort]); client mode just allocates state for a
+ * later outbound connect.  Returns "false" on failure.
+ */
+static bool prepareSocket(JdwpState* state, const JdwpStartupParams* pParams)
+{
+    unsigned short port;
+
+    if (pParams->server) {
+        if (pParams->port != 0) {
+            /* try only the specified port */
+            port = pParams->port;
+            state->netState = netStartup(port);
+        } else {
+            /* scan through a range of ports, binding to the first available */
+            for (port = kBasePort; port <= kMaxPort; port++) {
+                state->netState = netStartup(port);
+                if (state->netState != NULL)
+                    break;
+            }
+        }
+    } else {
+        port = pParams->port;   // used in a debug msg later
+        state->netState = netStartup(-1);
+    }
+
+    /*
+     * netStartup() can fail in client mode too (e.g. allocation failure);
+     * previously only the server branch checked for NULL.
+     */
+    if (state->netState == NULL) {
+        LOGE("JDWP net startup failed (req port=%d)\n", pParams->port);
+        return false;
+    }
+
+    if (pParams->suspend)
+        LOGI("JDWP will wait for debugger on port %d\n", port);
+    else
+        LOGD("JDWP will %s on port %d\n",
+            pParams->server ? "listen" : "connect", port);
+
+    return true;
+}
+
+
+/*
+ * Are we still waiting for the handshake string?
+ */
+static bool awaitingHandshake(JdwpState* state)
+{
+    /* flag is maintained by acceptConnection/establishConnection and
+     * cleared by processIncoming once the handshake echo completes */
+    JdwpNetState* netState = state->netState;
+
+    return netState->awaitingHandshake;
+}
+
+/*
+ * Initialize JDWP stuff.
+ *
+ * Allocates a new state structure.  If "port" is non-negative, this also
+ * tries to bind to a listen port.  If "port" is less than zero, we assume
+ * we're preparing for an outbound connection, and return without binding
+ * to anything.
+ *
+ * This may be called several times if we're probing for a port.
+ *
+ * Returns 0 on success.
+ */
+static JdwpNetState* netStartup(short port)
+{
+    JdwpNetState* netState;
+    int one = 1;
+
+    netState = (JdwpNetState*) malloc(sizeof(*netState));
+    if (netState == NULL) {
+        /* previously unchecked -- the memset below would crash on NULL */
+        LOGE("Unable to allocate JdwpNetState\n");
+        return NULL;
+    }
+    memset(netState, 0, sizeof(*netState));
+    netState->listenSock = -1;
+    netState->clientSock = -1;
+    netState->wakePipe[0] = -1;
+    netState->wakePipe[1] = -1;
+
+    /* negative port means "outbound connection later"; no socket yet */
+    if (port < 0)
+        return netState;
+
+    assert(port != 0);
+
+    netState->listenSock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
+    if (netState->listenSock < 0) {
+        LOGE("Socket create failed: %s\n", strerror(errno));
+        goto fail;
+    }
+
+    /* allow immediate re-use */
+    if (setsockopt(netState->listenSock, SOL_SOCKET, SO_REUSEADDR, &one,
+            sizeof(one)) < 0)
+    {
+        LOGE("setsockopt(SO_REUSEADDR) failed: %s\n", strerror(errno));
+        goto fail;
+    }
+
+    /* bind to loopback only; the debugger connects locally (or via adb) */
+    union {
+        struct sockaddr_in  addrInet;
+        struct sockaddr     addrPlain;
+    } addr;
+    addr.addrInet.sin_family = AF_INET;
+    addr.addrInet.sin_port = htons(port);
+    inet_aton("127.0.0.1", &addr.addrInet.sin_addr);
+
+    if (bind(netState->listenSock, &addr.addrPlain, sizeof(addr)) != 0) {
+        LOGV("attempt to bind to port %u failed: %s\n", port, strerror(errno));
+        goto fail;
+    }
+
+    netState->listenPort = port;
+    LOGVV("+++ bound to port %d\n", netState->listenPort);
+
+    if (listen(netState->listenSock, 5) != 0) {
+        LOGE("Listen failed: %s\n", strerror(errno));
+        goto fail;
+    }
+
+    return netState;
+
+fail:
+    netShutdown(netState);
+    netFree(netState);
+    return NULL;
+}
+
+/*
+ * Shut down JDWP listener.  Don't free state.
+ *
+ * Note that "netState" may be partially initialized if "startup" failed.
+ *
+ * This may be called from a non-JDWP thread as part of shutting the
+ * JDWP thread down.
+ *
+ * (This is currently called several times during startup as we probe
+ * for an open port.)
+ */
+static void netShutdown(JdwpNetState* netState)
+{
+    if (netState == NULL)
+        return;
+
+    /* snapshot the fds before clearing the struct fields below */
+    int listenSock = netState->listenSock;
+    int clientSock = netState->clientSock;
+
+    /* clear these out so it doesn't wake up and try to reuse them */
+    netState->listenSock = netState->clientSock = -1;
+
+    /* "shutdown" dislodges blocking read() and accept() calls */
+    if (listenSock >= 0) {
+        shutdown(listenSock, SHUT_RDWR);
+        close(listenSock);
+    }
+    if (clientSock >= 0) {
+        shutdown(clientSock, SHUT_RDWR);
+        close(clientSock);
+    }
+
+    /* if we might be sitting in select, kick us loose */
+    if (netState->wakePipe[1] >= 0) {
+        LOGV("+++ writing to wakePipe\n");
+        (void) write(netState->wakePipe[1], "", 1);
+    }
+}
+/* transport-table adapter: JdwpState -> JdwpNetState */
+static void netShutdownExtern(JdwpState* state)
+{
+    JdwpNetState* netState = state->netState;
+
+    netShutdown(netState);
+}
+
+/*
+ * Free JDWP state.
+ *
+ * Call this after shutting the network down with netShutdown().
+ */
+/*
+ * Free JDWP state.
+ *
+ * Call this after shutting the network down with netShutdown(); the
+ * sockets must already be closed (asserted below).
+ */
+static void netFree(JdwpNetState* netState)
+{
+    int i;
+
+    if (netState == NULL)
+        return;
+    assert(netState->listenSock == -1);
+    assert(netState->clientSock == -1);
+
+    /* close both ends of the wake pipe, if they were opened */
+    for (i = 0; i < 2; i++) {
+        if (netState->wakePipe[i] >= 0) {
+            close(netState->wakePipe[i]);
+            netState->wakePipe[i] = -1;
+        }
+    }
+
+    free(netState);
+}
+/* transport-table adapter: JdwpState -> JdwpNetState */
+static void netFreeExtern(JdwpState* state)
+{
+    JdwpNetState* netState = state->netState;
+
+    netFree(netState);
+}
+
+/*
+ * Returns "true" if we're connected to a debugger.
+ */
+/*
+ * Returns "true" if we're connected to a debugger.
+ */
+static bool isConnected(JdwpState* state)
+{
+    if (state->netState == NULL)
+        return false;
+
+    return state->netState->clientSock >= 0;
+}
+
+/*
+ * Returns "true" if the fd is ready, "false" if not.
+ */
+/*
+ * Returns "true" if the fd has data waiting, "false" if not.
+ * Never blocks (uses a zero select() timeout).
+ */
+static bool isFdReadable(int sock)
+{
+    fd_set readfds;
+    struct timeval timeout;
+    int result;
+
+    FD_ZERO(&readfds);
+    FD_SET(sock, &readfds);
+
+    /* zero timeout == poll */
+    timeout.tv_sec = 0;
+    timeout.tv_usec = 0;
+    result = select(sock+1, &readfds, NULL, NULL, &timeout);
+    if (result <= 0)
+        return false;
+
+    if (!FD_ISSET(sock, &readfds)) {   /* make sure it's our fd */
+        LOGE("WEIRD: odd behavior in select (count=%d)\n", result);
+        return false;
+    }
+
+    return true;
+}
+
+#if 0
+/*
+ * Check to see if we have a pending connection from the debugger.
+ *
+ * Returns true on success (meaning a connection is available).
+ */
+static bool checkConnection(JdwpState* state)
+{
+    JdwpNetState* netState = state->netState;
+
+    assert(netState->listenSock >= 0);
+    /* not expecting to be called when debugger is actively connected */
+    assert(netState->clientSock < 0);
+
+    /* non-blocking poll of the listen socket */
+    if (!isFdReadable(netState->listenSock))
+        return false;
+    return true;
+}
+#endif
+
+/*
+ * Disable the TCP Nagle algorithm, which delays transmission of outbound
+ * packets until the previous transmissions have been acked.  JDWP does a
+ * lot of back-and-forth with small packets, so this may help.
+ */
+/*
+ * Disable the TCP Nagle algorithm, which delays transmission of outbound
+ * packets until the previous transmissions have been acked.  JDWP does a
+ * lot of back-and-forth with small packets, so this may help.
+ *
+ * Returns the setsockopt() result (0 on success).
+ */
+static int setNoDelay(int fd)
+{
+    int cc, on = 1;
+
+    cc = setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+    if (cc != 0) {
+        /*
+         * Not fatal -- we just lose the latency win.  (Was assert(cc==0),
+         * which silently ignored the failure in NDEBUG builds.)
+         */
+        LOGW("setsockopt(TCP_NODELAY) failed: %s\n", strerror(errno));
+    }
+    return cc;
+}
+
+/*
+ * Accept a connection.  This will block waiting for somebody to show up.
+ * If that's not desirable, use checkConnection() to make sure something
+ * is pending.
+ */
+/*
+ * Accept a connection.  This will block waiting for somebody to show up.
+ * If that's not desirable, use checkConnection() to make sure something
+ * is pending.
+ *
+ * On success, clientSock is live, the handshake flag is set, and the
+ * wake pipe has been created.
+ */
+static bool acceptConnection(JdwpState* state)
+{
+    JdwpNetState* netState = state->netState;
+    union {
+        struct sockaddr_in  addrInet;
+        struct sockaddr     addrPlain;
+    } addr;
+    socklen_t addrlen;
+    int sock;
+
+    if (netState->listenSock < 0)
+        return false;       /* you're not listening! */
+
+    assert(netState->clientSock < 0);      /* must not already be talking */
+
+    addrlen = sizeof(addr);
+    do {
+        sock = accept(netState->listenSock, &addr.addrPlain, &addrlen);
+        if (sock < 0 && errno != EINTR) {
+            // When we call shutdown() on the socket, accept() returns with
+            // EINVAL.  Don't gripe about it.
+            if (errno == EINVAL)
+                LOGVV("accept failed: %s\n", strerror(errno));
+            else
+                LOGE("accept failed: %s\n", strerror(errno));
+            return false;
+        }
+    } while (sock < 0);
+
+    netState->remoteAddr = addr.addrInet.sin_addr;
+    netState->remotePort = ntohs(addr.addrInet.sin_port);
+    LOGV("+++ accepted connection from %s:%u\n",
+        inet_ntoa(netState->remoteAddr), netState->remotePort);
+
+    netState->clientSock = sock;
+    netState->awaitingHandshake = true;
+    netState->inputCount = 0;
+
+    LOGV("Setting TCP_NODELAY on accepted socket\n");
+    setNoDelay(netState->clientSock);
+
+    if (pipe(netState->wakePipe) < 0) {
+        LOGE("pipe failed: %s\n", strerror(errno));
+        /* don't leak the socket or leave a half-configured connection */
+        close(netState->clientSock);
+        netState->clientSock = -1;
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Create a connection to a waiting debugger.
+ */
+/*
+ * Create a connection to a waiting debugger.
+ */
+static bool establishConnection(JdwpState* state)
+{
+    union {
+        struct sockaddr_in  addrInet;
+        struct sockaddr     addrPlain;
+    } addr;
+    struct hostent* pEntry;
+    char auxBuf[128];
+    int cc;
+
+    assert(state != NULL && state->netState != NULL);
+    assert(!state->params.server);
+    assert(state->params.host[0] != '\0');
+    assert(state->params.port != 0);
+
+    /*
+     * Start by resolving the host name.
+     *
+     * Resolver failures are reported through an h_errno-style code, not
+     * errno, so use hstrerror() to decode them.  (The old code declared a
+     * local "h_errno" that shadowed the <netdb.h> one and then passed the
+     * value to strerror(), producing bogus messages.)
+     */
+#ifdef HAVE_GETHOSTBYNAME_R
+    struct hostent he;
+    int hostError;
+    cc = gethostbyname_r(state->params.host, &he, auxBuf, sizeof(auxBuf),
+            &pEntry, &hostError);
+    if (cc != 0) {
+        LOGW("gethostbyname_r('%s') failed: %s\n",
+            state->params.host, hstrerror(hostError));
+        return false;
+    }
+
+#else
+    h_errno = 0;
+    pEntry = gethostbyname(state->params.host);
+    if (pEntry == NULL) {
+        LOGW("gethostbyname('%s') failed: %s\n",
+            state->params.host, hstrerror(h_errno));
+        return false;
+    }
+#endif
+
+    /* copy it out ASAP to minimize risk of multithreaded annoyances */
+    memcpy(&addr.addrInet.sin_addr, pEntry->h_addr, pEntry->h_length);
+    addr.addrInet.sin_family = pEntry->h_addrtype;
+
+    addr.addrInet.sin_port = htons(state->params.port);
+
+    LOGI("Connecting out to '%s' %d\n",
+        inet_ntoa(addr.addrInet.sin_addr), ntohs(addr.addrInet.sin_port));
+
+    /*
+     * Create a socket.
+     */
+    JdwpNetState* netState;
+    netState = state->netState;
+    netState->clientSock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
+    if (netState->clientSock < 0) {
+        LOGE("Unable to create socket: %s\n", strerror(errno));
+        return false;
+    }
+
+    /*
+     * Try to connect.
+     */
+    if (connect(netState->clientSock, &addr.addrPlain, sizeof(addr)) != 0) {
+        LOGE("Unable to connect to %s:%d: %s\n",
+            inet_ntoa(addr.addrInet.sin_addr), ntohs(addr.addrInet.sin_port),
+            strerror(errno));
+        close(netState->clientSock);
+        netState->clientSock = -1;
+        return false;
+    }
+
+    LOGI("Connection established to %s (%s:%d)\n",
+        state->params.host, inet_ntoa(addr.addrInet.sin_addr),
+        ntohs(addr.addrInet.sin_port));
+    netState->awaitingHandshake = true;
+    netState->inputCount = 0;
+
+    setNoDelay(netState->clientSock);
+
+    if (pipe(netState->wakePipe) < 0) {
+        LOGE("pipe failed: %s\n", strerror(errno));
+        /* don't leave a half-configured connection behind */
+        close(netState->clientSock);
+        netState->clientSock = -1;
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Close the connection to the debugger.
+ *
+ * Reset the state so we're ready to receive a new connection.
+ */
+/*
+ * Close the connection to the debugger.
+ *
+ * Reset the state so we're ready to receive a new connection.
+ */
+static void closeConnection(JdwpState* state)
+{
+    JdwpNetState* netState;
+
+    assert(state != NULL && state->netState != NULL);
+
+    netState = state->netState;
+    if (netState->clientSock < 0)
+        return;         /* not connected; nothing to do */
+
+    LOGV("+++ closed connection to %s:%u\n",
+        inet_ntoa(netState->remoteAddr), netState->remotePort);
+
+    close(netState->clientSock);
+    netState->clientSock = -1;
+}
+
+/*
+ * Figure out if we have a full packet in the buffer.
+ */
+/*
+ * Figure out if we have a full packet in the buffer.
+ */
+static bool haveFullPacket(JdwpNetState* netState)
+{
+    long length;
+
+    /* the handshake is a fixed-length string, not a JDWP packet */
+    if (netState->awaitingHandshake)
+        return netState->inputCount >= (int) kMagicHandshakeLen;
+
+    /* need at least the 4-byte big-endian length field */
+    if (netState->inputCount < 4)
+        return false;
+
+    length = get4BE(netState->inputBuffer);
+    return netState->inputCount >= length;
+}
+
+/*
+ * Consume bytes from the buffer.
+ *
+ * This would be more efficient with a circular buffer.  However, we're
+ * usually only going to find one packet, which is trivial to handle.
+ */
+/*
+ * Consume bytes from the front of the input buffer.
+ *
+ * This would be more efficient with a circular buffer.  However, we're
+ * usually only going to find one packet, which is trivial to handle.
+ */
+static void consumeBytes(JdwpNetState* netState, int count)
+{
+    int remaining;
+
+    assert(count > 0);
+    assert(count <= netState->inputCount);
+
+    remaining = netState->inputCount - count;
+    if (remaining > 0) {
+        /* slide the unconsumed tail down; regions overlap, so memmove */
+        memmove(netState->inputBuffer, netState->inputBuffer + count,
+            remaining);
+    }
+    netState->inputCount = remaining;
+}
+
+/*
+ * Dump the contents of a packet to stdout.
+ */
+/*
+ * Dump the header (and hex data) of a packet to the log.
+ */
+static void dumpPacket(const unsigned char* packetBuf)
+{
+    const unsigned char* buf = packetBuf;
+    u4 length, id;
+    u1 flags, cmdSet, cmd;
+    u2 error;
+    bool reply;
+    int dataLen;
+
+    /* defaults so the log line shows sane values for either packet kind */
+    cmd = cmdSet = 0xcc;
+    error = 0;
+
+    length = read4BE(&buf);
+    id = read4BE(&buf);
+    flags = read1(&buf);
+    if ((flags & kJDWPFlagReply) != 0) {
+        reply = true;
+        error = read2BE(&buf);
+    } else {
+        reply = false;
+        cmdSet = read1(&buf);
+        cmd = read1(&buf);
+    }
+
+    dataLen = length - (buf - packetBuf);
+
+    /* include the reply error code; it was parsed but dropped before */
+    LOGV("--- %s: dataLen=%u id=0x%08x flags=0x%02x cmd=%d/%d err=%d\n",
+        reply ? "reply" : "req",
+        dataLen, id, flags, cmdSet, cmd, error);
+    if (dataLen > 0)
+        dvmPrintHexDumpDbg(buf, dataLen, LOG_TAG);
+}
+
+/*
+ * Handle a packet.  Returns "false" if we encounter a connection-fatal error.
+ */
+static bool handlePacket(JdwpState* state)
+{
+    JdwpNetState* netState = state->netState;
+    const unsigned char* buf = netState->inputBuffer;
+    JdwpReqHeader hdr;
+    u4 length, id;
+    u1 flags, cmdSet, cmd;
+    u2 error;
+    bool reply;
+    int dataLen;
+
+    cmd = cmdSet = 0;       // shut up gcc
+
+    /*dumpPacket(netState->inputBuffer);*/
+
+    /* parse the 11-byte header: 4 length, 4 id, 1 flags, then 2 error
+     * (reply) or 1 cmdSet + 1 cmd (request) */
+    length = read4BE(&buf);
+    id = read4BE(&buf);
+    flags = read1(&buf);
+    if ((flags & kJDWPFlagReply) != 0) {
+        reply = true;
+        error = read2BE(&buf);
+    } else {
+        reply = false;
+        cmdSet = read1(&buf);
+        cmd = read1(&buf);
+    }
+
+    /* caller checked haveFullPacket(), so the whole packet is buffered */
+    assert((int) length <= netState->inputCount);
+    dataLen = length - (buf - netState->inputBuffer);
+
+    if (!reply) {
+        ExpandBuf* pReply = expandBufAlloc();
+
+        hdr.length = length;
+        hdr.id = id;
+        hdr.cmdSet = cmdSet;
+        hdr.cmd = cmd;
+        dvmJdwpProcessRequest(state, &hdr, buf, dataLen, pReply);
+        if (expandBufGetLength(pReply) > 0) {
+            int cc;
+
+            /*
+             * TODO: we currently assume the write() will complete in one
+             * go, which may not be safe for a network socket.  We may need
+             * to mutex this against sendRequest().
+             */
+            cc = write(netState->clientSock, expandBufGetBuffer(pReply),
+                    expandBufGetLength(pReply));
+            if (cc != (int) expandBufGetLength(pReply)) {
+                LOGE("Failed sending reply to debugger: %s\n", strerror(errno));
+                expandBufFree(pReply);
+                return false;
+            }
+        } else {
+            LOGW("No reply created for set=%d cmd=%d\n", cmdSet, cmd);
+        }
+        expandBufFree(pReply);
+    } else {
+        /* we never send requests that expect replies, so this is unexpected */
+        LOGV("reply?!\n");
+        assert(false);
+    }
+
+    LOGV("----------\n");
+
+    /* drop the handled packet from the input buffer */
+    consumeBytes(netState, length);
+    return true;
+}
+
+/*
+ * Process incoming data.  If no data is available, this will block until
+ * some arrives.
+ *
+ * If we get a full packet, handle it.
+ *
+ * To take some of the mystery out of life, we want to reject incoming
+ * connections if we already have a debugger attached.  If we don't, the
+ * debugger will just mysteriously hang until it times out.  We could just
+ * close the listen socket, but there's a good chance we won't be able to
+ * bind to the same port again, which would confuse utilities.
+ *
+ * Returns "false" on error (indicating that the connection has been severed),
+ * "true" if things are still okay.
+ */
+static bool processIncoming(JdwpState* state)
+{
+    JdwpNetState* netState = state->netState;
+    int readCount;
+
+    assert(netState->clientSock >= 0);
+
+    if (!haveFullPacket(netState)) {
+        /* read some more, looping until we have data */
+        errno = 0;
+        while (1) {
+            int selCount;
+            fd_set readfds;
+            int maxfd;
+            int fd;
+
+            maxfd = netState->listenSock;
+            if (netState->clientSock > maxfd)
+                maxfd = netState->clientSock;
+            if (netState->wakePipe[0] > maxfd)
+                maxfd = netState->wakePipe[0];
+
+            if (maxfd < 0) {
+                LOGV("+++ all fds are closed\n");
+                return false;
+            }
+
+            FD_ZERO(&readfds);
+
+            /* configure fds; note these may get zapped by another thread */
+            fd = netState->listenSock;
+            if (fd >= 0)
+                FD_SET(fd, &readfds);
+            fd = netState->clientSock;
+            if (fd >= 0)
+                FD_SET(fd, &readfds);
+            fd = netState->wakePipe[0];
+            if (fd >= 0) {
+                FD_SET(fd, &readfds);
+            } else {
+                LOGI("NOTE: entering select w/o wakepipe\n");
+            }
+
+            /*
+             * Select blocks until it sees activity on the file descriptors.
+             * Closing the local file descriptor does not count as activity,
+             * so we can't rely on that to wake us up (it works for read()
+             * and accept(), but not select()).
+             *
+             * We can do one of three things: (1) send a signal and catch
+             * EINTR, (2) open an additional fd ("wakePipe") and write to
+             * it when it's time to exit, or (3) time out periodically and
+             * re-issue the select.  We're currently using #2, as it's more
+             * reliable than #1 and generally better than #3.  Wastes two fds.
+             */
+            selCount = select(maxfd+1, &readfds, NULL, NULL, NULL);
+            if (selCount < 0) {
+                if (errno == EINTR)
+                    continue;
+                LOGE("select failed: %s\n", strerror(errno));
+                goto fail;
+            }
+
+            if (netState->wakePipe[0] >= 0 &&
+                FD_ISSET(netState->wakePipe[0], &readfds))
+            {
+                if (netState->listenSock >= 0)
+                    LOGE("Exit wake set, but not exiting?\n");
+                else
+                    LOGD("Got wake-up signal, bailing out of select\n");
+                goto fail;
+            }
+            if (netState->listenSock >= 0 &&
+                FD_ISSET(netState->listenSock, &readfds))
+            {
+                LOGI("Ignoring second debugger -- accepting and dropping\n");
+                union {
+                    struct sockaddr_in   addrInet;
+                    struct sockaddr      addrPlain;
+                } addr;
+                socklen_t addrlen;
+                int tmpSock;
+                /* accept() requires addrlen set to the buffer size on
+                 * input; it was previously left uninitialized here */
+                addrlen = sizeof(addr);
+                tmpSock = accept(netState->listenSock, &addr.addrPlain,
+                                &addrlen);
+                if (tmpSock < 0)
+                    LOGI("Weird -- accept failed\n");
+                else
+                    close(tmpSock);
+            }
+            if (netState->clientSock >= 0 &&
+                FD_ISSET(netState->clientSock, &readfds))
+            {
+                readCount = read(netState->clientSock,
+                                netState->inputBuffer + netState->inputCount,
+                    sizeof(netState->inputBuffer) - netState->inputCount);
+                if (readCount < 0) {
+                    /* read failed */
+                    if (errno != EINTR)
+                        goto fail;
+                    LOGD("+++ EINTR hit\n");
+                    return true;
+                } else if (readCount == 0) {
+                    /* EOF hit -- far end went away */
+                    LOGD("+++ peer disconnected\n");
+                    goto fail;
+                } else
+                    break;
+            }
+        }
+
+        netState->inputCount += readCount;
+        if (!haveFullPacket(netState))
+            return true;        /* still not there yet */
+    }
+
+    /*
+     * Special-case the initial handshake.  For some bizarre reason we're
+     * expected to emulate bad tty settings by echoing the request back
+     * exactly as it was sent.  Note the handshake is always initiated by
+     * the debugger, no matter who connects to whom.
+     *
+     * Other than this one case, the protocol [claims to be] stateless.
+     */
+    if (netState->awaitingHandshake) {
+        int cc;
+
+        if (memcmp(netState->inputBuffer,
+                kMagicHandshake, kMagicHandshakeLen) != 0)
+        {
+            LOGE("ERROR: bad handshake '%.14s'\n", netState->inputBuffer);
+            goto fail;
+        }
+
+        errno = 0;
+        cc = write(netState->clientSock, netState->inputBuffer,
+                kMagicHandshakeLen);
+        if (cc != kMagicHandshakeLen) {
+            LOGE("Failed writing handshake bytes: %s (%d of %d)\n",
+                strerror(errno), cc, (int) kMagicHandshakeLen);
+            goto fail;
+        }
+
+        consumeBytes(netState, kMagicHandshakeLen);
+        netState->awaitingHandshake = false;
+        LOGV("+++ handshake complete\n");
+        return true;
+    }
+
+    /*
+     * Handle this packet.
+     */
+    return handlePacket(state);
+
+fail:
+    closeConnection(state);
+    return false;
+}
+
+/*
+ * Send a request.
+ *
+ * The entire packet must be sent with a single write() call to avoid
+ * threading issues.
+ *
+ * Returns "true" if it was sent successfully.
+ */
+/*
+ * Send a request.
+ *
+ * The entire packet must be sent with a single write() call to avoid
+ * threading issues.
+ *
+ * Returns "true" if it was sent successfully.
+ */
+static bool sendRequest(JdwpState* state, ExpandBuf* pReq)
+{
+    JdwpNetState* netState = state->netState;
+    int cc;
+
+    dumpPacket(expandBufGetBuffer(pReq));
+
+    if (netState->clientSock < 0) {
+        /* can happen with some DDMS events */
+        LOGV("NOT sending request -- no debugger is attached\n");
+        return false;
+    }
+
+    /*
+     * TODO: we currently assume the write() will complete in one
+     * go, which may not be safe for a network socket.  We may need
+     * to mutex this against handlePacket().
+     */
+    errno = 0;
+    cc = write(netState->clientSock, expandBufGetBuffer(pReq),
+            expandBufGetLength(pReq));
+    if (cc == (int) expandBufGetLength(pReq))
+        return true;
+
+    LOGE("Failed sending req to debugger: %s (%d of %d)\n",
+        strerror(errno), cc, (int) expandBufGetLength(pReq));
+    return false;
+}
+
+
+/*
+ * Our functions.
+ */
+static const JdwpTransport socketTransport = {
+    prepareSocket,          /* startup */
+    acceptConnection,       /* accept */
+    establishConnection,    /* establish */
+    closeConnection,        /* close */
+    netShutdownExtern,      /* shutdown */
+    netFreeExtern,          /* free */
+    isConnected,            /* isConnected */
+    awaitingHandshake,      /* awaitingHandshake */
+    processIncoming,        /* processIncoming */
+    sendRequest             /* sendRequest */
+};
+
+/*
+ * Return our set.
+ */
+const JdwpTransport* dvmJdwpSocketTransport(void)
+{
+    /* table is static const, so the pointer stays valid for process life */
+    return &socketTransport;
+}
+
diff --git a/vm/jdwp/README.txt b/vm/jdwp/README.txt
new file mode 100644
index 0000000..b511cc8
--- /dev/null
+++ b/vm/jdwp/README.txt
@@ -0,0 +1,13 @@
+Java Debug Wire Protocol support
+
+This is a reasonably complete implementation, but only messages that are
+actually generated by debuggers have been implemented.  The reasoning
+behind this is that it's better to leave a call unimplemented than have
+something that appears implemented but has never been tested.
+
+An attempt has been made to keep the implementation distinct from the VM,
+with Debugger.c acting as a sort of portability layer, so that the code
+might be useful in other projects.  Once you get multiple simultaneous
+events and debugger requests with thread suspension bouncing around,
+though, it's difficult to keep things "generic".
+
diff --git a/vm/mterp/Makefile-mterp b/vm/mterp/Makefile-mterp
new file mode 100644
index 0000000..151d5c8
--- /dev/null
+++ b/vm/mterp/Makefile-mterp
@@ -0,0 +1,50 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Makefile for the Dalvik modular interpreter.  This is not currently
+# integrated into the build system.
+#
+
+SHELL := /bin/sh
+
+# Build system has TARGET_ARCH=arm, but we need the exact architecture.
+# Sooner is armv5, Dream is armv6; someday we may have x86.  The simulator
+# is "desktop".
+#
+# To generate sources for all targets:
+# for arch in desktop armv5; do TARGET_ARCH_EXT=$arch make -f Makefile-mterp; done
+#
+#TARGET_ARCH_EXT := armv5
+
+OUTPUT_DIR := out
+
+# Accumulate all possible dependencies for the generated files in a very
+# conservative fashion.  If it's not one of the generated files in "out",
+# assume it's a dependency.
+SOURCE_DEPS := \
+	$(shell find . -path ./$(OUTPUT_DIR) -prune -o -type f -print)
+
+# Source files generated by the script.  There's always one C and one
+# assembly file, though in practice one or the other could be empty.
+GEN_SOURCES := \
+	$(OUTPUT_DIR)/InterpC-$(TARGET_ARCH_EXT).c \
+	$(OUTPUT_DIR)/InterpAsm-$(TARGET_ARCH_EXT).S
+
+target: $(GEN_SOURCES)
+
+$(GEN_SOURCES): $(SOURCE_DEPS)
+	@mkdir -p $(OUTPUT_DIR)
+	./gen-mterp.py $(TARGET_ARCH_EXT) $(OUTPUT_DIR)
+
diff --git a/vm/mterp/Mterp.c b/vm/mterp/Mterp.c
new file mode 100644
index 0000000..6ef00f1
--- /dev/null
+++ b/vm/mterp/Mterp.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Mterp entry point and support functions.
+ */
+#include "mterp/Mterp.h"
+
+#include <stddef.h>
+
+
+/*
+ * Verify some constants used by the mterp interpreter.
+ */
+bool dvmCheckAsmConstants(void)
+{
+    /* markers exported by the generated assembly interpreter */
+    extern char dvmAsmInstructionStart[];
+    extern char dvmAsmInstructionEnd[];
+    extern char dvmAsmSisterStart[];
+    extern char dvmAsmSisterEnd[];
+
+    bool failed = false;
+
+    /*
+     * With ASM_DEF_VERIFY defined, the header below expands into runtime
+     * checks that set "failed" if any constant has drifted from the C
+     * definitions.
+     */
+#define ASM_DEF_VERIFY
+#include "mterp/common/asm-constants.h"
+
+    if (failed) {
+        LOGE("Please correct the values in mterp/common/asm-constants.h\n");
+        dvmAbort();
+    }
+
+    /*
+     * If an instruction overflows the 64-byte handler size limit, it will
+     * push everything up and alter the total size.  Check it here.
+     */
+    const int width = 64;
+    int interpSize = dvmAsmInstructionEnd - dvmAsmInstructionStart;
+    /* interpSize == 0 presumably means no asm interpreter was linked in */
+    if (interpSize != 0 && interpSize != 256*width) {
+        LOGE("ERROR: unexpected asm interp size %d\n", interpSize);
+        LOGE("(did an instruction handler exceed %d bytes?)\n", width);
+        dvmAbort();
+    }
+    int sisterSize = dvmAsmSisterEnd - dvmAsmSisterStart;
+    LOGV("mterp: interp is %d bytes, sisters are %d bytes\n",
+        interpSize, sisterSize);
+
+    return !failed;
+}
+
+
+/*
+ * "Standard" mterp entry point.  This sets up a "glue" structure and then
+ * calls into the assembly interpreter implementation.
+ */
+bool dvmMterpStd(Thread* self, InterpState* glue)
+{
+    int changeInterp;
+
+    /* configure mterp items */
+    glue->self = self;
+    glue->methodClassDex = glue->method->clazz->pDvmDex;
+
+    /* copy per-thread values into the glue so asm code can reach them */
+    glue->interpStackEnd = self->interpStackEnd;
+    glue->pSelfSuspendCount = &self->suspendCount;
+#if defined(WITH_DEBUGGER)
+    glue->pDebuggerActive = &gDvm.debuggerActive;
+#endif
+#if defined(WITH_PROFILER)
+    glue->pActiveProfilers = &gDvm.activeProfilers;
+#endif
+
+    IF_LOGVV() {
+        char* desc = dexProtoCopyMethodDescriptor(&glue->method->prototype);
+        LOGVV("mterp threadid=%d entry %d: %s.%s %s\n",
+            dvmThreadSelf()->threadId,
+            glue->entryPoint,
+            glue->method->clazz->descriptor,
+            glue->method->name,
+            desc);
+        free(desc);
+    }
+    //LOGI("glue is %p, pc=%p, fp=%p\n", glue, glue->pc, glue->fp);
+    //LOGI("first instruction is 0x%04x\n", glue->pc[0]);
+
+    /* run the assembly interpreter until it bails out */
+    changeInterp = dvmMterpStdRun(glue);
+    if (!changeInterp) {
+        /* this is a "normal" exit; we're not coming back */
+#ifdef LOG_INSTR
+        LOGD("|-- Leaving interpreter loop");
+#endif
+        return false;
+    } else {
+        /* we're "standard", so switch to "debug" */
+        LOGVV("  mterp returned, changeInterp=%d\n", changeInterp);
+        glue->nextMode = INTERP_DBG;
+        return true;
+    }
+}
+
diff --git a/vm/mterp/Mterp.h b/vm/mterp/Mterp.h
new file mode 100644
index 0000000..65e831f
--- /dev/null
+++ b/vm/mterp/Mterp.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Some declarations used throughout mterp.
+ */
+#ifndef _DALVIK_MTERP_MTERP
+#define _DALVIK_MTERP_MTERP
+
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+
+/*
+ * Interpreter state, passed into C functions from assembly stubs.  The
+ * assembly code exports all registers into the "glue" structure before
+ * calling, then extracts them when the call returns.
+ */
+typedef InterpState MterpGlue;
+
+/*
+ * Call this during initialization to verify that the values in asm-constants.h
+ * are still correct.
+ */
+bool dvmCheckAsmConstants(void);
+
+/*
+ * Local entry and exit points.  All implementations must provide these two.
+ *
+ * dvmMterpStdRun() returns the "changeInterp" argument from dvmMterpStdBail(),
+ * indicating whether we want to bail out of the interpreter or just switch
+ * between "standard" and "debug" mode.
+ */
+bool dvmMterpStdRun(MterpGlue* glue);
+void dvmMterpStdBail(MterpGlue* glue, bool changeInterp);
+
+#endif /*_DALVIK_MTERP_MTERP*/
diff --git a/vm/mterp/README.txt b/vm/mterp/README.txt
new file mode 100644
index 0000000..34d229c
--- /dev/null
+++ b/vm/mterp/README.txt
@@ -0,0 +1,188 @@
+Dalvik "mterp" README
+
+NOTE: Find rebuilding instructions at the bottom of this file.
+
+
+==== Overview ====
+
+The architecture-specific config files determine what goes into two
+generated output files (InterpC-<arch>.c, InterpAsm-<arch>.S).  The goal is
+to make it easy to swap C and assembly sources during initial development
+and testing, and to provide a way to use architecture-specific versions
+of some operations (e.g. making use of PLD instructions on ARMv6).
+
+Two basic assumptions are made about the operation of the interpreter:
+
+ - The assembly version uses fixed-size areas for each instruction
+   (e.g. 64 bytes).  "Overflow" code is tacked on to the end.
+ - When a C implementation is desired, the assembly version packs all
+   local state into a "glue" struct, and passes that into the C function.
+   Updates to the state are pulled out of the "glue" on return.
+
+
+The "arch" value should indicate an architecture family with common
+programming characteristics, so "armv5" would work for all ARMv5 CPUs,
+but might not be backward- or forward-compatible.  (We *might* want to
+specify the ABI model as well, e.g. "armv5-eabi", but currently that adds
+verbosity without value.)
+
+
+==== Config file format ====
+
+The config files are parsed from top to bottom.  Each line in the file
+may be blank, hold a comment (line starts with '#'), or be a command.
+
+The commands are:
+
+  handler-size <bytes>
+
+    Specify the size of the assembly region, in bytes.  On most platforms
+    this will need to be a power of 2.
+
+  import <filename>
+
+    The specified file is included immediately, in its entirety.  No
+    substitutions are performed.  ".c" and ".h" files are copied to the
+    C output, ".S" files are copied to the asm output.
+
+  asm-stub <filename>
+
+    The named file will be included whenever an assembly "stub" is needed.
+    Text substitution is performed on the opcode name.
+
+  op-start <directory>
+
+    Indicates the start of the opcode list.  Must precede any "op"
+    commands.  The specified directory is the default location to pull
+    instruction files from.
+
+  op <opcode> <directory>
+
+    Can only appear after "op-start" and before "op-end".  Overrides the
+    default source file location of the specified opcode.  The opcode
+    definition will come from the specified file, e.g. "op OP_NOP armv5"
+    will load from "armv5/OP_NOP.S".  A substitution dictionary will be
+    applied (see below).
+
+  op-end
+
+    Indicates the end of the opcode list.  All 256 opcodes are emitted
+    when this is seen, followed by any code that didn't fit inside the
+    fixed-size instruction handler space.
+
+
+The order of "op" directives is not significant; the generation tool will
+extract ordering info from the VM sources.
+
+Typically the form in which most opcodes currently exist is used in
+the "op-start" directive.  For a new port you would start with "c",
+and add architecture-specific "op" entries as you write instructions.
+When complete it will default to the target architecture, and you insert
+"c" ops to stub out platform-specific code.
+
+For the <directory> specified in the "op" command, the "c" directory
+is special in two ways: (1) the sources are assumed to be C code, and
+will be inserted into the generated C file; (2) when a C implementation
+is emitted, a "glue stub" is emitted in the assembly source file.
+(The generator script always emits 256 assembly instructions, unless
+"asm-stub" was left blank, in which case it only emits some labels.)
+
+
+==== Instruction file format ====
+
+The assembly instruction files are simply fragments of assembly sources.
+The starting label will be provided by the generation tool, as will
+declarations for the segment type and alignment.  The expected target
+assembler is GNU "as", but others will work (may require fiddling with
+some of the pseudo-ops emitted by the generation tool).
+
+The C files do a bunch of fancy things with macros in an attempt to keep
+the code "cut & pastable" from the portable interpreter.  (This will be
+reduced when the code bases are merged.)
+
+A substitution dictionary is applied to all opcode fragments as they are
+appended to the output.  Substitutions can look like "$value" or "${value}".
+
+The dictionary always includes:
+
+  $opcode - opcode name, e.g. "OP_NOP"
+  $opnum - opcode number, e.g. 0 for OP_NOP
+  $handler_size_bytes - max size of an instruction handler, in bytes
+  $handler_size_bits - max size of an instruction handler, log 2
+
+Both C and assembly sources will be passed through the C pre-processor,
+so you can take advantage of C-style comments and preprocessor directives
+like "#define".
+
+Some generator operations are available.
+
+  %include "filename" [subst-dict]
+
+    Includes the file, which should look like "armv5/OP_NOP.S".  You can
+    specify values for the substitution dictionary, using standard Python
+    syntax.  For example, this:
+      %include "armv5/unop.S" {"result":"r1"}
+    would insert "armv5/unop.S" at the current file position, replacing
+    occurrences of "$result" with "r1".
+
+  %default <subst-dict>
+
+    Specify default substitution dictionary values, using standard Python
+    syntax.  Useful if you want to have a "base" version and variants.
+
+  %break
+
+    Identifies the split between the main portion of the instruction
+    handler (which must fit in "handler-size" bytes) and the "sister"
+    code, which is appended to the end of the instruction handler block.
+
+  %verify "message"
+
+    Leave a note to yourself about what needs to be tested.  (This may
+    turn into something more interesting someday; for now, it just gets
+    stripped out before the output is generated.)
+
+The generation tool does *not* print a warning if your instructions
+exceed "handler-size", but the VM will abort on startup if it detects an
+oversized handler.  On architectures with fixed-width instructions this
+is easy to work with; on others you will need to count bytes.
+
+
+==== Sharing constants ====
+
+The file "common/asm-constants.h" has some definitions for constant
+values, structure sizes, and struct member offsets.  The format is fairly
+restricted, as simple macros are used to massage it for use with both C
+(where it is verified) and assembly (where the definitions are used).
+
+If a constant in the file becomes out of sync, the VM will log an error
+message and abort during startup.
+
+
+==== Tips ====
+
+If you need to debug the initial piece of an opcode handler, and your
+debug code expands it beyond the handler size limit, you can insert a
+generic header at the top:
+
+    b       ${opcode}_start
+%break
+${opcode}_start:
+
+If you already have a %break, it's okay to leave it in place -- the second
+%break is ignored.
+
+
+==== Rebuilding ====
+
+If you change any of the source file fragments, you need to rebuild the
+combined source files in the "out" directory.  Make sure the files in
+"out" are editable, then:
+
+    $ cd mterp
+    $ ./rebuild.sh
+
+As of this writing, this requires Python 2.5. You may see inscrutable
+error messages or just general failure if you have a different version
+of Python installed.
+
diff --git a/vm/mterp/armv5/OP_ADD_DOUBLE.S b/vm/mterp/armv5/OP_ADD_DOUBLE.S
new file mode 100644
index 0000000..e1ed0d5
--- /dev/null
+++ b/vm/mterp/armv5/OP_ADD_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide.S" {"instr":"bl      __aeabi_dadd"}
diff --git a/vm/mterp/armv5/OP_ADD_DOUBLE_2ADDR.S b/vm/mterp/armv5/OP_ADD_DOUBLE_2ADDR.S
new file mode 100644
index 0000000..299ca37
--- /dev/null
+++ b/vm/mterp/armv5/OP_ADD_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide2addr.S" {"instr":"bl      __aeabi_dadd"}
diff --git a/vm/mterp/armv5/OP_ADD_FLOAT.S b/vm/mterp/armv5/OP_ADD_FLOAT.S
new file mode 100644
index 0000000..318b733
--- /dev/null
+++ b/vm/mterp/armv5/OP_ADD_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"instr":"bl      __aeabi_fadd"}
diff --git a/vm/mterp/armv5/OP_ADD_FLOAT_2ADDR.S b/vm/mterp/armv5/OP_ADD_FLOAT_2ADDR.S
new file mode 100644
index 0000000..0195fd4
--- /dev/null
+++ b/vm/mterp/armv5/OP_ADD_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"instr":"bl      __aeabi_fadd"}
diff --git a/vm/mterp/armv5/OP_ADD_INT.S b/vm/mterp/armv5/OP_ADD_INT.S
new file mode 100644
index 0000000..b69577c
--- /dev/null
+++ b/vm/mterp/armv5/OP_ADD_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"instr":"add     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_ADD_INT_2ADDR.S b/vm/mterp/armv5/OP_ADD_INT_2ADDR.S
new file mode 100644
index 0000000..0d54f91
--- /dev/null
+++ b/vm/mterp/armv5/OP_ADD_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"instr":"add     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_ADD_INT_LIT16.S b/vm/mterp/armv5/OP_ADD_INT_LIT16.S
new file mode 100644
index 0000000..2710041
--- /dev/null
+++ b/vm/mterp/armv5/OP_ADD_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit16.S" {"instr":"add     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_ADD_INT_LIT8.S b/vm/mterp/armv5/OP_ADD_INT_LIT8.S
new file mode 100644
index 0000000..074fb90
--- /dev/null
+++ b/vm/mterp/armv5/OP_ADD_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit8.S" {"instr":"add     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_ADD_LONG.S b/vm/mterp/armv5/OP_ADD_LONG.S
new file mode 100644
index 0000000..b30ff53
--- /dev/null
+++ b/vm/mterp/armv5/OP_ADD_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide.S" {"preinstr":"adds    r0, r0, r2", "instr":"adc     r1, r1, r3"}
diff --git a/vm/mterp/armv5/OP_ADD_LONG_2ADDR.S b/vm/mterp/armv5/OP_ADD_LONG_2ADDR.S
new file mode 100644
index 0000000..7edc50b
--- /dev/null
+++ b/vm/mterp/armv5/OP_ADD_LONG_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide2addr.S" {"preinstr":"adds    r0, r0, r2", "instr":"adc     r1, r1, r3"}
diff --git a/vm/mterp/armv5/OP_AGET.S b/vm/mterp/armv5/OP_AGET.S
new file mode 100644
index 0000000..7a0950b
--- /dev/null
+++ b/vm/mterp/armv5/OP_AGET.S
@@ -0,0 +1,28 @@
+%default { "load":"ldr", "shift":"2" }
+%verify "executed"
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #$shift     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    $load   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r9)                    @ vAA<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_AGET_BOOLEAN.S b/vm/mterp/armv5/OP_AGET_BOOLEAN.S
new file mode 100644
index 0000000..7745821
--- /dev/null
+++ b/vm/mterp/armv5/OP_AGET_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_AGET.S" { "load":"ldrb", "shift":"0" }
diff --git a/vm/mterp/armv5/OP_AGET_BYTE.S b/vm/mterp/armv5/OP_AGET_BYTE.S
new file mode 100644
index 0000000..cf7d01b
--- /dev/null
+++ b/vm/mterp/armv5/OP_AGET_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_AGET.S" { "load":"ldrsb", "shift":"0" }
diff --git a/vm/mterp/armv5/OP_AGET_CHAR.S b/vm/mterp/armv5/OP_AGET_CHAR.S
new file mode 100644
index 0000000..1a485b2
--- /dev/null
+++ b/vm/mterp/armv5/OP_AGET_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_AGET.S" { "load":"ldrh", "shift":"1" }
diff --git a/vm/mterp/armv5/OP_AGET_OBJECT.S b/vm/mterp/armv5/OP_AGET_OBJECT.S
new file mode 100644
index 0000000..200b7b2
--- /dev/null
+++ b/vm/mterp/armv5/OP_AGET_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_AGET.S"
diff --git a/vm/mterp/armv5/OP_AGET_SHORT.S b/vm/mterp/armv5/OP_AGET_SHORT.S
new file mode 100644
index 0000000..07fbe94
--- /dev/null
+++ b/vm/mterp/armv5/OP_AGET_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_AGET.S" { "load":"ldrsh", "shift":"1" }
diff --git a/vm/mterp/armv5/OP_AGET_WIDE.S b/vm/mterp/armv5/OP_AGET_WIDE.S
new file mode 100644
index 0000000..ec346ca
--- /dev/null
+++ b/vm/mterp/armv5/OP_AGET_WIDE.S
@@ -0,0 +1,33 @@
+%verify "executed"
+    /*
+     * Array get, 64 bits.  vAA <- vBB[vCC].
+     *
+     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
+     */
+    /* aget-wide vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcc     .L${opcode}_finish          @ okay, continue below
+    b       common_errArrayIndex        @ index >= length, bail
+    @ May want to swap the order of these two branches depending on how the
+    @ branch prediction (if any) handles conditional forward branches vs.
+    @ unconditional forward branches.
+%break
+
+.L${opcode}_finish:
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldrd    r2, [r0, #offArrayObject_contents]  @ r2/r3<- vBB[vCC]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r2-r3}                 @ vAA/vAA+1<- r2/r3
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_AND_INT.S b/vm/mterp/armv5/OP_AND_INT.S
new file mode 100644
index 0000000..2994661
--- /dev/null
+++ b/vm/mterp/armv5/OP_AND_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"instr":"and     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_AND_INT_2ADDR.S b/vm/mterp/armv5/OP_AND_INT_2ADDR.S
new file mode 100644
index 0000000..dab5bf9
--- /dev/null
+++ b/vm/mterp/armv5/OP_AND_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"instr":"and     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_AND_INT_LIT16.S b/vm/mterp/armv5/OP_AND_INT_LIT16.S
new file mode 100644
index 0000000..9cc90dd
--- /dev/null
+++ b/vm/mterp/armv5/OP_AND_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit16.S" {"instr":"and     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_AND_INT_LIT8.S b/vm/mterp/armv5/OP_AND_INT_LIT8.S
new file mode 100644
index 0000000..c1db66d
--- /dev/null
+++ b/vm/mterp/armv5/OP_AND_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit8.S" {"instr":"and     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_AND_LONG.S b/vm/mterp/armv5/OP_AND_LONG.S
new file mode 100644
index 0000000..8fbac54
--- /dev/null
+++ b/vm/mterp/armv5/OP_AND_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide.S" {"preinstr":"and     r0, r0, r2", "instr":"and     r1, r1, r3"}
diff --git a/vm/mterp/armv5/OP_AND_LONG_2ADDR.S b/vm/mterp/armv5/OP_AND_LONG_2ADDR.S
new file mode 100644
index 0000000..61b3615
--- /dev/null
+++ b/vm/mterp/armv5/OP_AND_LONG_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide2addr.S" {"preinstr":"and     r0, r0, r2", "instr":"and     r1, r1, r3"}
diff --git a/vm/mterp/armv5/OP_APUT.S b/vm/mterp/armv5/OP_APUT.S
new file mode 100644
index 0000000..f8ee4a8
--- /dev/null
+++ b/vm/mterp/armv5/OP_APUT.S
@@ -0,0 +1,28 @@
+%default { "store":"str", "shift":"2" }
+%verify "executed"
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #$shift     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r2, r9)                    @ r2<- vAA
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    $store  r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_APUT_BOOLEAN.S b/vm/mterp/armv5/OP_APUT_BOOLEAN.S
new file mode 100644
index 0000000..3e0f32a
--- /dev/null
+++ b/vm/mterp/armv5/OP_APUT_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_APUT.S" { "store":"strb", "shift":"0" }
diff --git a/vm/mterp/armv5/OP_APUT_BYTE.S b/vm/mterp/armv5/OP_APUT_BYTE.S
new file mode 100644
index 0000000..3e0f32a
--- /dev/null
+++ b/vm/mterp/armv5/OP_APUT_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_APUT.S" { "store":"strb", "shift":"0" }
diff --git a/vm/mterp/armv5/OP_APUT_CHAR.S b/vm/mterp/armv5/OP_APUT_CHAR.S
new file mode 100644
index 0000000..038bc47
--- /dev/null
+++ b/vm/mterp/armv5/OP_APUT_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_APUT.S" { "store":"strh", "shift":"1" }
diff --git a/vm/mterp/armv5/OP_APUT_OBJECT.S b/vm/mterp/armv5/OP_APUT_OBJECT.S
new file mode 100644
index 0000000..53a188a
--- /dev/null
+++ b/vm/mterp/armv5/OP_APUT_OBJECT.S
@@ -0,0 +1,46 @@
+%verify "executed"
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aput-object
+     */
+    /* op vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG(r1, r2)                    @ r1<- vBB (array object)
+    GET_VREG(r0, r3)                    @ r0<- vCC (requested index)
+    cmp     r1, #0                      @ null array object?
+    GET_VREG(r9, r9)                    @ r9<- vAA
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r1, #offArrayObject_length]    @ r3<- arrayObj->length
+    add     r10, r1, r0, lsl #2         @ r10<- arrayObj + index*width
+    cmp     r0, r3                      @ compare unsigned index, length
+    bcc     .L${opcode}_finish          @ we're okay, continue on
+    b       common_errArrayIndex        @ index >= length, bail
+
+%break
+    /*
+     * On entry:
+     *  r1 = vBB (arrayObj)
+     *  r9 = vAA (obj)
+     *  r10 = offset into array (vBB + vCC * width)
+     */
+.L${opcode}_finish:
+    cmp     r9, #0                      @ storing null reference?
+    beq     .L${opcode}_skip_check      @ yes, skip type checks
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    ldr     r1, [r1, #offObject_clazz]  @ r1<- arrayObj->clazz
+    bl      dvmCanPutArrayElement       @ test object type vs. array type
+    cmp     r0, #0                      @ okay?
+    beq     common_errArrayStore        @ no
+.L${opcode}_skip_check:
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_APUT_SHORT.S b/vm/mterp/armv5/OP_APUT_SHORT.S
new file mode 100644
index 0000000..038bc47
--- /dev/null
+++ b/vm/mterp/armv5/OP_APUT_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_APUT.S" { "store":"strh", "shift":"1" }
diff --git a/vm/mterp/armv5/OP_APUT_WIDE.S b/vm/mterp/armv5/OP_APUT_WIDE.S
new file mode 100644
index 0000000..48738cc
--- /dev/null
+++ b/vm/mterp/armv5/OP_APUT_WIDE.S
@@ -0,0 +1,33 @@
+%verify "executed"
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     *
+     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
+     */
+    /* aput-wide vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    bcc     .L${opcode}_finish          @ okay, continue below
+    b       common_errArrayIndex        @ index >= length, bail
+    @ May want to swap the order of these two branches depending on how the
+    @ branch prediction (if any) handles conditional forward branches vs.
+    @ unconditional forward branches.
+%break
+
+.L${opcode}_finish:
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    strd    r2, [r0, #offArrayObject_contents]  @ r2/r3<- vBB[vCC]
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_ARRAY_LENGTH.S b/vm/mterp/armv5/OP_ARRAY_LENGTH.S
new file mode 100644
index 0000000..5dc0f93
--- /dev/null
+++ b/vm/mterp/armv5/OP_ARRAY_LENGTH.S
@@ -0,0 +1,16 @@
+%verify "executed"
+    /*
+     * Return the length of an array.
+     */
+    mov     r1, rINST, lsr #12          @ r1<- B
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    GET_VREG(r0, r1)                    @ r0<- vB (object ref)
+    and     r2, r2, #15                 @ r2<- A
+    cmp     r0, #0                      @ is object null?
+    beq     common_errNullObject        @ yup, fail
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- array length
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r3, r2)                    @ vB<- length
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_CHECK_CAST.S b/vm/mterp/armv5/OP_CHECK_CAST.S
new file mode 100644
index 0000000..74f458b
--- /dev/null
+++ b/vm/mterp/armv5/OP_CHECK_CAST.S
@@ -0,0 +1,73 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast vAA, class@BBBB */
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH(r2, 1)                        @ r2<- BBBB
+    GET_VREG(r9, r3)                    @ r9<- object
+    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
+    cmp     r9, #0                      @ is object null?
+    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
+    beq     .L${opcode}_okay            @ null obj, cast always succeeds
+    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .L${opcode}_resolve         @ not resolved, do it now
+.L${opcode}_resolved:
+    cmp     r0, r1                      @ same class (trivial success)?
+    bne     .L${opcode}_fullcheck       @ no, do full check
+.L${opcode}_okay:
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from BBBB
+     *  r9 holds object
+     */
+.L${opcode}_fullcheck:
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    cmp     r0, #0                      @ failed?
+    bne     .L${opcode}_okay            @ no, success
+
+    @ A cast has failed.  We need to throw a ClassCastException with the
+    @ class of the object that failed to be cast.
+    EXPORT_PC()                         @ about to throw
+    ldr     r3, [r9, #offObject_clazz]  @ r3<- obj->clazz
+    ldr     r0, .LstrClassCastExceptionPtr
+    ldr     r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor
+    bl      dvmThrowExceptionWithClassMessage
+    b       common_exceptionThrown
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r2 holds BBBB
+     *  r9 holds object
+     */
+.L${opcode}_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    mov     r1, r2                      @ r1<- BBBB
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    mov     r1, r0                      @ r1<- class resolved from BBBB
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    b       .L${opcode}_resolved        @ pick up where we left off
+
+.LstrClassCastExceptionPtr:
+    .word   .LstrClassCastException
+
diff --git a/vm/mterp/armv5/OP_CMPG_DOUBLE.S b/vm/mterp/armv5/OP_CMPG_DOUBLE.S
new file mode 100644
index 0000000..706dced
--- /dev/null
+++ b/vm/mterp/armv5/OP_CMPG_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_CMPL_DOUBLE.S" { "naninst":"mov     r1, #1" }
diff --git a/vm/mterp/armv5/OP_CMPG_FLOAT.S b/vm/mterp/armv5/OP_CMPG_FLOAT.S
new file mode 100644
index 0000000..944fff8
--- /dev/null
+++ b/vm/mterp/armv5/OP_CMPG_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_CMPL_FLOAT.S" { "naninst":"mov     r1, #1" }
diff --git a/vm/mterp/armv5/OP_CMPL_DOUBLE.S b/vm/mterp/armv5/OP_CMPL_DOUBLE.S
new file mode 100644
index 0000000..50ff3f7
--- /dev/null
+++ b/vm/mterp/armv5/OP_CMPL_DOUBLE.S
@@ -0,0 +1,49 @@
+%default { "naninst":"mvn     r1, #0" }
+%verify "executed"
+%verify "basic lt, gt, eq"
+%verify "left arg NaN"
+%verify "right arg NaN"
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * See OP_CMPL_FLOAT for an explanation.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    and     r9, r0, #255                @ r9<- BB
+    mov     r10, r0, lsr #8             @ r10<- CC
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
+    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
+    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
+    bhi     .L${opcode}_gt_or_nan       @ C set and Z clear, disambiguate
+    mvncc   r1, #0                      @ (less than) r1<- -1
+    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
+.L${opcode}_finish:
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r3)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    @ Test for NaN with a second comparison.  EABI forbids testing bit
+    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+    @ make the library call.
+.L${opcode}_gt_or_nan:
+    ldmia   r10, {r0-r1}                @ reverse order
+    ldmia   r9, {r2-r3}
+    bl      __aeabi_cdcmple             @ r0<- Z set if eq, C clear if <
+    @bleq    common_abort
+    movcc   r1, #1                      @ (greater than) r1<- 1
+    bcc     .L${opcode}_finish
+    $naninst                            @ r1<- 1 or -1 for NaN
+    b       .L${opcode}_finish
+
diff --git a/vm/mterp/armv5/OP_CMPL_FLOAT.S b/vm/mterp/armv5/OP_CMPL_FLOAT.S
new file mode 100644
index 0000000..c53d419
--- /dev/null
+++ b/vm/mterp/armv5/OP_CMPL_FLOAT.S
@@ -0,0 +1,116 @@
+%default { "naninst":"mvn     r1, #0" }
+%verify "executed"
+%verify "basic lt, gt, eq"
+%verify "left arg NaN"
+%verify "right arg NaN"
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1,1};  // one or both operands was NaN
+     *
+     * The straightforward implementation requires 3 calls to functions
+     * that return a result in r0.  We can do it with two calls if our
+     * EABI library supports __aeabi_cfcmple (only one if we want to check
+     * for NaN directly):
+     *   check x <= y
+     *     if <, return -1
+     *     if ==, return 0
+     *   check y <= x
+     *     if <, return 1
+     *   return {-1,1}
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG(r9, r2)                    @ r9<- vBB
+    GET_VREG(r10, r3)                   @ r10<- vCC
+    mov     r0, r9                      @ copy to arg registers
+    mov     r1, r10
+    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
+    bhi     .L${opcode}_gt_or_nan       @ C set and Z clear, disambiguate
+    mvncc   r1, #0                      @ (less than) r1<- -1
+    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
+.L${opcode}_finish:
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r3)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    @ Test for NaN with a second comparison.  EABI forbids testing bit
+    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+    @ make the library call.
+.L${opcode}_gt_or_nan:
+    mov     r1, r9                      @ reverse order
+    mov     r0, r10
+    bl      __aeabi_cfcmple             @ r0<- Z set if eq, C clear if <
+    @bleq    common_abort
+    movcc   r1, #1                      @ (greater than) r1<- 1
+    bcc     .L${opcode}_finish
+    $naninst                            @ r1<- 1 or -1 for NaN
+    b       .L${opcode}_finish
+
+
+#if 0       /* "classic" form */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG(r9, r2)                    @ r9<- vBB
+    GET_VREG(r10, r3)                   @ r10<- vCC
+    mov     r0, r9                      @ r0<- vBB
+    mov     r1, r10                     @ r1<- vCC
+    bl      __aeabi_fcmpeq              @ r0<- (vBB == vCC)
+    cmp     r0, #0                      @ equal?
+    movne   r1, #0                      @ yes, result is 0
+    bne     ${opcode}_finish
+    mov     r0, r9                      @ r0<- vBB
+    mov     r1, r10                     @ r1<- vCC
+    bl      __aeabi_fcmplt              @ r0<- (vBB < vCC)
+    cmp     r0, #0                      @ less than?
+    b       ${opcode}_continue
+@%break
+
+${opcode}_continue:
+    mvnne   r1, #0                      @ yes, result is -1
+    bne     ${opcode}_finish
+    mov     r0, r9                      @ r0<- vBB
+    mov     r1, r10                     @ r1<- vCC
+    bl      __aeabi_fcmpgt              @ r0<- (vBB > vCC)
+    cmp     r0, #0                      @ greater than?
+    beq     ${opcode}_nan               @ no, must be NaN
+    mov     r1, #1                      @ yes, result is 1
+    @ fall through to _finish
+
+${opcode}_finish:
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r3)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * This is expected to be uncommon, so we double-branch (once to here,
+     * again back to _finish).
+     */
+${opcode}_nan:
+    $naninst                            @ r1<- 1 or -1 for NaN
+    b       ${opcode}_finish
+
+#endif
+
diff --git a/vm/mterp/armv5/OP_CMP_LONG.S b/vm/mterp/armv5/OP_CMP_LONG.S
new file mode 100644
index 0000000..d456137
--- /dev/null
+++ b/vm/mterp/armv5/OP_CMP_LONG.S
@@ -0,0 +1,61 @@
+%verify "executed"
+%verify "basic lt, gt, eq"
+%verify "hi equal, lo <=>"
+%verify "lo equal, hi <=>"
+    /*
+     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
+     * register based on the results of the comparison.
+     *
+     * We load the full values with LDM, but in practice many values could
+     * be resolved by only looking at the high word.  This could be made
+     * faster or slower by splitting the LDM into a pair of LDRs.
+     *
+     * If we just wanted to set condition flags, we could do this:
+     *  subs    ip, r0, r2
+     *  sbcs    ip, r1, r3
+     *  subeqs  ip, r0, r2
+     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
+     * integer value, which we can do with 2 conditional mov/mvn instructions
+     * (set 1, set -1; if they're equal we already have 0 in ip), giving
+     * us a constant 5-cycle path plus a branch at the end to the
+     * instruction epilogue code.  The multi-compare approach below needs
+     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+     * in the worst case (the 64-bit values are equal).
+     */
+    /* cmp-long vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
+    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
+    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
+    blt     .L${opcode}_less            @ signed compare on high part
+    bgt     .L${opcode}_greater
+    subs    r1, r0, r2                  @ r1<- r0 - r2
+    bhi     .L${opcode}_greater         @ unsigned compare on low part
+    bne     .L${opcode}_less
+    b       .L${opcode}_finish          @ equal; r1 already holds 0
+%break
+
+.L${opcode}_less:
+    mvn     r1, #0                      @ r1<- -1
+    @ Want to cond code the next mov so we can avoid branch, but don't see it;
+    @ instead, we just replicate the tail end.
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r9)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+.L${opcode}_greater:
+    mov     r1, #1                      @ r1<- 1
+    @ fall through to _finish
+
+.L${opcode}_finish:
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r9)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_CONST.S b/vm/mterp/armv5/OP_CONST.S
new file mode 100644
index 0000000..4394647
--- /dev/null
+++ b/vm/mterp/armv5/OP_CONST.S
@@ -0,0 +1,11 @@
+%verify "executed"
+    /* const vAA, #+BBBBbbbb */
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH(r0, 1)                        @ r0<- bbbb (low)
+    FETCH(r1, 2)                        @ r1<- BBBB (high)
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r3)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_CONST_16.S b/vm/mterp/armv5/OP_CONST_16.S
new file mode 100644
index 0000000..0b44c99
--- /dev/null
+++ b/vm/mterp/armv5/OP_CONST_16.S
@@ -0,0 +1,9 @@
+%verify "executed"
+    /* const/16 vAA, #+BBBB */
+    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r0, r3)                    @ vAA<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_CONST_4.S b/vm/mterp/armv5/OP_CONST_4.S
new file mode 100644
index 0000000..9ac53c2
--- /dev/null
+++ b/vm/mterp/armv5/OP_CONST_4.S
@@ -0,0 +1,11 @@
+%verify "executed"
+    /* const/4 vA, #+B */
+    mov     r1, rINST, lsl #16          @ r1<- Bxxx0000
+    mov     r0, rINST, lsr #8           @ r0<- A+
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    mov     r1, r1, asr #28             @ r1<- sssssssB (sign-extended)
+    and     r0, r0, #15
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    SET_VREG(r1, r0)                    @ fp[A]<- r1
+    GOTO_OPCODE(ip)                     @ execute next instruction
+
diff --git a/vm/mterp/armv5/OP_CONST_CLASS.S b/vm/mterp/armv5/OP_CONST_CLASS.S
new file mode 100644
index 0000000..b834553
--- /dev/null
+++ b/vm/mterp/armv5/OP_CONST_CLASS.S
@@ -0,0 +1,36 @@
+%verify "executed"
+%verify "Class already resolved"
+%verify "Class not yet resolved"
+%verify "Class cannot be resolved"
+    /* const/class vAA, Class@BBBB */
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    ldr     r2, [r2, #offDvmDex_pResClasses]   @ r2<- dvmDex->pResClasses
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
+    cmp     r0, #0                      @ not yet resolved?
+    beq     .L${opcode}_resolve
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Continuation if the Class has not yet been resolved.
+     *  r1: BBBB (Class ref)
+     *  r9: target register
+     */
+.L${opcode}_resolve:
+    EXPORT_PC()
+    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- Class reference
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yup, handle the exception
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_CONST_HIGH16.S b/vm/mterp/armv5/OP_CONST_HIGH16.S
new file mode 100644
index 0000000..07b5061
--- /dev/null
+++ b/vm/mterp/armv5/OP_CONST_HIGH16.S
@@ -0,0 +1,10 @@
+%verify "executed"
+    /* const/high16 vAA, #+BBBB0000 */
+    FETCH(r0, 1)                        @ r0<- 0000BBBB (zero-extended)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    mov     r0, r0, lsl #16             @ r0<- BBBB0000
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r0, r3)                    @ vAA<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_CONST_STRING.S b/vm/mterp/armv5/OP_CONST_STRING.S
new file mode 100644
index 0000000..5e2e389
--- /dev/null
+++ b/vm/mterp/armv5/OP_CONST_STRING.S
@@ -0,0 +1,35 @@
+%verify "executed"
+%verify "String already resolved"
+%verify "String not yet resolved"
+%verify "String cannot be resolved"
+    /* const/string vAA, String@BBBB */
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
+    cmp     r0, #0                      @ not yet resolved?
+    beq     .L${opcode}_resolve
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Continuation if the String has not yet been resolved.
+     *  r1: BBBB (String ref)
+     *  r9: target register
+     */
+.L${opcode}_resolve:
+    EXPORT_PC()
+    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveString            @ r0<- String reference
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yup, handle the exception
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_CONST_STRING_JUMBO.S b/vm/mterp/armv5/OP_CONST_STRING_JUMBO.S
new file mode 100644
index 0000000..b98e0e1
--- /dev/null
+++ b/vm/mterp/armv5/OP_CONST_STRING_JUMBO.S
@@ -0,0 +1,37 @@
+%verify "executed"
+%verify "String already resolved"
+%verify "String not yet resolved"
+%verify "String cannot be resolved"
+    /* const/string vAA, String@BBBBBBBB */
+    FETCH(r0, 1)                        @ r0<- bbbb (low)
+    FETCH(r1, 2)                        @ r1<- BBBB (high)
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
+    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
+    cmp     r0, #0
+    beq     .L${opcode}_resolve
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Continuation if the String has not yet been resolved.
+     *  r1: BBBBBBBB (String ref)
+     *  r9: target register
+     */
+.L${opcode}_resolve:
+    EXPORT_PC()
+    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveString            @ r0<- String reference
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yup, handle the exception
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_CONST_WIDE.S b/vm/mterp/armv5/OP_CONST_WIDE.S
new file mode 100644
index 0000000..428d423
--- /dev/null
+++ b/vm/mterp/armv5/OP_CONST_WIDE.S
@@ -0,0 +1,15 @@
+%verify "executed"
+    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+    FETCH(r0, 1)                        @ r0<- bbbb (low)
+    FETCH(r1, 2)                        @ r1<- BBBB (low middle)
+    FETCH(r2, 3)                        @ r2<- hhhh (high middle)
+    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
+    FETCH(r3, 4)                        @ r3<- HHHH (high)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_CONST_WIDE_16.S b/vm/mterp/armv5/OP_CONST_WIDE_16.S
new file mode 100644
index 0000000..2d37d58
--- /dev/null
+++ b/vm/mterp/armv5/OP_CONST_WIDE_16.S
@@ -0,0 +1,11 @@
+%verify "executed"
+    /* const-wide/16 vAA, #+BBBB */
+    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    mov     r1, r0, asr #31             @ r1<- ssssssss
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_CONST_WIDE_32.S b/vm/mterp/armv5/OP_CONST_WIDE_32.S
new file mode 100644
index 0000000..5991eb4
--- /dev/null
+++ b/vm/mterp/armv5/OP_CONST_WIDE_32.S
@@ -0,0 +1,13 @@
+%verify "executed"
+    /* const-wide/32 vAA, #+BBBBbbbb */
+    FETCH(r0, 1)                        @ r0<- 0000bbbb (low)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_S(r2, 2)                      @ r2<- ssssBBBB (high)
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
+    mov     r1, r0, asr #31             @ r1<- ssssssss
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_CONST_WIDE_HIGH16.S b/vm/mterp/armv5/OP_CONST_WIDE_HIGH16.S
new file mode 100644
index 0000000..2e23f79
--- /dev/null
+++ b/vm/mterp/armv5/OP_CONST_WIDE_HIGH16.S
@@ -0,0 +1,12 @@
+%verify "executed"
+    /* const-wide/high16 vAA, #+BBBB000000000000 */
+    FETCH(r1, 1)                        @ r1<- 0000BBBB (zero-extended)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    mov     r0, #0                      @ r0<- 00000000
+    mov     r1, r1, lsl #16             @ r1<- BBBB0000
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_DIV_DOUBLE.S b/vm/mterp/armv5/OP_DIV_DOUBLE.S
new file mode 100644
index 0000000..938875d
--- /dev/null
+++ b/vm/mterp/armv5/OP_DIV_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide.S" {"instr":"bl      __aeabi_ddiv"}
diff --git a/vm/mterp/armv5/OP_DIV_DOUBLE_2ADDR.S b/vm/mterp/armv5/OP_DIV_DOUBLE_2ADDR.S
new file mode 100644
index 0000000..3b6409c
--- /dev/null
+++ b/vm/mterp/armv5/OP_DIV_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide2addr.S" {"instr":"bl      __aeabi_ddiv"}
diff --git a/vm/mterp/armv5/OP_DIV_FLOAT.S b/vm/mterp/armv5/OP_DIV_FLOAT.S
new file mode 100644
index 0000000..7defc9b
--- /dev/null
+++ b/vm/mterp/armv5/OP_DIV_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"instr":"bl      __aeabi_fdiv"}
diff --git a/vm/mterp/armv5/OP_DIV_FLOAT_2ADDR.S b/vm/mterp/armv5/OP_DIV_FLOAT_2ADDR.S
new file mode 100644
index 0000000..786514e
--- /dev/null
+++ b/vm/mterp/armv5/OP_DIV_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"instr":"bl      __aeabi_fdiv"}
diff --git a/vm/mterp/armv5/OP_DIV_INT.S b/vm/mterp/armv5/OP_DIV_INT.S
new file mode 100644
index 0000000..c7e9faa
--- /dev/null
+++ b/vm/mterp/armv5/OP_DIV_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"instr":"bl     __aeabi_idiv","chkzero":"1"}
diff --git a/vm/mterp/armv5/OP_DIV_INT_2ADDR.S b/vm/mterp/armv5/OP_DIV_INT_2ADDR.S
new file mode 100644
index 0000000..3173c60
--- /dev/null
+++ b/vm/mterp/armv5/OP_DIV_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"instr":"bl     __aeabi_idiv","chkzero":"1"}
diff --git a/vm/mterp/armv5/OP_DIV_INT_LIT16.S b/vm/mterp/armv5/OP_DIV_INT_LIT16.S
new file mode 100644
index 0000000..ca39d67
--- /dev/null
+++ b/vm/mterp/armv5/OP_DIV_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit16.S" {"instr":"bl     __aeabi_idiv","chkzero":"1"}
diff --git a/vm/mterp/armv5/OP_DIV_INT_LIT8.S b/vm/mterp/armv5/OP_DIV_INT_LIT8.S
new file mode 100644
index 0000000..fa518fe
--- /dev/null
+++ b/vm/mterp/armv5/OP_DIV_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit8.S" {"instr":"bl     __aeabi_idiv","chkzero":"1"}
diff --git a/vm/mterp/armv5/OP_DIV_LONG.S b/vm/mterp/armv5/OP_DIV_LONG.S
new file mode 100644
index 0000000..fe4009b
--- /dev/null
+++ b/vm/mterp/armv5/OP_DIV_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide.S" {"instr":"bl      __aeabi_ldivmod", "chkzero":"1"}
diff --git a/vm/mterp/armv5/OP_DIV_LONG_2ADDR.S b/vm/mterp/armv5/OP_DIV_LONG_2ADDR.S
new file mode 100644
index 0000000..41d928d
--- /dev/null
+++ b/vm/mterp/armv5/OP_DIV_LONG_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide2addr.S" {"instr":"bl      __aeabi_ldivmod", "chkzero":"1"}
diff --git a/vm/mterp/armv5/OP_DOUBLE_TO_FLOAT.S b/vm/mterp/armv5/OP_DOUBLE_TO_FLOAT.S
new file mode 100644
index 0000000..16b8e95
--- /dev/null
+++ b/vm/mterp/armv5/OP_DOUBLE_TO_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unopNarrower.S" {"instr":"bl      __aeabi_d2f"}
diff --git a/vm/mterp/armv5/OP_DOUBLE_TO_INT.S b/vm/mterp/armv5/OP_DOUBLE_TO_INT.S
new file mode 100644
index 0000000..df831a9
--- /dev/null
+++ b/vm/mterp/armv5/OP_DOUBLE_TO_INT.S
@@ -0,0 +1,59 @@
+%verify "executed"
+/* EABI appears to have Java-style conversions of +inf/-inf/NaN */
+%include "armv5/unopNarrower.S" {"instr":"bl      __aeabi_d2iz"}
+
+#if 0
+@include "armv5/unopNarrower.S" {"instr":"bl      d2i_doconv"}
+@break
+/*
+ * Convert the double in r0/r1 to an int in r0.
+ *
+ * We have to clip values to int min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer.  The EABI convert function isn't doing this for us.
+ */
+d2i_doconv:
+    stmfd   sp!, {r4, r5, lr}           @ save regs
+    ldr     r2, .L${opcode}_maxlo       @ (double)maxint, lo
+    ldr     r3, .L${opcode}_maxhi       @ (double)maxint, hi
+    sub     sp, sp, #4                  @ align for EABI
+    mov     r4, r0                      @ save r0
+    mov     r5, r1                      @  and r1
+    bl      __aeabi_dcmpge              @ is arg >= maxint?
+    cmp     r0, #0                      @ nonzero == yes
+    mvnne   r0, #0x80000000             @ return maxint (7fffffff)
+    bne     1f
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r5
+    ldr     r3, .L${opcode}_min         @ (double)minint, hi
+    mov     r2, #0                      @ (double)minint, lo
+    bl      __aeabi_dcmple              @ is arg <= minint?
+    cmp     r0, #0                      @ nonzero == yes
+    movne   r0, #0x80000000             @ return minint (80000000)
+    bne     1f
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r5
+    mov     r2, r4                      @ compare against self
+    mov     r3, r5
+    bl      __aeabi_dcmpeq              @ is arg == self?
+    cmp     r0, #0                      @ zero == no
+    beq     1f                          @ return zero for NaN
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r5
+    bl      __aeabi_d2iz                @ convert double to int
+
+1:
+    add     sp, sp, #4
+    ldmfd   sp!, {r4, r5, pc}
+
+.L${opcode}_maxlo:
+    .word   0xffc00000                  @ maxint, as a double (low word)
+.L${opcode}_maxhi:
+    .word   0x41dfffff                  @ maxint, as a double (high word)
+.L${opcode}_min:
+    .word   0xc1e00000                  @ minint, as a double (high word)
+#endif
+
diff --git a/vm/mterp/armv5/OP_DOUBLE_TO_LONG.S b/vm/mterp/armv5/OP_DOUBLE_TO_LONG.S
new file mode 100644
index 0000000..a64f9e7
--- /dev/null
+++ b/vm/mterp/armv5/OP_DOUBLE_TO_LONG.S
@@ -0,0 +1,57 @@
+%verify "executed"
+@include "armv5/unopWide.S" {"instr":"bl      __aeabi_d2lz"}
+%include "armv5/unopWide.S" {"instr":"bl      d2l_doconv"}
+
+%break
+/*
+ * Convert the double in r0/r1 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer.  The EABI convert function isn't doing this for us.
+ */
+d2l_doconv:
+    stmfd   sp!, {r4, r5, lr}           @ save regs
+    ldr     r3, .L${opcode}_max         @ (double)maxlong, hi
+    sub     sp, sp, #4                  @ align for EABI
+    mov     r2, #0                      @ (double)maxlong, lo
+    mov     r4, r0                      @ save r0
+    mov     r5, r1                      @  and r1
+    bl      __aeabi_dcmpge              @ is arg >= maxlong?
+    cmp     r0, #0                      @ nonzero == yes
+    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
+    mvnne   r1, #0x80000000
+    bne     1f
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r5
+    ldr     r3, .L${opcode}_min         @ (double)minlong, hi
+    mov     r2, #0                      @ (double)minlong, lo
+    bl      __aeabi_dcmple              @ is arg <= minlong?
+    cmp     r0, #0                      @ nonzero == yes
+    movne   r0, #0                      @ return minlong (8000000000000000)
+    movne   r1, #0x80000000
+    bne     1f
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r5
+    mov     r2, r4                      @ compare against self
+    mov     r3, r5
+    bl      __aeabi_dcmpeq              @ is arg == self?
+    cmp     r0, #0                      @ zero == no
+    moveq   r1, #0                      @ return zero for NaN
+    beq     1f
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r5
+    bl      __aeabi_d2lz                @ convert double to long
+
+1:
+    add     sp, sp, #4
+    ldmfd   sp!, {r4, r5, pc}
+
+.L${opcode}_max:
+    .word   0x43e00000                  @ maxlong, as a double (high word)
+.L${opcode}_min:
+    .word   0xc3e00000                  @ minlong, as a double (high word)
+
diff --git a/vm/mterp/armv5/OP_EXECUTE_INLINE.S b/vm/mterp/armv5/OP_EXECUTE_INLINE.S
new file mode 100644
index 0000000..84836cd
--- /dev/null
+++ b/vm/mterp/armv5/OP_EXECUTE_INLINE.S
@@ -0,0 +1,61 @@
+%verify "executed"
+%verify "exception handled"
+    /*
+     * Execute a "native inline" instruction.
+     *
+     * We need to call:
+     *  dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref)
+     *
+     * The first four args are in r0-r3, but the last two must be pushed
+     * onto the stack.
+     */
+    /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
+    FETCH(r10, 1)                       @ r10<- BBBB
+    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
+    EXPORT_PC()                         @ can throw
+    sub     sp, sp, #8                  @ make room for arg(s)
+    mov     r0, rINST, lsr #12          @ r0<- B
+    str     r1, [sp]                    @ push &glue->retval
+    bl      .L${opcode}_continue        @ make call; will return after
+    add     sp, sp, #8                  @ pop stack
+    cmp     r0, #0                      @ test boolean result of inline
+    beq     common_exceptionThrown      @ returned false, handle exception
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Extract args, call function.
+     *  r0 = #of args (0-4)
+     *  r10 = call index
+     *  lr = return addr, above  [DO NOT bl in here w/o preserving LR]
+     *
+     * Other ideas:
+     * - Use a jump table from the main piece to jump directly into the
+     *   AND/LDR pairs.  Costs a data load, saves a branch.
+     * - Have five separate pieces that do the loading, so we can work the
+     *   interleave a little better.  Increases code size.
+     */
+.L${opcode}_continue:
+    rsb     r0, r0, #4                  @ r0<- 4-r0
+    FETCH(r9, 2)                        @ r9<- FEDC
+    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
+    bl      common_abort                @ (skipped due to ARM prefetch)
+4:  and     ip, r9, #0xf000             @ isolate F
+    ldr     r3, [rFP, ip, lsr #10]      @ r3<- vF (shift right 12, left 2)
+3:  and     ip, r9, #0x0f00             @ isolate E
+    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vE
+2:  and     ip, r9, #0x00f0             @ isolate D
+    ldr     r1, [rFP, ip, lsr #2]       @ r1<- vD
+1:  and     ip, r9, #0x000f             @ isolate C
+    ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
+0:
+    @b       dvmPerformInlineOp4Std
+    ldr     r9, .L${opcode}_table       @ table of InlineOperation
+    ldr     pc, [r9, r10, lsl #4]       @ sizeof=16, "func" is first entry
+    @ (not reached)
+
+.L${opcode}_table:
+    .word   gDvmInlineOpsTable
+
diff --git a/vm/mterp/armv5/OP_FILLED_NEW_ARRAY.S b/vm/mterp/armv5/OP_FILLED_NEW_ARRAY.S
new file mode 100644
index 0000000..375c811
--- /dev/null
+++ b/vm/mterp/armv5/OP_FILLED_NEW_ARRAY.S
@@ -0,0 +1,101 @@
+%default { "isrange":"0" }
+%verify "executed"
+%verify "unimplemented array type"
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    EXPORT_PC()                         @ need for resolve and alloc
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    mov     r10, rINST, lsr #8          @ r10<- AA or BA
+    cmp     r0, #0                      @ already resolved?
+    bne     .L${opcode}_continue        @ yes, continue on
+8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       .L${opcode}_continue
+%break
+
+    /*
+     * On entry:
+     *  r0 holds array class
+     *  r10 holds AA or BA
+     */
+.L${opcode}_continue:
+    ldr     r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
+    ldrb    r0, [r3, #1]                @ r0<- descriptor[1] (element type char)
+    .if     $isrange
+    mov     r1, r10                     @ r1<- AA (length)
+    .else
+    mov     r1, r10, lsr #4             @ r1<- B (length)
+    .endif
+    cmp     r0, #'I'                    @ array of ints? (only 'I' handled here)
+    mov     r9, r1                      @ save length in r9
+    bne     .L${opcode}_notimpl         @ no, not handled yet
+    bl      dvmAllocPrimitiveArray      @ r0<- call(typeCh, length, flags)
+    cmp     r0, #0                      @ null return?
+    beq     common_exceptionThrown      @ alloc failed, handle exception
+
+    FETCH(r1, 2)                        @ r1<- FEDC or CCCC
+    str     r0, [rGLUE, #offGlue_retval]    @ retval.l <- new array
+    add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+    subs    r9, r9, #1                  @ length--, check for neg
+    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
+    bmi     2f                          @ was zero, bail
+
+    @ copy values from registers into the array
+    @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
+    .if     $isrange
+    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
+1:  ldr     r3, [r2], #4                @ r3<- *r2++
+    subs    r9, r9, #1                  @ count--
+    str     r3, [r0], #4                @ *contents++ = vX
+    bpl     1b
+    @ continue at 2
+    .else
+    cmp     r9, #4                      @ length was initially 5?
+    and     r2, r10, #15                @ r2<- A
+    bne     1f                          @ <= 4 args, branch
+    GET_VREG(r3, r2)                    @ r3<- vA
+    sub     r9, r9, #1                  @ count--
+    str     r3, [r0, #16]               @ contents[4] = vA
+1:  and     r2, r1, #15                 @ r2<- F/E/D/C
+    GET_VREG(r3, r2)                    @ r3<- vF/vE/vD/vC
+    mov     r1, r1, lsr #4              @ r1<- next reg in low 4
+    subs    r9, r9, #1                  @ count--
+    str     r3, [r0], #4                @ *contents++ = vX
+    bpl     1b
+    @ continue at 2
+    .endif
+
+2:
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    GOTO_OPCODE(ip)                     @ execute it
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.L${opcode}_notimpl:
+    ldr     r0, strInternalError
+    ldr     r1, strFilledNewArrayNotImpl
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+    .if     (!$isrange)                 @ define in one or the other, not both
+strFilledNewArrayNotImpl:
+    .word   .LstrFilledNewArrayNotImpl
+strInternalError:
+    .word   .LstrInternalError
+    .endif
+
diff --git a/vm/mterp/armv5/OP_FILLED_NEW_ARRAY_RANGE.S b/vm/mterp/armv5/OP_FILLED_NEW_ARRAY_RANGE.S
new file mode 100644
index 0000000..b65cc04
--- /dev/null
+++ b/vm/mterp/armv5/OP_FILLED_NEW_ARRAY_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_FILLED_NEW_ARRAY.S" { "isrange":"1" }
diff --git a/vm/mterp/armv5/OP_FILL_ARRAY_DATA.S b/vm/mterp/armv5/OP_FILL_ARRAY_DATA.S
new file mode 100644
index 0000000..a0d8399
--- /dev/null
+++ b/vm/mterp/armv5/OP_FILL_ARRAY_DATA.S
@@ -0,0 +1,15 @@
+%verify "executed"
+    /* fill-array-data vAA, +BBBBBBBB */
+    FETCH(r0, 1)                        @ r0<- bbbb (lo)
+    FETCH(r1, 2)                        @ r1<- BBBB (hi)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
+    GET_VREG(r0, r3)                    @ r0<- vAA (array object)
+    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
+    EXPORT_PC();                        @ NOTE(review): stray ';' -- harmless, but other handlers omit it
+    bl      dvmInterpHandleFillArrayData@ fill the array with predefined data
+    cmp     r0, #0                      @ 0 means an exception is thrown
+    beq     common_exceptionThrown      @ has exception
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
diff --git a/vm/mterp/armv5/OP_FLOAT_TO_DOUBLE.S b/vm/mterp/armv5/OP_FLOAT_TO_DOUBLE.S
new file mode 100644
index 0000000..3e00f8f
--- /dev/null
+++ b/vm/mterp/armv5/OP_FLOAT_TO_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unopWider.S" {"instr":"bl      __aeabi_f2d"}
diff --git a/vm/mterp/armv5/OP_FLOAT_TO_INT.S b/vm/mterp/armv5/OP_FLOAT_TO_INT.S
new file mode 100644
index 0000000..2405551
--- /dev/null
+++ b/vm/mterp/armv5/OP_FLOAT_TO_INT.S
@@ -0,0 +1,41 @@
+%verify "executed"
+/* EABI appears to have Java-style conversions of +inf/-inf/NaN; disabled f2i_doconv fallback below is kept in case it doesn't */
+%include "armv5/unop.S" {"instr":"bl      __aeabi_f2iz"}
+
+#if 0
+@include "armv5/unop.S" {"instr":"bl      f2i_doconv"}
+@break
+/*
+ * Convert the float in r0 to an int in r0.
+ *
+ * We have to clip values to int min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer.  The EABI convert function isn't doing this for us.
+ */
+f2i_doconv:
+    stmfd   sp!, {r4, lr}
+    mov     r1, #0x4f000000             @ (float)maxint
+    mov     r4, r0
+    bl      __aeabi_fcmpge              @ is arg >= maxint?
+    cmp     r0, #0                      @ nonzero == yes
+    mvnne   r0, #0x80000000             @ return maxint (7fffffff)
+    ldmnefd sp!, {r4, pc}
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, #0xcf000000             @ (float)minint
+    bl      __aeabi_fcmple              @ is arg <= minint?
+    cmp     r0, #0                      @ nonzero == yes
+    movne   r0, #0x80000000             @ return minint (80000000)
+    ldmnefd sp!, {r4, pc}
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r4
+    bl      __aeabi_fcmpeq              @ is arg == self?
+    cmp     r0, #0                      @ zero == no
+    ldmeqfd sp!, {r4, pc}               @ return zero for NaN
+
+    mov     r0, r4                      @ recover arg
+    bl      __aeabi_f2iz                @ convert float to int
+    ldmfd   sp!, {r4, pc}
+#endif
+
diff --git a/vm/mterp/armv5/OP_FLOAT_TO_LONG.S b/vm/mterp/armv5/OP_FLOAT_TO_LONG.S
new file mode 100644
index 0000000..e7e27a2
--- /dev/null
+++ b/vm/mterp/armv5/OP_FLOAT_TO_LONG.S
@@ -0,0 +1,41 @@
+%verify "executed"
+@include "armv5/unopWider.S" {"instr":"bl      __aeabi_f2lz"}
+%include "armv5/unopWider.S" {"instr":"bl      f2l_doconv"}
+
+%break
+/*
+ * Convert the float in r0 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer.  The EABI convert function isn't doing this for us.
+ */
+f2l_doconv:
+    stmfd   sp!, {r4, lr}
+    mov     r1, #0x5f000000             @ (float)maxlong
+    mov     r4, r0
+    bl      __aeabi_fcmpge              @ is arg >= maxlong?
+    cmp     r0, #0                      @ nonzero == yes
+    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
+    mvnne   r1, #0x80000000
+    ldmnefd sp!, {r4, pc}
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, #0xdf000000             @ (float)minlong
+    bl      __aeabi_fcmple              @ is arg <= minlong?
+    cmp     r0, #0                      @ nonzero == yes
+    movne   r0, #0                      @ return minlong (8000000000000000)
+    movne   r1, #0x80000000
+    ldmnefd sp!, {r4, pc}
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r4
+    bl      __aeabi_fcmpeq              @ is arg == self?
+    cmp     r0, #0                      @ zero == no
+    moveq   r1, #0                      @ return zero for NaN
+    ldmeqfd sp!, {r4, pc}
+
+    mov     r0, r4                      @ recover arg
+    bl      __aeabi_f2lz                @ convert float to long
+    ldmfd   sp!, {r4, pc}
+
diff --git a/vm/mterp/armv5/OP_GOTO.S b/vm/mterp/armv5/OP_GOTO.S
new file mode 100644
index 0000000..3433a73
--- /dev/null
+++ b/vm/mterp/armv5/OP_GOTO.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "forward and backward"
+    /*
+     * Unconditional branch, 8-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto +AA */
+    mov     r0, rINST, lsl #16          @ r0<- AAxx0000
+    movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended)
+    mov     r9, r9, lsl #1              @ r9<- byte offset (plain mov keeps N flag from movs)
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_GOTO_16.S b/vm/mterp/armv5/OP_GOTO_16.S
new file mode 100644
index 0000000..479438e
--- /dev/null
+++ b/vm/mterp/armv5/OP_GOTO_16.S
@@ -0,0 +1,16 @@
+%verify "executed"
+%verify "forward and backward"
+    /*
+     * Unconditional branch, 16-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto/16 +AAAA */
+    FETCH_S(r0, 1)                      @ r0<- ssssAAAA (sign-extended)
+    movs    r9, r0, asl #1              @ r9<- byte offset, check sign (movs sets N)
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_GOTO_32.S b/vm/mterp/armv5/OP_GOTO_32.S
new file mode 100644
index 0000000..617b8ba
--- /dev/null
+++ b/vm/mterp/armv5/OP_GOTO_32.S
@@ -0,0 +1,24 @@
+%verify "executed"
+%verify "forward, backward, self"
+    /*
+     * Unconditional branch, 32-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     *
+     * Unlike most opcodes, this one is allowed to branch to itself, so
+     * our "backward branch" test must be "<=0" instead of "<0".  The ORRS
+     * instruction doesn't affect the V flag, so we need to clear it
+     * explicitly.
+     */
+    /* goto/32 +AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    cmp     ip, ip                      @ (clear V flag during stall)
+    orrs    r0, r0, r1, lsl #16         @ r0<- AAAAaaaa, check sign
+    mov     r9, r0, asl #1              @ r9<- byte offset (flags still from orrs)
+    ble     common_backwardBranch       @ backward branch, do periodic checks
+    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_IF_EQ.S b/vm/mterp/armv5/OP_IF_EQ.S
new file mode 100644
index 0000000..905cb90
--- /dev/null
+++ b/vm/mterp/armv5/OP_IF_EQ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/bincmp.S" { "revcmp":"ne" }
diff --git a/vm/mterp/armv5/OP_IF_EQZ.S b/vm/mterp/armv5/OP_IF_EQZ.S
new file mode 100644
index 0000000..bc33dec
--- /dev/null
+++ b/vm/mterp/armv5/OP_IF_EQZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/zcmp.S" { "revcmp":"ne" }
diff --git a/vm/mterp/armv5/OP_IF_GE.S b/vm/mterp/armv5/OP_IF_GE.S
new file mode 100644
index 0000000..aaaf770
--- /dev/null
+++ b/vm/mterp/armv5/OP_IF_GE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/bincmp.S" { "revcmp":"lt" }
diff --git a/vm/mterp/armv5/OP_IF_GEZ.S b/vm/mterp/armv5/OP_IF_GEZ.S
new file mode 100644
index 0000000..0e6bfc2
--- /dev/null
+++ b/vm/mterp/armv5/OP_IF_GEZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/zcmp.S" { "revcmp":"lt" }
diff --git a/vm/mterp/armv5/OP_IF_GT.S b/vm/mterp/armv5/OP_IF_GT.S
new file mode 100644
index 0000000..8ca99d9
--- /dev/null
+++ b/vm/mterp/armv5/OP_IF_GT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/bincmp.S" { "revcmp":"le" }
diff --git a/vm/mterp/armv5/OP_IF_GTZ.S b/vm/mterp/armv5/OP_IF_GTZ.S
new file mode 100644
index 0000000..264ae30
--- /dev/null
+++ b/vm/mterp/armv5/OP_IF_GTZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/zcmp.S" { "revcmp":"le" }
diff --git a/vm/mterp/armv5/OP_IF_LE.S b/vm/mterp/armv5/OP_IF_LE.S
new file mode 100644
index 0000000..55d4d3e
--- /dev/null
+++ b/vm/mterp/armv5/OP_IF_LE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/bincmp.S" { "revcmp":"gt" }
diff --git a/vm/mterp/armv5/OP_IF_LEZ.S b/vm/mterp/armv5/OP_IF_LEZ.S
new file mode 100644
index 0000000..1cd273d
--- /dev/null
+++ b/vm/mterp/armv5/OP_IF_LEZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/zcmp.S" { "revcmp":"gt" }
diff --git a/vm/mterp/armv5/OP_IF_LT.S b/vm/mterp/armv5/OP_IF_LT.S
new file mode 100644
index 0000000..8be9a8d
--- /dev/null
+++ b/vm/mterp/armv5/OP_IF_LT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/bincmp.S" { "revcmp":"ge" }
diff --git a/vm/mterp/armv5/OP_IF_LTZ.S b/vm/mterp/armv5/OP_IF_LTZ.S
new file mode 100644
index 0000000..84052b6
--- /dev/null
+++ b/vm/mterp/armv5/OP_IF_LTZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/zcmp.S" { "revcmp":"ge" }
diff --git a/vm/mterp/armv5/OP_IF_NE.S b/vm/mterp/armv5/OP_IF_NE.S
new file mode 100644
index 0000000..bd42a70
--- /dev/null
+++ b/vm/mterp/armv5/OP_IF_NE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/bincmp.S" { "revcmp":"eq" }
diff --git a/vm/mterp/armv5/OP_IF_NEZ.S b/vm/mterp/armv5/OP_IF_NEZ.S
new file mode 100644
index 0000000..a418241
--- /dev/null
+++ b/vm/mterp/armv5/OP_IF_NEZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/zcmp.S" { "revcmp":"eq" }
diff --git a/vm/mterp/armv5/OP_IGET.S b/vm/mterp/armv5/OP_IGET.S
new file mode 100644
index 0000000..a347e5c
--- /dev/null
+++ b/vm/mterp/armv5/OP_IGET.S
@@ -0,0 +1,47 @@
+%default { "load":"ldr", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .L${opcode}_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ resolve successful?
+    bne     .L${opcode}_finish
+    b       common_exceptionThrown
+%break
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_finish:
+    @bl      common_squeak${sqnum}
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    $load   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    and     r2, r2, #15                 @ r2<- A
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[A]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_IGET_BOOLEAN.S b/vm/mterp/armv5/OP_IGET_BOOLEAN.S
new file mode 100644
index 0000000..acdd60d
--- /dev/null
+++ b/vm/mterp/armv5/OP_IGET_BOOLEAN.S
@@ -0,0 +1,3 @@
+%verify "executed"
+@include "armv5/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
+%include "armv5/OP_IGET.S" { "load":"ldr", "sqnum":"1" }
diff --git a/vm/mterp/armv5/OP_IGET_BYTE.S b/vm/mterp/armv5/OP_IGET_BYTE.S
new file mode 100644
index 0000000..52c0955
--- /dev/null
+++ b/vm/mterp/armv5/OP_IGET_BYTE.S
@@ -0,0 +1,4 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+@include "armv5/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
+%include "armv5/OP_IGET.S" { "load":"ldr", "sqnum":"2" }
diff --git a/vm/mterp/armv5/OP_IGET_CHAR.S b/vm/mterp/armv5/OP_IGET_CHAR.S
new file mode 100644
index 0000000..cc7501a
--- /dev/null
+++ b/vm/mterp/armv5/OP_IGET_CHAR.S
@@ -0,0 +1,4 @@
+%verify "executed"
+%verify "large values are not sign-extended"
+@include "armv5/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
+%include "armv5/OP_IGET.S" { "load":"ldr", "sqnum":"3" }
diff --git a/vm/mterp/armv5/OP_IGET_OBJECT.S b/vm/mterp/armv5/OP_IGET_OBJECT.S
new file mode 100644
index 0000000..108f9a5
--- /dev/null
+++ b/vm/mterp/armv5/OP_IGET_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_IGET.S"
diff --git a/vm/mterp/armv5/OP_IGET_OBJECT_QUICK.S b/vm/mterp/armv5/OP_IGET_OBJECT_QUICK.S
new file mode 100644
index 0000000..546d435
--- /dev/null
+++ b/vm/mterp/armv5/OP_IGET_OBJECT_QUICK.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_IGET_QUICK.S"
diff --git a/vm/mterp/armv5/OP_IGET_QUICK.S b/vm/mterp/armv5/OP_IGET_QUICK.S
new file mode 100644
index 0000000..f248bc2
--- /dev/null
+++ b/vm/mterp/armv5/OP_IGET_QUICK.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "null object"
+    /* For: iget-quick, iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    GET_VREG(r3, r2)                    @ r3<- object we're operating on
+    FETCH(r1, 1)                        @ r1<- field byte offset
+    cmp     r3, #0                      @ check object for null
+    mov     r2, rINST, lsr #8           @ r2<- A(+)
+    beq     common_errNullObject        @ object was null
+    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    and     r2, r2, #15                 @ r2<- A
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[A]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_IGET_SHORT.S b/vm/mterp/armv5/OP_IGET_SHORT.S
new file mode 100644
index 0000000..7c8b0ee
--- /dev/null
+++ b/vm/mterp/armv5/OP_IGET_SHORT.S
@@ -0,0 +1,4 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+@include "armv5/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
+%include "armv5/OP_IGET.S" { "load":"ldr", "sqnum":"4" }
diff --git a/vm/mterp/armv5/OP_IGET_WIDE.S b/vm/mterp/armv5/OP_IGET_WIDE.S
new file mode 100644
index 0000000..22377d9
--- /dev/null
+++ b/vm/mterp/armv5/OP_IGET_WIDE.S
@@ -0,0 +1,44 @@
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Wide (64-bit) instance field get.
+     */
+    /* iget-wide vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .L${opcode}_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ resolve successful?
+    bne     .L${opcode}_finish
+    b       common_exceptionThrown
+%break
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_finish:
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
+    and     r2, r2, #15                 @ r2<- A
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_IGET_WIDE_QUICK.S b/vm/mterp/armv5/OP_IGET_WIDE_QUICK.S
new file mode 100644
index 0000000..ece7e7a
--- /dev/null
+++ b/vm/mterp/armv5/OP_IGET_WIDE_QUICK.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "null object"
+    /* iget-wide-quick vA, vB, offset@CCCC */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    GET_VREG(r3, r2)                    @ r3<- object we're operating on
+    FETCH(r1, 1)                        @ r1<- field byte offset
+    cmp     r3, #0                      @ check object for null
+    mov     r2, rINST, lsr #8           @ r2<- A(+)
+    beq     common_errNullObject        @ object was null
+    ldrd    r0, [r3, r1]                @ r0/r1<- obj.field (64 bits, aligned)
+    and     r2, r2, #15                 @ r2<- A
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_INSTANCE_OF.S b/vm/mterp/armv5/OP_INSTANCE_OF.S
new file mode 100644
index 0000000..da9f450
--- /dev/null
+++ b/vm/mterp/armv5/OP_INSTANCE_OF.S
@@ -0,0 +1,86 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     */
+    /* instance-of vA, vB, class@CCCC */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r3)                    @ r0<- vB (object)
+    and     r9, r9, #15                 @ r9<- A
+    cmp     r0, #0                      @ is object null?
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
+    beq     .L${opcode}_store           @ null obj, not an instance, store r0
+    FETCH(r3, 1)                        @ r3<- CCCC
+    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
+    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .L${opcode}_resolve         @ not resolved, do it now
+.L${opcode}_resolved: @ r0=obj->clazz, r1=resolved class
+    cmp     r0, r1                      @ same class (trivial success)?
+    beq     .L${opcode}_trivial         @ yes, trivial finish
+    b       .L${opcode}_fullcheck       @ no, do full check
+%break
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from CCCC
+     *  r9 holds A
+     */
+.L${opcode}_fullcheck:
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    @ fall through to ${opcode}_store
+
+    /*
+     * r0 holds boolean result
+     * r9 holds A
+     */
+.L${opcode}_store:
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vA<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Trivial test succeeded, save and bail.
+     *  r9 holds A
+     */
+.L${opcode}_trivial:
+    mov     r0, #1                      @ indicate success
+    @ could b ${opcode}_store, but copying is faster and cheaper
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vA<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r3 holds CCCC
+     *  r9 holds A
+     */
+.L${opcode}_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
+    mov     r1, r3                      @ r1<- CCCC
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    mov     r1, r0                      @ r1<- class resolved from CCCC
+    mov     r3, rINST, lsr #12          @ r3<- B
+    GET_VREG(r0, r3)                    @ r0<- vB (object)
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    b       .L${opcode}_resolved        @ pick up where we left off
+
diff --git a/vm/mterp/armv5/OP_INT_TO_BYTE.S b/vm/mterp/armv5/OP_INT_TO_BYTE.S
new file mode 100644
index 0000000..568193e
--- /dev/null
+++ b/vm/mterp/armv5/OP_INT_TO_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unop.S" {"preinstr":"mov     r0, r0, asl #24", "instr":"mov     r0, r0, asr #24"}
diff --git a/vm/mterp/armv5/OP_INT_TO_CHAR.S b/vm/mterp/armv5/OP_INT_TO_CHAR.S
new file mode 100644
index 0000000..15da1f0
--- /dev/null
+++ b/vm/mterp/armv5/OP_INT_TO_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unop.S" {"preinstr":"mov     r0, r0, asl #16", "instr":"mov     r0, r0, lsr #16"}
diff --git a/vm/mterp/armv5/OP_INT_TO_DOUBLE.S b/vm/mterp/armv5/OP_INT_TO_DOUBLE.S
new file mode 100644
index 0000000..342ad84
--- /dev/null
+++ b/vm/mterp/armv5/OP_INT_TO_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unopWider.S" {"instr":"bl      __aeabi_i2d"}
diff --git a/vm/mterp/armv5/OP_INT_TO_FLOAT.S b/vm/mterp/armv5/OP_INT_TO_FLOAT.S
new file mode 100644
index 0000000..464df47
--- /dev/null
+++ b/vm/mterp/armv5/OP_INT_TO_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unop.S" {"instr":"bl      __aeabi_i2f"}
diff --git a/vm/mterp/armv5/OP_INT_TO_LONG.S b/vm/mterp/armv5/OP_INT_TO_LONG.S
new file mode 100644
index 0000000..31c0216
--- /dev/null
+++ b/vm/mterp/armv5/OP_INT_TO_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unopWider.S" {"instr":"mov     r1, r0, asr #31"}
diff --git a/vm/mterp/armv5/OP_INT_TO_SHORT.S b/vm/mterp/armv5/OP_INT_TO_SHORT.S
new file mode 100644
index 0000000..d15de8b
--- /dev/null
+++ b/vm/mterp/armv5/OP_INT_TO_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unop.S" {"preinstr":"mov     r0, r0, asl #16", "instr":"mov     r0, r0, asr #16"}
diff --git a/vm/mterp/armv5/OP_INVOKE_DIRECT.S b/vm/mterp/armv5/OP_INVOKE_DIRECT.S
new file mode 100644
index 0000000..c8d551c
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_DIRECT.S
@@ -0,0 +1,48 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoid loading the first arg twice.)
+     *
+     * for: invoke-direct, invoke-direct/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    FETCH(r10, 2)                       @ r10<- GFED or CCCC
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    .if     (!$isrange)
+    and     r10, r10, #15               @ r10<- D (or stays CCCC)
+    .endif
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    beq     .L${opcode}_resolve         @ not resolved, do it now
+.L${opcode}_finish:
+    cmp     r2, #0                      @ null "this" ref?
+    bne     common_invokeMethod${routine}   @ no, continue on
+    b       common_errNullObject        @ yes, throw exception
+%break
+
+    /*
+     * On entry:
+     *  r1 = reference (BBBB or CCCC)
+     *  r10 = "this" register
+     */
+.L${opcode}_resolve:
+    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_DIRECT          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
+    bne     .L${opcode}_finish          @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
diff --git a/vm/mterp/armv5/OP_INVOKE_DIRECT_EMPTY.S b/vm/mterp/armv5/OP_INVOKE_DIRECT_EMPTY.S
new file mode 100644
index 0000000..3c6b192
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_DIRECT_EMPTY.S
@@ -0,0 +1,7 @@
+%verify "executed"
+    /*
+     * invoke-direct-empty is a no-op in a "standard" interpreter.
+     */
+    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    GOTO_OPCODE(ip)                     @ execute it
diff --git a/vm/mterp/armv5/OP_INVOKE_DIRECT_RANGE.S b/vm/mterp/armv5/OP_INVOKE_DIRECT_RANGE.S
new file mode 100644
index 0000000..1a6fd37
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_DIRECT_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_INVOKE_DIRECT.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/armv5/OP_INVOKE_INTERFACE.S b/vm/mterp/armv5/OP_INVOKE_INTERFACE.S
new file mode 100644
index 0000000..5463d5c
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_INTERFACE.S
@@ -0,0 +1,28 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
+    FETCH(r1, 1)                        @ r1<- BBBB
+    .if     (!$isrange)
+    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
+    .endif
+    EXPORT_PC()                         @ must export for invoke
+    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
+    cmp     r0, #0                      @ null obj?
+    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
+    beq     common_errNullObject        @ yes, fail
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
+    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       common_invokeMethod${routine} @ jump to common handler 
+
diff --git a/vm/mterp/armv5/OP_INVOKE_INTERFACE_RANGE.S b/vm/mterp/armv5/OP_INVOKE_INTERFACE_RANGE.S
new file mode 100644
index 0000000..8ed5518
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_INTERFACE_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_INVOKE_INTERFACE.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/armv5/OP_INVOKE_STATIC.S b/vm/mterp/armv5/OP_INVOKE_STATIC.S
new file mode 100644
index 0000000..57d337b
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_STATIC.S
@@ -0,0 +1,25 @@
+%default { "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a static method call.
+     *
+     * for: invoke-static, invoke-static/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     common_invokeMethod${routine} @ yes, continue on
+0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_STATIC          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     common_invokeMethod${routine} @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
diff --git a/vm/mterp/armv5/OP_INVOKE_STATIC_RANGE.S b/vm/mterp/armv5/OP_INVOKE_STATIC_RANGE.S
new file mode 100644
index 0000000..a972a9e
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_STATIC_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_INVOKE_STATIC.S" { "routine":"Range" }
diff --git a/vm/mterp/armv5/OP_INVOKE_SUPER.S b/vm/mterp/armv5/OP_INVOKE_SUPER.S
new file mode 100644
index 0000000..f24f690
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_SUPER.S
@@ -0,0 +1,61 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(r10, 2)                       @ r10<- GFED or CCCC
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    .if     (!$isrange)
+    and     r10, r10, #15               @ r10<- D (or stays CCCC)
+    .endif
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r2, #0                      @ null "this"?
+    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
+    beq     common_errNullObject        @ null "this", throw exception
+    cmp     r0, #0                      @ already resolved?
+    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
+    EXPORT_PC()                         @ must export for invoke
+    bne     .L${opcode}_continue        @ resolved, continue on
+    b       .L${opcode}_resolve         @ do resolve now
+%break
+
+    /*
+     * At this point:
+     *  r0 = resolved base method
+     *  r9 = method->clazz
+     */
+.L${opcode}_continue:
+    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
+    EXPORT_PC()                         @ must export for invoke
+    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
+    bcs     .L${opcode}_nsm             @ method not present in superclass
+    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
+    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethod${routine} @ continue on
+
+.L${opcode}_resolve:
+    mov     r0, r9                      @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .L${opcode}_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  r0 = resolved base method
+     */
+.L${opcode}_nsm:
+    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
+    b       common_errNoSuchMethod
+
diff --git a/vm/mterp/armv5/OP_INVOKE_SUPER_QUICK.S b/vm/mterp/armv5/OP_INVOKE_SUPER_QUICK.S
new file mode 100644
index 0000000..90b2a91
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_SUPER_QUICK.S
@@ -0,0 +1,26 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle an optimized "super" method call.
+     *
+     * for: [opt] invoke-super-quick, invoke-super-quick/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(r10, 2)                       @ r10<- GFED or CCCC
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    .if     (!$isrange)
+    and     r10, r10, #15               @ r10<- D (or stays CCCC)
+    .endif
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r2, [r2, #offMethod_clazz]  @ r2<- method->clazz
+    EXPORT_PC()                         @ must export for invoke
+    ldr     r2, [r2, #offClassObject_super]     @ r2<- method->clazz->super
+    GET_VREG(r3, r10)                   @ r3<- "this"
+    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- ...clazz->super->vtable
+    cmp     r3, #0                      @ null "this" ref?
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- super->vtable[BBBB]
+    beq     common_errNullObject        @ "this" is null, throw exception
+    bl      common_invokeMethod${routine} @ continue on
+
diff --git a/vm/mterp/armv5/OP_INVOKE_SUPER_QUICK_RANGE.S b/vm/mterp/armv5/OP_INVOKE_SUPER_QUICK_RANGE.S
new file mode 100644
index 0000000..2b9ac78
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_SUPER_QUICK_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_INVOKE_SUPER_QUICK.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/armv5/OP_INVOKE_SUPER_RANGE.S b/vm/mterp/armv5/OP_INVOKE_SUPER_RANGE.S
new file mode 100644
index 0000000..a0b9f75
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_SUPER_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_INVOKE_SUPER.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/armv5/OP_INVOKE_VIRTUAL.S b/vm/mterp/armv5/OP_INVOKE_VIRTUAL.S
new file mode 100644
index 0000000..33dc108
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_VIRTUAL.S
@@ -0,0 +1,46 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    FETCH(r10, 2)                       @ r10<- GFED or CCCC
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    .if     (!$isrange)
+    and     r10, r10, #15               @ r10<- D (or stays CCCC)
+    .endif
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     .L${opcode}_continue        @ yes, continue on
+    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .L${opcode}_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+%break
+
+    /*
+     * At this point:
+     *  r0 = resolved base method
+     *  r10 = C or CCCC (index of first arg, which is the "this" ptr)
+     */
+.L${opcode}_continue:
+    GET_VREG(r1, r10)                   @ r1<- "this" ptr
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    cmp     r1, #0                      @ is "this" null?
+    beq     common_errNullObject        @ null "this", throw exception
+    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
+    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethod${routine} @ continue on
+
diff --git a/vm/mterp/armv5/OP_INVOKE_VIRTUAL_QUICK.S b/vm/mterp/armv5/OP_INVOKE_VIRTUAL_QUICK.S
new file mode 100644
index 0000000..bc34023
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_VIRTUAL_QUICK.S
@@ -0,0 +1,23 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "null object"
+    /*
+     * Handle an optimized virtual method call.
+     *
+     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
+    FETCH(r1, 1)                        @ r1<- BBBB
+    .if     (!$isrange)
+    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
+    .endif
+    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
+    cmp     r2, #0                      @ is "this" null?
+    beq     common_errNullObject        @ null "this", throw exception
+    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
+    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- thisPtr->clazz->vtable
+    EXPORT_PC()                         @ invoke must export
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB]
+    bl      common_invokeMethod${routine} @ continue on
diff --git a/vm/mterp/armv5/OP_INVOKE_VIRTUAL_QUICK_RANGE.S b/vm/mterp/armv5/OP_INVOKE_VIRTUAL_QUICK_RANGE.S
new file mode 100644
index 0000000..c43ce3a
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_VIRTUAL_QUICK_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_INVOKE_VIRTUAL_QUICK.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/armv5/OP_INVOKE_VIRTUAL_RANGE.S b/vm/mterp/armv5/OP_INVOKE_VIRTUAL_RANGE.S
new file mode 100644
index 0000000..5ca501c
--- /dev/null
+++ b/vm/mterp/armv5/OP_INVOKE_VIRTUAL_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_INVOKE_VIRTUAL.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/armv5/OP_IPUT.S b/vm/mterp/armv5/OP_IPUT.S
new file mode 100644
index 0000000..35c139f
--- /dev/null
+++ b/vm/mterp/armv5/OP_IPUT.S
@@ -0,0 +1,47 @@
+%default { "store":"str", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .L${opcode}_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ success?
+    bne     .L${opcode}_finish          @ yes, finish up
+    b       common_exceptionThrown
+%break
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_finish:
+    @bl      common_squeak${sqnum}
+    mov     r1, rINST, lsr #8           @ r1<- A+
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    and     r1, r1, #15                 @ r1<- A
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[A]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    $store  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_IPUT_BOOLEAN.S b/vm/mterp/armv5/OP_IPUT_BOOLEAN.S
new file mode 100644
index 0000000..1af9629
--- /dev/null
+++ b/vm/mterp/armv5/OP_IPUT_BOOLEAN.S
@@ -0,0 +1,3 @@
+%verify "executed"
+@include "armv5/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
+%include "armv5/OP_IPUT.S" { "store":"str", "sqnum":"1" }
diff --git a/vm/mterp/armv5/OP_IPUT_BYTE.S b/vm/mterp/armv5/OP_IPUT_BYTE.S
new file mode 100644
index 0000000..8385e07
--- /dev/null
+++ b/vm/mterp/armv5/OP_IPUT_BYTE.S
@@ -0,0 +1,3 @@
+%verify "executed"
+@include "armv5/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
+%include "armv5/OP_IPUT.S" { "store":"str", "sqnum":"2" }
diff --git a/vm/mterp/armv5/OP_IPUT_CHAR.S b/vm/mterp/armv5/OP_IPUT_CHAR.S
new file mode 100644
index 0000000..c0220c0
--- /dev/null
+++ b/vm/mterp/armv5/OP_IPUT_CHAR.S
@@ -0,0 +1,3 @@
+%verify "executed"
+@include "armv5/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
+%include "armv5/OP_IPUT.S" { "store":"str", "sqnum":"3" }
diff --git a/vm/mterp/armv5/OP_IPUT_OBJECT.S b/vm/mterp/armv5/OP_IPUT_OBJECT.S
new file mode 100644
index 0000000..83f3665
--- /dev/null
+++ b/vm/mterp/armv5/OP_IPUT_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_IPUT.S"
diff --git a/vm/mterp/armv5/OP_IPUT_OBJECT_QUICK.S b/vm/mterp/armv5/OP_IPUT_OBJECT_QUICK.S
new file mode 100644
index 0000000..d46b1d8
--- /dev/null
+++ b/vm/mterp/armv5/OP_IPUT_OBJECT_QUICK.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_IPUT_QUICK.S"
diff --git a/vm/mterp/armv5/OP_IPUT_QUICK.S b/vm/mterp/armv5/OP_IPUT_QUICK.S
new file mode 100644
index 0000000..c33a738
--- /dev/null
+++ b/vm/mterp/armv5/OP_IPUT_QUICK.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "null object"
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    GET_VREG(r3, r2)                    @ r3<- fp[B], the object pointer
+    FETCH(r1, 1)                        @ r1<- field byte offset
+    cmp     r3, #0                      @ check object for null
+    mov     r2, rINST, lsr #8           @ r2<- A(+)
+    beq     common_errNullObject        @ object was null
+    and     r2, r2, #15
+    GET_VREG(r0, r2)                    @ r0<- fp[A]
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_IPUT_SHORT.S b/vm/mterp/armv5/OP_IPUT_SHORT.S
new file mode 100644
index 0000000..f53b80b
--- /dev/null
+++ b/vm/mterp/armv5/OP_IPUT_SHORT.S
@@ -0,0 +1,3 @@
+%verify "executed"
+@include "armv5/OP_IPUT.S" { "store":"strh", "sqnum":"4" }
+%include "armv5/OP_IPUT.S" { "store":"str", "sqnum":"4" }
diff --git a/vm/mterp/armv5/OP_IPUT_WIDE.S b/vm/mterp/armv5/OP_IPUT_WIDE.S
new file mode 100644
index 0000000..75465ec
--- /dev/null
+++ b/vm/mterp/armv5/OP_IPUT_WIDE.S
@@ -0,0 +1,41 @@
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /* iput-wide vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .L${opcode}_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ success?
+    bne     .L${opcode}_finish          @ yes, finish up
+    b       common_exceptionThrown
+%break
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.L${opcode}_finish:
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    cmp     r9, #0                      @ check object for null
+    and     r2, r2, #15                 @ r2<- A
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[A]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_IPUT_WIDE_QUICK.S b/vm/mterp/armv5/OP_IPUT_WIDE_QUICK.S
new file mode 100644
index 0000000..290591c
--- /dev/null
+++ b/vm/mterp/armv5/OP_IPUT_WIDE_QUICK.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "null object"
+    /* iput-wide-quick vA, vB, offset@CCCC */
+    mov     r0, rINST, lsr #8           @ r0<- A(+)
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r0, r0, #15
+    GET_VREG(r2, r1)                    @ r2<- fp[B], the object pointer
+    add     r3, rFP, r0, lsl #2         @ r3<- &fp[A]
+    cmp     r2, #0                      @ check object for null
+    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[A]
+    beq     common_errNullObject        @ object was null
+    FETCH(r3, 1)                        @ r3<- field byte offset
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    strd    r0, [r2, r3]                @ obj.field (64 bits, aligned)<- r0/r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_LONG_TO_DOUBLE.S b/vm/mterp/armv5/OP_LONG_TO_DOUBLE.S
new file mode 100644
index 0000000..a10258b
--- /dev/null
+++ b/vm/mterp/armv5/OP_LONG_TO_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unopWide.S" {"instr":"bl      __aeabi_l2d"}
diff --git a/vm/mterp/armv5/OP_LONG_TO_FLOAT.S b/vm/mterp/armv5/OP_LONG_TO_FLOAT.S
new file mode 100644
index 0000000..9e1e998
--- /dev/null
+++ b/vm/mterp/armv5/OP_LONG_TO_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unopNarrower.S" {"instr":"bl      __aeabi_l2f"}
diff --git a/vm/mterp/armv5/OP_LONG_TO_INT.S b/vm/mterp/armv5/OP_LONG_TO_INT.S
new file mode 100644
index 0000000..3dfda7d
--- /dev/null
+++ b/vm/mterp/armv5/OP_LONG_TO_INT.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%include "armv5/OP_MOVE.S"
diff --git a/vm/mterp/armv5/OP_MONITOR_ENTER.S b/vm/mterp/armv5/OP_MONITOR_ENTER.S
new file mode 100644
index 0000000..6d4c2d8
--- /dev/null
+++ b/vm/mterp/armv5/OP_MONITOR_ENTER.S
@@ -0,0 +1,25 @@
+%verify "executed"
+%verify "exception for null object"
+    /*
+     * Synchronize on an object.
+     */
+    /* monitor-enter vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    GET_VREG(r1, r2)                    @ r1<- vAA (object)
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    cmp     r1, #0                      @ null object?
+#ifdef WITH_MONITOR_TRACKING
+    EXPORT_PC()                         @ export PC so we can grab stack trace
+#endif
+    beq     common_errNullObject        @ null object, throw an exception
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    bl      dvmLockObject               @ call(self, obj)
+#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    ldr     r1, [r0, #offThread_exception] @ check for exception
+    cmp     r1, #0
+    bne     common_exceptionThrown      @ exception raised, bail out
+#endif
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_MONITOR_EXIT.S b/vm/mterp/armv5/OP_MONITOR_EXIT.S
new file mode 100644
index 0000000..b334ae9
--- /dev/null
+++ b/vm/mterp/armv5/OP_MONITOR_EXIT.S
@@ -0,0 +1,24 @@
+%verify "executed"
+%verify "exception for null object (impossible in javac)"
+%verify "dvmUnlockObject fails"
+    /*
+     * Unlock an object.
+     *
+     * Exceptions that occur when unlocking a monitor need to appear as
+     * if they happened at the following instruction.  See the Dalvik
+     * instruction spec.
+     */
+    /* monitor-exit vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    EXPORT_PC()                         @ before fetch: export the PC
+    GET_VREG(r1, r2)                    @ r1<- vAA (object)
+    cmp     r1, #0                      @ null object?
+    beq     common_errNullObject        @ yes
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, exception is pending
+    FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_MOVE.S b/vm/mterp/armv5/OP_MOVE.S
new file mode 100644
index 0000000..48db45f
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE.S
@@ -0,0 +1,12 @@
+%verify "executed"
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
+    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    GET_VREG(r2, r1)                    @ r2<- fp[B]
+    and     r0, r0, #15
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    SET_VREG(r2, r0)                    @ fp[A]<- r2
+    GOTO_OPCODE(ip)                     @ execute next instruction
+
diff --git a/vm/mterp/armv5/OP_MOVE_16.S b/vm/mterp/armv5/OP_MOVE_16.S
new file mode 100644
index 0000000..f20f57b
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE_16.S
@@ -0,0 +1,11 @@
+%verify "executed"
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    FETCH(r1, 2)                        @ r1<- BBBB
+    FETCH(r0, 1)                        @ r0<- AAAA
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_MOVE_EXCEPTION.S b/vm/mterp/armv5/OP_MOVE_EXCEPTION.S
new file mode 100644
index 0000000..45e66fa
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE_EXCEPTION.S
@@ -0,0 +1,12 @@
+%verify "executed"
+    /* move-exception vAA */
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
+    mov     r1, #0                      @ r1<- 0
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_MOVE_FROM16.S b/vm/mterp/armv5/OP_MOVE_FROM16.S
new file mode 100644
index 0000000..fe54374
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE_FROM16.S
@@ -0,0 +1,11 @@
+%verify "executed"
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    FETCH(r1, 1)                        @ r1<- BBBB
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r0)                    @ fp[AA]<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_MOVE_OBJECT.S b/vm/mterp/armv5/OP_MOVE_OBJECT.S
new file mode 100644
index 0000000..4003409
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_MOVE.S"
diff --git a/vm/mterp/armv5/OP_MOVE_OBJECT_16.S b/vm/mterp/armv5/OP_MOVE_OBJECT_16.S
new file mode 100644
index 0000000..3ce9af7
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE_OBJECT_16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_MOVE_16.S"
diff --git a/vm/mterp/armv5/OP_MOVE_OBJECT_FROM16.S b/vm/mterp/armv5/OP_MOVE_OBJECT_FROM16.S
new file mode 100644
index 0000000..4abb4bf
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE_OBJECT_FROM16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_MOVE_FROM16.S"
diff --git a/vm/mterp/armv5/OP_MOVE_RESULT.S b/vm/mterp/armv5/OP_MOVE_RESULT.S
new file mode 100644
index 0000000..d19de23
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE_RESULT.S
@@ -0,0 +1,10 @@
+%verify "executed"
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[AA]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_MOVE_RESULT_OBJECT.S b/vm/mterp/armv5/OP_MOVE_RESULT_OBJECT.S
new file mode 100644
index 0000000..0d5a4d8
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE_RESULT_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_MOVE_RESULT.S"
diff --git a/vm/mterp/armv5/OP_MOVE_RESULT_WIDE.S b/vm/mterp/armv5/OP_MOVE_RESULT_WIDE.S
new file mode 100644
index 0000000..7bb8254
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE_RESULT_WIDE.S
@@ -0,0 +1,11 @@
+%verify "executed"
+    /* move-result-wide vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_MOVE_WIDE.S b/vm/mterp/armv5/OP_MOVE_WIDE.S
new file mode 100644
index 0000000..6bcc89a
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE_WIDE.S
@@ -0,0 +1,14 @@
+%verify "executed"
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    mov     r2, rINST, lsr #8           @ r2<- A(+)
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r2, r2, #15
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_MOVE_WIDE_16.S b/vm/mterp/armv5/OP_MOVE_WIDE_16.S
new file mode 100644
index 0000000..1e0b5f2
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE_WIDE_16.S
@@ -0,0 +1,13 @@
+%verify "executed"
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    FETCH(r3, 2)                        @ r3<- BBBB
+    FETCH(r2, 1)                        @ r2<- AAAA
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AAAA]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r2, {r0-r1}                 @ fp[AAAA]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_MOVE_WIDE_FROM16.S b/vm/mterp/armv5/OP_MOVE_WIDE_FROM16.S
new file mode 100644
index 0000000..0771dbc
--- /dev/null
+++ b/vm/mterp/armv5/OP_MOVE_WIDE_FROM16.S
@@ -0,0 +1,13 @@
+%verify "executed"
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    FETCH(r3, 1)                        @ r3<- BBBB
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_MUL_DOUBLE.S b/vm/mterp/armv5/OP_MUL_DOUBLE.S
new file mode 100644
index 0000000..d3ed1b7
--- /dev/null
+++ b/vm/mterp/armv5/OP_MUL_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide.S" {"instr":"bl      __aeabi_dmul"}
diff --git a/vm/mterp/armv5/OP_MUL_DOUBLE_2ADDR.S b/vm/mterp/armv5/OP_MUL_DOUBLE_2ADDR.S
new file mode 100644
index 0000000..4cc9cd6
--- /dev/null
+++ b/vm/mterp/armv5/OP_MUL_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide2addr.S" {"instr":"bl      __aeabi_dmul"}
diff --git a/vm/mterp/armv5/OP_MUL_FLOAT.S b/vm/mterp/armv5/OP_MUL_FLOAT.S
new file mode 100644
index 0000000..7b3d51c
--- /dev/null
+++ b/vm/mterp/armv5/OP_MUL_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"instr":"bl      __aeabi_fmul"}
diff --git a/vm/mterp/armv5/OP_MUL_FLOAT_2ADDR.S b/vm/mterp/armv5/OP_MUL_FLOAT_2ADDR.S
new file mode 100644
index 0000000..a440145
--- /dev/null
+++ b/vm/mterp/armv5/OP_MUL_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"instr":"bl      __aeabi_fmul"}
diff --git a/vm/mterp/armv5/OP_MUL_INT.S b/vm/mterp/armv5/OP_MUL_INT.S
new file mode 100644
index 0000000..178f479
--- /dev/null
+++ b/vm/mterp/armv5/OP_MUL_INT.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%include "armv5/binop.S" {"instr":"mul     r0, r1, r0"}
diff --git a/vm/mterp/armv5/OP_MUL_INT_2ADDR.S b/vm/mterp/armv5/OP_MUL_INT_2ADDR.S
new file mode 100644
index 0000000..270ccb5
--- /dev/null
+++ b/vm/mterp/armv5/OP_MUL_INT_2ADDR.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%include "armv5/binop2addr.S" {"instr":"mul     r0, r1, r0"}
diff --git a/vm/mterp/armv5/OP_MUL_INT_LIT16.S b/vm/mterp/armv5/OP_MUL_INT_LIT16.S
new file mode 100644
index 0000000..2f135d6
--- /dev/null
+++ b/vm/mterp/armv5/OP_MUL_INT_LIT16.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%include "armv5/binopLit16.S" {"instr":"mul     r0, r1, r0"}
diff --git a/vm/mterp/armv5/OP_MUL_INT_LIT8.S b/vm/mterp/armv5/OP_MUL_INT_LIT8.S
new file mode 100644
index 0000000..8a768b4
--- /dev/null
+++ b/vm/mterp/armv5/OP_MUL_INT_LIT8.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%include "armv5/binopLit8.S" {"instr":"mul     r0, r1, r0"}
diff --git a/vm/mterp/armv5/OP_MUL_LONG.S b/vm/mterp/armv5/OP_MUL_LONG.S
new file mode 100644
index 0000000..b812ca1
--- /dev/null
+++ b/vm/mterp/armv5/OP_MUL_LONG.S
@@ -0,0 +1,42 @@
+%verify "executed"
+    /*
+     * Signed 64-bit integer multiply.
+     *
+     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+     *        WX
+     *      x YZ
+     *  --------
+     *     ZW ZX
+     *  YW YX
+     *
+     * The low word of the result holds ZX, the high word holds
+     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
+     * it doesn't fit in the low 64 bits.
+     *
+     * Unlike most ARM math operations, multiply instructions have
+     * restrictions on using the same register more than once (Rd and Rm
+     * cannot be the same).
+     */
+    /* mul-long vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
+    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
+    mul     ip, r2, r1                  @  ip<- ZxW
+    umull   r9, r10, r2, r0             @  r9/r10 <- ZxX
+    mla     r2, r0, r3, ip              @  r2<- YxX + (ZxW)
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    add     r10, r2, r10                @  r10<- r10 + low(ZxW + (YxX))
+    add     r0, rFP, r0, lsl #2         @ r0<- &fp[AA]
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    b       .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r0, {r9-r10}                @ vAA/vAA+1<- r9/r10
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_MUL_LONG_2ADDR.S b/vm/mterp/armv5/OP_MUL_LONG_2ADDR.S
new file mode 100644
index 0000000..4c1bcb8
--- /dev/null
+++ b/vm/mterp/armv5/OP_MUL_LONG_2ADDR.S
@@ -0,0 +1,27 @@
+%verify "executed"
+    /*
+     * Signed 64-bit integer multiply, "/2addr" version.
+     *
+     * See OP_MUL_LONG for an explanation.
+     *
+     * We get a little tight on registers, so to avoid looking up &fp[A]
+     * again we stuff it into rINST.
+     */
+    /* mul-long/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     rINST, rFP, r9, lsl #2      @ rINST<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
+    ldmia   rINST, {r0-r1}              @ r0/r1<- vA/vA+1
+    mul     ip, r2, r1                  @  ip<- ZxW
+    umull   r9, r10, r2, r0             @  r9/r10 <- ZxX
+    mla     r2, r0, r3, ip              @  r2<- YxX + (ZxW)
+    mov     r0, rINST                   @ r0<- &fp[A] (free up rINST)
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    add     r10, r2, r10                @  r10<- r10 + low(ZxW + (YxX))
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r0, {r9-r10}                @ vA/vA+1<- r9/r10
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_NEG_DOUBLE.S b/vm/mterp/armv5/OP_NEG_DOUBLE.S
new file mode 100644
index 0000000..c8748a7
--- /dev/null
+++ b/vm/mterp/armv5/OP_NEG_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unopWide.S" {"instr":"add     r1, r1, #0x80000000"}
diff --git a/vm/mterp/armv5/OP_NEG_FLOAT.S b/vm/mterp/armv5/OP_NEG_FLOAT.S
new file mode 100644
index 0000000..57e7bf3
--- /dev/null
+++ b/vm/mterp/armv5/OP_NEG_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unop.S" {"instr":"add     r0, r0, #0x80000000"}
diff --git a/vm/mterp/armv5/OP_NEG_INT.S b/vm/mterp/armv5/OP_NEG_INT.S
new file mode 100644
index 0000000..e3071c7
--- /dev/null
+++ b/vm/mterp/armv5/OP_NEG_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unop.S" {"instr":"rsb     r0, r0, #0"}
diff --git a/vm/mterp/armv5/OP_NEG_LONG.S b/vm/mterp/armv5/OP_NEG_LONG.S
new file mode 100644
index 0000000..9c6c688
--- /dev/null
+++ b/vm/mterp/armv5/OP_NEG_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unopWide.S" {"preinstr":"rsbs    r0, r0, #0", "instr":"rsc     r1, r1, #0"}
diff --git a/vm/mterp/armv5/OP_NEW_ARRAY.S b/vm/mterp/armv5/OP_NEW_ARRAY.S
new file mode 100644
index 0000000..55fc4f3
--- /dev/null
+++ b/vm/mterp/armv5/OP_NEW_ARRAY.S
@@ -0,0 +1,62 @@
+%verify "executed"
+%verify "negative array length"
+%verify "allocation fails"
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array vA, vB, class@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    FETCH(r2, 1)                        @ r2<- CCCC
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    GET_VREG(r1, r0)                    @ r1<- vB (array length)
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    cmp     r1, #0                      @ check length
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
+    bmi     common_errNegativeArraySize @ negative length, bail
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ req'd for resolve, alloc
+    bne     .L${opcode}_finish          @ resolved, continue
+    b       .L${opcode}_resolve         @ do resolve now
+%break
+
+
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *
+     *  r1 holds array length
+     *  r2 holds class ref CCCC
+     */
+.L${opcode}_resolve:
+    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    mov     r9, r1                      @ r9<- length (save)
+    mov     r1, r2                      @ r1<- CCCC
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    mov     r1, r9                      @ r1<- length (restore)
+    beq     common_exceptionThrown      @ yes, handle exception
+    @ fall through to ${opcode}_finish
+
+    /*
+     * Finish allocation.
+     *
+     *  r0 holds class
+     *  r1 holds array length
+     */
+.L${opcode}_finish:
+    mov     r2, #ALLOC_DONT_TRACK       @ don't track in local refs table
+    bl      dvmAllocArrayByClass        @ r0<- call(clazz, length, flags)
+    cmp     r0, #0                      @ failed?
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    and     r2, r2, #15                 @ r2<- A
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ vA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_NEW_INSTANCE.S b/vm/mterp/armv5/OP_NEW_INSTANCE.S
new file mode 100644
index 0000000..d1d2df6
--- /dev/null
+++ b/vm/mterp/armv5/OP_NEW_INSTANCE.S
@@ -0,0 +1,84 @@
+%verify "executed"
+%verify "class not resolved"
+%verify "class cannot be resolved"
+%verify "class not initialized"
+%verify "class fails to initialize"
+%verify "class already resolved/initialized"
+%verify "class is abstract or interface"
+%verify "allocation fails"
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance vAA, class@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    EXPORT_PC()                         @ req'd for init, resolve, alloc
+    cmp     r0, #0                      @ already resolved?
+    beq     .L${opcode}_resolve         @ no, resolve it now
+.L${opcode}_resolved:   @ r0=class
+    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
+    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
+    bne     .L${opcode}_needinit        @ no, init class now
+.L${opcode}_initialized: @ r0=class
+    ldr     r3, [r0, #offClassObject_accessFlags]   @ r3<- clazz->accessFlags
+    tst     r3, #(ACC_INTERFACE|ACC_ABSTRACT)   @ abstract or interface?
+    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
+    beq     .L${opcode}_finish          @ concrete class, continue
+    b       .L${opcode}_abstract        @ fail
+%break
+
+    .balign 32                          @ minimize cache lines
+.L${opcode}_finish: @ r0=class
+    bl      dvmAllocObject              @ r0<- new object
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r3)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Class initialization required.
+     *
+     *  r0 holds class object
+     */
+.L${opcode}_needinit:
+    mov     r9, r0                      @ save r0
+    bl      dvmInitClass                @ initialize class
+    cmp     r0, #0                      @ check boolean result
+    mov     r0, r9                      @ restore r0
+    bne     .L${opcode}_initialized     @ success, continue
+    b       common_exceptionThrown      @ failed, deal with init exception
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r1 holds BBBB
+     */
+.L${opcode}_resolve:
+    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    bne     .L${opcode}_resolved        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+    /*
+     * We can't instantiate an abstract class or interface, so throw an
+     * InstantiationError with the class descriptor as the message.
+     *
+     *  r0 holds class object
+     */
+.L${opcode}_abstract:
+    ldr     r1, [r0, #offClassObject_descriptor]
+    ldr     r0, .LstrInstantiationErrorPtr
+    bl      dvmThrowExceptionWithClassMessage
+    b       common_exceptionThrown
+
+.LstrInstantiationErrorPtr:
+    .word   .LstrInstantiationError
+
diff --git a/vm/mterp/armv5/OP_NOP.S b/vm/mterp/armv5/OP_NOP.S
new file mode 100644
index 0000000..1b72d3c
--- /dev/null
+++ b/vm/mterp/armv5/OP_NOP.S
@@ -0,0 +1,15 @@
+%verify "executed"
+    FETCH_ADVANCE_INST(1)               @ advance to next instr, load rINST
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    GOTO_OPCODE(ip)                     @ execute it
+
+#ifdef ASSIST_DEBUGGER
+    /* insert fake function header to help gdb find the stack frame */
+    .type   dalvik_inst, %function
+dalvik_inst:
+    .fnstart
+    MTERP_ENTRY1
+    MTERP_ENTRY2
+    .fnend
+#endif
+
diff --git a/vm/mterp/armv5/OP_NOT_INT.S b/vm/mterp/armv5/OP_NOT_INT.S
new file mode 100644
index 0000000..c4fe18b
--- /dev/null
+++ b/vm/mterp/armv5/OP_NOT_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unop.S" {"instr":"mvn     r0, r0"}
diff --git a/vm/mterp/armv5/OP_NOT_LONG.S b/vm/mterp/armv5/OP_NOT_LONG.S
new file mode 100644
index 0000000..46256cc
--- /dev/null
+++ b/vm/mterp/armv5/OP_NOT_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/unopWide.S" {"preinstr":"mvn     r0, r0", "instr":"mvn     r1, r1"}
diff --git a/vm/mterp/armv5/OP_OR_INT.S b/vm/mterp/armv5/OP_OR_INT.S
new file mode 100644
index 0000000..2ad6511
--- /dev/null
+++ b/vm/mterp/armv5/OP_OR_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"instr":"orr     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_OR_INT_2ADDR.S b/vm/mterp/armv5/OP_OR_INT_2ADDR.S
new file mode 100644
index 0000000..6526175
--- /dev/null
+++ b/vm/mterp/armv5/OP_OR_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"instr":"orr     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_OR_INT_LIT16.S b/vm/mterp/armv5/OP_OR_INT_LIT16.S
new file mode 100644
index 0000000..1c5e8e2
--- /dev/null
+++ b/vm/mterp/armv5/OP_OR_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit16.S" {"instr":"orr     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_OR_INT_LIT8.S b/vm/mterp/armv5/OP_OR_INT_LIT8.S
new file mode 100644
index 0000000..34be3e7
--- /dev/null
+++ b/vm/mterp/armv5/OP_OR_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit8.S" {"instr":"orr     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_OR_LONG.S b/vm/mterp/armv5/OP_OR_LONG.S
new file mode 100644
index 0000000..b7600c3
--- /dev/null
+++ b/vm/mterp/armv5/OP_OR_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide.S" {"preinstr":"orr     r0, r0, r2", "instr":"orr     r1, r1, r3"}
diff --git a/vm/mterp/armv5/OP_OR_LONG_2ADDR.S b/vm/mterp/armv5/OP_OR_LONG_2ADDR.S
new file mode 100644
index 0000000..7ea02a7
--- /dev/null
+++ b/vm/mterp/armv5/OP_OR_LONG_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide2addr.S" {"preinstr":"orr     r0, r0, r2", "instr":"orr     r1, r1, r3"}
diff --git a/vm/mterp/armv5/OP_PACKED_SWITCH.S b/vm/mterp/armv5/OP_PACKED_SWITCH.S
new file mode 100644
index 0000000..6fde05b
--- /dev/null
+++ b/vm/mterp/armv5/OP_PACKED_SWITCH.S
@@ -0,0 +1,26 @@
+%default { "func":"dvmInterpHandlePackedSwitch" }
+%verify executed
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBB */
+    FETCH(r0, 1)                        @ r0<- bbbb (lo)
+    FETCH(r1, 2)                        @ r1<- BBBB (hi)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
+    GET_VREG(r1, r3)                    @ r1<- vAA
+    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
+    bl      $func                       @ r0<- code-unit branch offset
+    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
+    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_REM_DOUBLE.S b/vm/mterp/armv5/OP_REM_DOUBLE.S
new file mode 100644
index 0000000..0d5ec5f
--- /dev/null
+++ b/vm/mterp/armv5/OP_REM_DOUBLE.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* EABI doesn't define a double remainder function, but libm does */
+%include "armv5/binopWide.S" {"instr":"bl      fmod"}
diff --git a/vm/mterp/armv5/OP_REM_DOUBLE_2ADDR.S b/vm/mterp/armv5/OP_REM_DOUBLE_2ADDR.S
new file mode 100644
index 0000000..0308b4a
--- /dev/null
+++ b/vm/mterp/armv5/OP_REM_DOUBLE_2ADDR.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* EABI doesn't define a double remainder function, but libm does */
+%include "armv5/binopWide2addr.S" {"instr":"bl      fmod"}
diff --git a/vm/mterp/armv5/OP_REM_FLOAT.S b/vm/mterp/armv5/OP_REM_FLOAT.S
new file mode 100644
index 0000000..46c5fa8
--- /dev/null
+++ b/vm/mterp/armv5/OP_REM_FLOAT.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* EABI doesn't define a float remainder function, but libm does */
+%include "armv5/binop.S" {"instr":"bl      fmodf"}
diff --git a/vm/mterp/armv5/OP_REM_FLOAT_2ADDR.S b/vm/mterp/armv5/OP_REM_FLOAT_2ADDR.S
new file mode 100644
index 0000000..2cace1a
--- /dev/null
+++ b/vm/mterp/armv5/OP_REM_FLOAT_2ADDR.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* EABI doesn't define a float remainder function, but libm does */
+%include "armv5/binop2addr.S" {"instr":"bl      fmodf"}
diff --git a/vm/mterp/armv5/OP_REM_INT.S b/vm/mterp/armv5/OP_REM_INT.S
new file mode 100644
index 0000000..cd6d6fd
--- /dev/null
+++ b/vm/mterp/armv5/OP_REM_INT.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* idivmod returns quotient in r0 and remainder in r1 */
+%include "armv5/binop.S" {"instr":"bl      __aeabi_idivmod", "result":"r1", "chkzero":"1"}
diff --git a/vm/mterp/armv5/OP_REM_INT_2ADDR.S b/vm/mterp/armv5/OP_REM_INT_2ADDR.S
new file mode 100644
index 0000000..2dad3ca
--- /dev/null
+++ b/vm/mterp/armv5/OP_REM_INT_2ADDR.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* idivmod returns quotient in r0 and remainder in r1 */
+%include "armv5/binop2addr.S" {"instr":"bl      __aeabi_idivmod", "result":"r1", "chkzero":"1"}
diff --git a/vm/mterp/armv5/OP_REM_INT_LIT16.S b/vm/mterp/armv5/OP_REM_INT_LIT16.S
new file mode 100644
index 0000000..c9f5a87
--- /dev/null
+++ b/vm/mterp/armv5/OP_REM_INT_LIT16.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* idivmod returns quotient in r0 and remainder in r1 */
+%include "armv5/binopLit16.S" {"instr":"bl      __aeabi_idivmod", "result":"r1", "chkzero":"1"}
diff --git a/vm/mterp/armv5/OP_REM_INT_LIT8.S b/vm/mterp/armv5/OP_REM_INT_LIT8.S
new file mode 100644
index 0000000..aeddaba
--- /dev/null
+++ b/vm/mterp/armv5/OP_REM_INT_LIT8.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* idivmod returns quotient in r0 and remainder in r1 */
+%include "armv5/binopLit8.S" {"instr":"bl      __aeabi_idivmod", "result":"r1", "chkzero":"1"}
diff --git a/vm/mterp/armv5/OP_REM_LONG.S b/vm/mterp/armv5/OP_REM_LONG.S
new file mode 100644
index 0000000..0687b43
--- /dev/null
+++ b/vm/mterp/armv5/OP_REM_LONG.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+%include "armv5/binopWide.S" {"instr":"bl      __aeabi_ldivmod", "result0":"r2", "result1":"r3", "chkzero":"1"}
diff --git a/vm/mterp/armv5/OP_REM_LONG_2ADDR.S b/vm/mterp/armv5/OP_REM_LONG_2ADDR.S
new file mode 100644
index 0000000..d4a64f8
--- /dev/null
+++ b/vm/mterp/armv5/OP_REM_LONG_2ADDR.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+%include "armv5/binopWide2addr.S" {"instr":"bl      __aeabi_ldivmod", "result0":"r2", "result1":"r3", "chkzero":"1"}
diff --git a/vm/mterp/armv5/OP_RETURN.S b/vm/mterp/armv5/OP_RETURN.S
new file mode 100644
index 0000000..f76a223
--- /dev/null
+++ b/vm/mterp/armv5/OP_RETURN.S
@@ -0,0 +1,13 @@
+%verify "executed"
+    /*
+     * Return a 32-bit value.  Copies the return value into the "glue"
+     * structure, then jumps to the return handler.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    GET_VREG(r0, r2)                    @ r0<- vAA
+    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+    b       common_returnFromMethod
+
diff --git a/vm/mterp/armv5/OP_RETURN_OBJECT.S b/vm/mterp/armv5/OP_RETURN_OBJECT.S
new file mode 100644
index 0000000..e4c5c49
--- /dev/null
+++ b/vm/mterp/armv5/OP_RETURN_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_RETURN.S"
diff --git a/vm/mterp/armv5/OP_RETURN_VOID.S b/vm/mterp/armv5/OP_RETURN_VOID.S
new file mode 100644
index 0000000..647002d
--- /dev/null
+++ b/vm/mterp/armv5/OP_RETURN_VOID.S
@@ -0,0 +1,3 @@
+%verify "executed"
+    b       common_returnFromMethod
+
diff --git a/vm/mterp/armv5/OP_RETURN_WIDE.S b/vm/mterp/armv5/OP_RETURN_WIDE.S
new file mode 100644
index 0000000..2d30792
--- /dev/null
+++ b/vm/mterp/armv5/OP_RETURN_WIDE.S
@@ -0,0 +1,13 @@
+%verify "executed"
+    /*
+     * Return a 64-bit value.  Copies the return value into the "glue"
+     * structure, then jumps to the return handler.
+     */
+    /* return-wide vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
+    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
+    stmia   r3, {r0-r1}                 @ retval<- r0/r1
+    b       common_returnFromMethod
+
diff --git a/vm/mterp/armv5/OP_RSUB_INT.S b/vm/mterp/armv5/OP_RSUB_INT.S
new file mode 100644
index 0000000..dbf8056
--- /dev/null
+++ b/vm/mterp/armv5/OP_RSUB_INT.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%include "armv5/binopLit16.S" {"instr":"rsb     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_RSUB_INT_LIT8.S b/vm/mterp/armv5/OP_RSUB_INT_LIT8.S
new file mode 100644
index 0000000..3bed616
--- /dev/null
+++ b/vm/mterp/armv5/OP_RSUB_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit8.S" {"instr":"rsb     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_SGET.S b/vm/mterp/armv5/OP_SGET.S
new file mode 100644
index 0000000..17ab26a
--- /dev/null
+++ b/vm/mterp/armv5/OP_SGET.S
@@ -0,0 +1,38 @@
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .L${opcode}_resolve         @ yes, do resolve
+.L${opcode}_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[AA]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.L${opcode}_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .L${opcode}_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
diff --git a/vm/mterp/armv5/OP_SGET_BOOLEAN.S b/vm/mterp/armv5/OP_SGET_BOOLEAN.S
new file mode 100644
index 0000000..0cbd28f
--- /dev/null
+++ b/vm/mterp/armv5/OP_SGET_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_SGET.S"
diff --git a/vm/mterp/armv5/OP_SGET_BYTE.S b/vm/mterp/armv5/OP_SGET_BYTE.S
new file mode 100644
index 0000000..0cbd28f
--- /dev/null
+++ b/vm/mterp/armv5/OP_SGET_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_SGET.S"
diff --git a/vm/mterp/armv5/OP_SGET_CHAR.S b/vm/mterp/armv5/OP_SGET_CHAR.S
new file mode 100644
index 0000000..0cbd28f
--- /dev/null
+++ b/vm/mterp/armv5/OP_SGET_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_SGET.S"
diff --git a/vm/mterp/armv5/OP_SGET_OBJECT.S b/vm/mterp/armv5/OP_SGET_OBJECT.S
new file mode 100644
index 0000000..0cbd28f
--- /dev/null
+++ b/vm/mterp/armv5/OP_SGET_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_SGET.S"
diff --git a/vm/mterp/armv5/OP_SGET_SHORT.S b/vm/mterp/armv5/OP_SGET_SHORT.S
new file mode 100644
index 0000000..0cbd28f
--- /dev/null
+++ b/vm/mterp/armv5/OP_SGET_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_SGET.S"
diff --git a/vm/mterp/armv5/OP_SGET_WIDE.S b/vm/mterp/armv5/OP_SGET_WIDE.S
new file mode 100644
index 0000000..1f93a2f
--- /dev/null
+++ b/vm/mterp/armv5/OP_SGET_WIDE.S
@@ -0,0 +1,37 @@
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * 64-bit SGET handler.
+     */
+    /* sget-wide vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .L${opcode}_resolve         @ yes, do resolve
+.L${opcode}_finish:
+    mov     r1, rINST, lsr #8           @ r1<- AA
+    ldrd    r2, [r0, #offStaticField_value] @ r2/r3<- field value (aligned)
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[AA]
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    stmia   r1, {r2-r3}                 @ vAA/vAA+1<- r2/r3
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.L${opcode}_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .L${opcode}_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
diff --git a/vm/mterp/armv5/OP_SHL_INT.S b/vm/mterp/armv5/OP_SHL_INT.S
new file mode 100644
index 0000000..b8fdfcc
--- /dev/null
+++ b/vm/mterp/armv5/OP_SHL_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, asl r1"}
diff --git a/vm/mterp/armv5/OP_SHL_INT_2ADDR.S b/vm/mterp/armv5/OP_SHL_INT_2ADDR.S
new file mode 100644
index 0000000..753e95b
--- /dev/null
+++ b/vm/mterp/armv5/OP_SHL_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, asl r1"}
diff --git a/vm/mterp/armv5/OP_SHL_INT_LIT8.S b/vm/mterp/armv5/OP_SHL_INT_LIT8.S
new file mode 100644
index 0000000..8843197
--- /dev/null
+++ b/vm/mterp/armv5/OP_SHL_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit8.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, asl r1"}
diff --git a/vm/mterp/armv5/OP_SHL_LONG.S b/vm/mterp/armv5/OP_SHL_LONG.S
new file mode 100644
index 0000000..3510565
--- /dev/null
+++ b/vm/mterp/armv5/OP_SHL_LONG.S
@@ -0,0 +1,33 @@
+%verify "executed"
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shl-long vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r3, r0, #255                @ r3<- BB
+    mov     r0, r0, lsr #8              @ r0<- CC
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
+    GET_VREG(r2, r0)                    @ r2<- vCC
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+
+    mov     r1, r1, asl r2              @  r1<- r1 << r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @  r1<- r1 | (r0 >> (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    movpl   r1, r0, asl ip              @  if r2 >= 32, r1<- r0 << (r2-32)
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    b       .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+    mov     r0, r0, asl r2              @  r0<- r0 << r2
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_SHL_LONG_2ADDR.S b/vm/mterp/armv5/OP_SHL_LONG_2ADDR.S
new file mode 100644
index 0000000..93f8260
--- /dev/null
+++ b/vm/mterp/armv5/OP_SHL_LONG_2ADDR.S
@@ -0,0 +1,29 @@
+%verify "executed"
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shl-long/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15                 @ r9<- A
+    GET_VREG(r2, r3)                    @ r2<- vB
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
+
+    mov     r1, r1, asl r2              @  r1<- r1 << r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @  r1<- r1 | (r0 >> (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    movpl   r1, r0, asl ip              @  if r2 >= 32, r1<- r0 << (r2-32)
+    mov     r0, r0, asl r2              @  r0<- r0 << r2
+    b       .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_SHR_INT.S b/vm/mterp/armv5/OP_SHR_INT.S
new file mode 100644
index 0000000..7f35381
--- /dev/null
+++ b/vm/mterp/armv5/OP_SHR_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, asr r1"}
diff --git a/vm/mterp/armv5/OP_SHR_INT_2ADDR.S b/vm/mterp/armv5/OP_SHR_INT_2ADDR.S
new file mode 100644
index 0000000..0632ecc
--- /dev/null
+++ b/vm/mterp/armv5/OP_SHR_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, asr r1"}
diff --git a/vm/mterp/armv5/OP_SHR_INT_LIT8.S b/vm/mterp/armv5/OP_SHR_INT_LIT8.S
new file mode 100644
index 0000000..df3126c
--- /dev/null
+++ b/vm/mterp/armv5/OP_SHR_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit8.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, asr r1"}
diff --git a/vm/mterp/armv5/OP_SHR_LONG.S b/vm/mterp/armv5/OP_SHR_LONG.S
new file mode 100644
index 0000000..c7f10aa
--- /dev/null
+++ b/vm/mterp/armv5/OP_SHR_LONG.S
@@ -0,0 +1,33 @@
+%verify "executed"
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shr-long vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r3, r0, #255                @ r3<- BB
+    mov     r0, r0, lsr #8              @ r0<- CC
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
+    GET_VREG(r2, r0)                    @ r2<- vCC
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+
+    mov     r0, r0, lsr r2              @  r0<- r0 >> r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @  r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    movpl   r0, r1, asr ip              @  if r2 >= 32, r0<- r1 >> (r2-32)
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    b       .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+    mov     r1, r1, asr r2              @  r1<- r1 >> r2
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_SHR_LONG_2ADDR.S b/vm/mterp/armv5/OP_SHR_LONG_2ADDR.S
new file mode 100644
index 0000000..188017f
--- /dev/null
+++ b/vm/mterp/armv5/OP_SHR_LONG_2ADDR.S
@@ -0,0 +1,29 @@
+%verify "executed"
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shr-long/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15                 @ r9<- A
+    GET_VREG(r2, r3)                    @ r2<- vB
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
+
+    mov     r0, r0, lsr r2              @  r0<- r0 >> r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @  r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    movpl   r0, r1, asr ip              @  if r2 >= 32, r0<- r1 >> (r2-32)
+    mov     r1, r1, asr r2              @  r1<- r1 >> r2
+    b       .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_SPARSE_SWITCH.S b/vm/mterp/armv5/OP_SPARSE_SWITCH.S
new file mode 100644
index 0000000..44bc72e
--- /dev/null
+++ b/vm/mterp/armv5/OP_SPARSE_SWITCH.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_PACKED_SWITCH.S" { "func":"dvmInterpHandleSparseSwitch" }
diff --git a/vm/mterp/armv5/OP_SPUT.S b/vm/mterp/armv5/OP_SPUT.S
new file mode 100644
index 0000000..5236614
--- /dev/null
+++ b/vm/mterp/armv5/OP_SPUT.S
@@ -0,0 +1,38 @@
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .L${opcode}_resolve         @ yes, do resolve
+.L${opcode}_finish:   @ field ptr in r0
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- vAA
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r1, [r0, #offStaticField_value] @ field<- vAA
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.L${opcode}_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr, or null
+    cmp     r0, #0                      @ success?
+    bne     .L${opcode}_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
diff --git a/vm/mterp/armv5/OP_SPUT_BOOLEAN.S b/vm/mterp/armv5/OP_SPUT_BOOLEAN.S
new file mode 100644
index 0000000..33b968b
--- /dev/null
+++ b/vm/mterp/armv5/OP_SPUT_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_SPUT.S"
diff --git a/vm/mterp/armv5/OP_SPUT_BYTE.S b/vm/mterp/armv5/OP_SPUT_BYTE.S
new file mode 100644
index 0000000..33b968b
--- /dev/null
+++ b/vm/mterp/armv5/OP_SPUT_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_SPUT.S"
diff --git a/vm/mterp/armv5/OP_SPUT_CHAR.S b/vm/mterp/armv5/OP_SPUT_CHAR.S
new file mode 100644
index 0000000..33b968b
--- /dev/null
+++ b/vm/mterp/armv5/OP_SPUT_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_SPUT.S"
diff --git a/vm/mterp/armv5/OP_SPUT_OBJECT.S b/vm/mterp/armv5/OP_SPUT_OBJECT.S
new file mode 100644
index 0000000..33b968b
--- /dev/null
+++ b/vm/mterp/armv5/OP_SPUT_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_SPUT.S"
diff --git a/vm/mterp/armv5/OP_SPUT_SHORT.S b/vm/mterp/armv5/OP_SPUT_SHORT.S
new file mode 100644
index 0000000..33b968b
--- /dev/null
+++ b/vm/mterp/armv5/OP_SPUT_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/OP_SPUT.S"
diff --git a/vm/mterp/armv5/OP_SPUT_WIDE.S b/vm/mterp/armv5/OP_SPUT_WIDE.S
new file mode 100644
index 0000000..a7bc5f2
--- /dev/null
+++ b/vm/mterp/armv5/OP_SPUT_WIDE.S
@@ -0,0 +1,38 @@
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * 64-bit SPUT handler.
+     */
+    /* sput-wide vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .L${opcode}_resolve         @ yes, do resolve
+.L${opcode}_finish: @ field ptr in r0, &fp[AA] in r9
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    strd    r2, [r0, #offStaticField_value] @ field<- vAA/vAA+1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+%break
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     *  r9: &fp[AA]
+     */
+.L${opcode}_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr, or null
+    cmp     r0, #0                      @ success?
+    bne     .L${opcode}_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
diff --git a/vm/mterp/armv5/OP_SUB_DOUBLE.S b/vm/mterp/armv5/OP_SUB_DOUBLE.S
new file mode 100644
index 0000000..5aff638
--- /dev/null
+++ b/vm/mterp/armv5/OP_SUB_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide.S" {"instr":"bl      __aeabi_dsub"}
diff --git a/vm/mterp/armv5/OP_SUB_DOUBLE_2ADDR.S b/vm/mterp/armv5/OP_SUB_DOUBLE_2ADDR.S
new file mode 100644
index 0000000..7351d9e
--- /dev/null
+++ b/vm/mterp/armv5/OP_SUB_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide2addr.S" {"instr":"bl      __aeabi_dsub"}
diff --git a/vm/mterp/armv5/OP_SUB_FLOAT.S b/vm/mterp/armv5/OP_SUB_FLOAT.S
new file mode 100644
index 0000000..c902fa2
--- /dev/null
+++ b/vm/mterp/armv5/OP_SUB_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"instr":"bl      __aeabi_fsub"}
diff --git a/vm/mterp/armv5/OP_SUB_FLOAT_2ADDR.S b/vm/mterp/armv5/OP_SUB_FLOAT_2ADDR.S
new file mode 100644
index 0000000..994c820
--- /dev/null
+++ b/vm/mterp/armv5/OP_SUB_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"instr":"bl      __aeabi_fsub"}
diff --git a/vm/mterp/armv5/OP_SUB_INT.S b/vm/mterp/armv5/OP_SUB_INT.S
new file mode 100644
index 0000000..cbd865e
--- /dev/null
+++ b/vm/mterp/armv5/OP_SUB_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"instr":"sub     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_SUB_INT_2ADDR.S b/vm/mterp/armv5/OP_SUB_INT_2ADDR.S
new file mode 100644
index 0000000..93a32ec
--- /dev/null
+++ b/vm/mterp/armv5/OP_SUB_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"instr":"sub     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_SUB_LONG.S b/vm/mterp/armv5/OP_SUB_LONG.S
new file mode 100644
index 0000000..34b0a4d
--- /dev/null
+++ b/vm/mterp/armv5/OP_SUB_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide.S" {"preinstr":"subs    r0, r0, r2", "instr":"sbc     r1, r1, r3"}
diff --git a/vm/mterp/armv5/OP_SUB_LONG_2ADDR.S b/vm/mterp/armv5/OP_SUB_LONG_2ADDR.S
new file mode 100644
index 0000000..ee5bf9e
--- /dev/null
+++ b/vm/mterp/armv5/OP_SUB_LONG_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide2addr.S" {"preinstr":"subs    r0, r0, r2", "instr":"sbc     r1, r1, r3"}
diff --git a/vm/mterp/armv5/OP_THROW.S b/vm/mterp/armv5/OP_THROW.S
new file mode 100644
index 0000000..c8c5c36
--- /dev/null
+++ b/vm/mterp/armv5/OP_THROW.S
@@ -0,0 +1,15 @@
+%verify "executed"
+%verify "exception for null object"
+    /*
+     * Throw an exception object in the current thread.
+     */
+    /* throw vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    cmp     r1, #0                      @ null object?
+    beq     common_errNullObject        @ yes, throw an NPE instead
+    @ bypass dvmSetException, just store it
+    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
+    b       common_exceptionThrown      @ go handle the exception
+
diff --git a/vm/mterp/armv5/OP_UNUSED_3E.S b/vm/mterp/armv5/OP_UNUSED_3E.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_3E.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_3F.S b/vm/mterp/armv5/OP_UNUSED_3F.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_3F.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_40.S b/vm/mterp/armv5/OP_UNUSED_40.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_40.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_41.S b/vm/mterp/armv5/OP_UNUSED_41.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_41.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_42.S b/vm/mterp/armv5/OP_UNUSED_42.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_42.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_43.S b/vm/mterp/armv5/OP_UNUSED_43.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_43.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_73.S b/vm/mterp/armv5/OP_UNUSED_73.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_73.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_79.S b/vm/mterp/armv5/OP_UNUSED_79.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_79.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_7A.S b/vm/mterp/armv5/OP_UNUSED_7A.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_7A.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_E3.S b/vm/mterp/armv5/OP_UNUSED_E3.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_E3.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_E4.S b/vm/mterp/armv5/OP_UNUSED_E4.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_E4.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_E5.S b/vm/mterp/armv5/OP_UNUSED_E5.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_E5.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_E6.S b/vm/mterp/armv5/OP_UNUSED_E6.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_E6.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_E7.S b/vm/mterp/armv5/OP_UNUSED_E7.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_E7.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_E8.S b/vm/mterp/armv5/OP_UNUSED_E8.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_E8.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_E9.S b/vm/mterp/armv5/OP_UNUSED_E9.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_E9.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_EA.S b/vm/mterp/armv5/OP_UNUSED_EA.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_EA.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_EB.S b/vm/mterp/armv5/OP_UNUSED_EB.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_EB.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_EC.S b/vm/mterp/armv5/OP_UNUSED_EC.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_EC.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_ED.S b/vm/mterp/armv5/OP_UNUSED_ED.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_ED.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_EF.S b/vm/mterp/armv5/OP_UNUSED_EF.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_EF.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_F1.S b/vm/mterp/armv5/OP_UNUSED_F1.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_F1.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_FC.S b/vm/mterp/armv5/OP_UNUSED_FC.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_FC.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_FD.S b/vm/mterp/armv5/OP_UNUSED_FD.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_FD.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_FE.S b/vm/mterp/armv5/OP_UNUSED_FE.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_FE.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_UNUSED_FF.S b/vm/mterp/armv5/OP_UNUSED_FF.S
new file mode 100644
index 0000000..9fdef1d
--- /dev/null
+++ b/vm/mterp/armv5/OP_UNUSED_FF.S
@@ -0,0 +1 @@
+%include "armv5/unused.S"
diff --git a/vm/mterp/armv5/OP_USHR_INT.S b/vm/mterp/armv5/OP_USHR_INT.S
new file mode 100644
index 0000000..dc06b4c
--- /dev/null
+++ b/vm/mterp/armv5/OP_USHR_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, lsr r1"}
diff --git a/vm/mterp/armv5/OP_USHR_INT_2ADDR.S b/vm/mterp/armv5/OP_USHR_INT_2ADDR.S
new file mode 100644
index 0000000..f4cba40
--- /dev/null
+++ b/vm/mterp/armv5/OP_USHR_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, lsr r1"}
diff --git a/vm/mterp/armv5/OP_USHR_INT_LIT8.S b/vm/mterp/armv5/OP_USHR_INT_LIT8.S
new file mode 100644
index 0000000..bc08d54
--- /dev/null
+++ b/vm/mterp/armv5/OP_USHR_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit8.S" {"preinstr":"and     r1, r1, #31", "instr":"mov     r0, r0, lsr r1"}
diff --git a/vm/mterp/armv5/OP_USHR_LONG.S b/vm/mterp/armv5/OP_USHR_LONG.S
new file mode 100644
index 0000000..fc14b57
--- /dev/null
+++ b/vm/mterp/armv5/OP_USHR_LONG.S
@@ -0,0 +1,33 @@
+%verify "executed"
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* ushr-long vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r3, r0, #255                @ r3<- BB
+    mov     r0, r0, lsr #8              @ r0<- CC
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
+    GET_VREG(r2, r0)                    @ r2<- vCC
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+
+    mov     r0, r0, lsr r2              @  r0<- r0 >>> r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @  r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    movpl   r0, r1, lsr ip              @  if r2 >= 32, r0<- r1 >>> (r2-32)
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    b       .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+    mov     r1, r1, lsr r2              @  r1<- r1 >>> r2
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_USHR_LONG_2ADDR.S b/vm/mterp/armv5/OP_USHR_LONG_2ADDR.S
new file mode 100644
index 0000000..18167e3
--- /dev/null
+++ b/vm/mterp/armv5/OP_USHR_LONG_2ADDR.S
@@ -0,0 +1,29 @@
+%verify "executed"
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* ushr-long/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15                 @ r9<- A
+    GET_VREG(r2, r3)                    @ r2<- vB
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
+
+    mov     r0, r0, lsr r2              @  r0<- r0 >>> r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @  r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    movpl   r0, r1, lsr ip              @  if r2 >= 32, r0<- r1 >>> (r2-32)
+    mov     r1, r1, lsr r2              @  r1<- r1 >>> r2
+    b       .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/OP_XOR_INT.S b/vm/mterp/armv5/OP_XOR_INT.S
new file mode 100644
index 0000000..101c3b2
--- /dev/null
+++ b/vm/mterp/armv5/OP_XOR_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop.S" {"instr":"eor     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_XOR_INT_2ADDR.S b/vm/mterp/armv5/OP_XOR_INT_2ADDR.S
new file mode 100644
index 0000000..70c3e36
--- /dev/null
+++ b/vm/mterp/armv5/OP_XOR_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binop2addr.S" {"instr":"eor     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_XOR_INT_LIT16.S b/vm/mterp/armv5/OP_XOR_INT_LIT16.S
new file mode 100644
index 0000000..3e12b63
--- /dev/null
+++ b/vm/mterp/armv5/OP_XOR_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit16.S" {"instr":"eor     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_XOR_INT_LIT8.S b/vm/mterp/armv5/OP_XOR_INT_LIT8.S
new file mode 100644
index 0000000..063b2cb
--- /dev/null
+++ b/vm/mterp/armv5/OP_XOR_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopLit8.S" {"instr":"eor     r0, r0, r1"}
diff --git a/vm/mterp/armv5/OP_XOR_LONG.S b/vm/mterp/armv5/OP_XOR_LONG.S
new file mode 100644
index 0000000..1ecdaad
--- /dev/null
+++ b/vm/mterp/armv5/OP_XOR_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide.S" {"preinstr":"eor     r0, r0, r2", "instr":"eor     r1, r1, r3"}
diff --git a/vm/mterp/armv5/OP_XOR_LONG_2ADDR.S b/vm/mterp/armv5/OP_XOR_LONG_2ADDR.S
new file mode 100644
index 0000000..5423e6a
--- /dev/null
+++ b/vm/mterp/armv5/OP_XOR_LONG_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5/binopWide2addr.S" {"preinstr":"eor     r0, r0, r2", "instr":"eor     r1, r1, r3"}
diff --git a/vm/mterp/armv5/bincmp.S b/vm/mterp/armv5/bincmp.S
new file mode 100644
index 0000000..9b574a3
--- /dev/null
+++ b/vm/mterp/armv5/bincmp.S
@@ -0,0 +1,25 @@
+%verify "branch taken"
+%verify "branch not taken"
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    mov     r0, rINST, lsr #8           @ r0<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r0, r0, #15                 @ r0<- A
+    GET_VREG(r3, r1)                    @ r3<- vB
+    GET_VREG(r2, r0)                    @ r2<- vA
+    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
+    cmp     r2, r3                      @ compare (vA, vB)
+    b${revcmp}  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ yes, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/armv5/binop.S b/vm/mterp/armv5/binop.S
new file mode 100644
index 0000000..d33ce01
--- /dev/null
+++ b/vm/mterp/armv5/binop.S
@@ -0,0 +1,34 @@
+%default {"preinstr":"", "result":"r0", "chkzero":"0"}
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+     *      mul-float, div-float, rem-float
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    GET_VREG(r1, r3)                    @ r1<- vCC
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    .if $chkzero
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero      @ yes, go throw divide-by-zero error
+    .endif
+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ $result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG($result, r9)               @ vAA<- $result
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 11-14 instructions */
+
diff --git a/vm/mterp/armv5/binop2addr.S b/vm/mterp/armv5/binop2addr.S
new file mode 100644
index 0000000..fc170a0
--- /dev/null
+++ b/vm/mterp/armv5/binop2addr.S
@@ -0,0 +1,34 @@
+%default {"preinstr":"", "result":"r0", "chkzero":"0"}
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the second operand, vB (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15                 @ r9<- A
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if $chkzero
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero      @ yes, go throw divide-by-zero error
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ $result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG($result, r9)               @ vA<- $result
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
diff --git a/vm/mterp/armv5/binopLit16.S b/vm/mterp/armv5/binopLit16.S
new file mode 100644
index 0000000..3cb1875
--- /dev/null
+++ b/vm/mterp/armv5/binopLit16.S
@@ -0,0 +1,31 @@
+%default {"result":"r0", "chkzero":"0"}
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the CCCC literal (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
+    mov     r2, rINST, lsr #12          @ r2<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r2)                    @ r0<- vB
+    and     r9, r9, #15                 @ r9<- A (mask off high nibble of A+)
+    .if $chkzero
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    $instr                              @ $result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG($result, r9)               @ vA<- $result
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+    /* 10-13 instructions */
+
diff --git a/vm/mterp/armv5/binopLit8.S b/vm/mterp/armv5/binopLit8.S
new file mode 100644
index 0000000..4406259
--- /dev/null
+++ b/vm/mterp/armv5/binopLit8.S
@@ -0,0 +1,33 @@
+%default {"preinstr":"", "result":"r0", "chkzero":"0"}
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the CC literal (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended; sets Z)
+    .if $chkzero
+    @cmp     r1, #0                     @ unneeded; "movs" above already set Z
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ $result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG($result, r9)               @ vAA<- $result
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-12 instructions */
+    /* 10-12 instructions */
+
diff --git a/vm/mterp/armv5/binopWide.S b/vm/mterp/armv5/binopWide.S
new file mode 100644
index 0000000..87c5f07
--- /dev/null
+++ b/vm/mterp/armv5/binopWide.S
@@ -0,0 +1,39 @@
+%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in other registers, override "result0"/"result1".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r2-r3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
+    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
+    .if $chkzero
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ $result0/$result1<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {$result0,$result1}     @ vAA/vAA+1<- $result0/$result1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 14-17 instructions */
+
diff --git a/vm/mterp/armv5/binopWide2addr.S b/vm/mterp/armv5/binopWide2addr.S
new file mode 100644
index 0000000..98c6c98
--- /dev/null
+++ b/vm/mterp/armv5/binopWide2addr.S
@@ -0,0 +1,36 @@
+%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in other registers, override "result0"/"result1".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3), the second operand.  Useful for integer division/modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15                 @ r9<- A (mask off high nibble of A+)
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
+    .if $chkzero
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ $result0/$result1<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {$result0,$result1}     @ vA/vA+1<- $result0/$result1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
diff --git a/vm/mterp/armv5/debug.c b/vm/mterp/armv5/debug.c
new file mode 100644
index 0000000..301e27a
--- /dev/null
+++ b/vm/mterp/armv5/debug.c
@@ -0,0 +1,79 @@
+#include <inttypes.h>
+
+/*
+ * Dump the fixed-purpose ARM registers, along with some other info.
+ *
+ * This function MUST be compiled in ARM mode -- THUMB will yield bogus
+ * results.
+ *
+ * This will NOT preserve r0-r3/ip.
+ */
+void dvmMterpDumpArmRegs(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3)
+{
+    register uint32_t rPC       asm("r4");
+    register uint32_t rFP       asm("r5");
+    register uint32_t rGLUE     asm("r6");
+    register uint32_t rIBASE    asm("r7");
+    register uint32_t rINST     asm("r8");
+    register uint32_t r9        asm("r9");
+    register uint32_t r10       asm("r10");
+
+    extern char dvmAsmInstructionStart[];
+
+    printf("REGS: r0=%08x r1=%08x r2=%08x r3=%08x\n", r0, r1, r2, r3);
+    printf("    : rPC=%08x rFP=%08x rGLUE=%08x rIBASE=%08x\n",
+        rPC, rFP, rGLUE, rIBASE);
+    printf("    : rINST=%08x r9=%08x r10=%08x\n", rINST, r9, r10);
+
+    MterpGlue* glue = (MterpGlue*) rGLUE;
+    const Method* method = glue->method;
+    printf("    + self is %p\n", dvmThreadSelf());
+    //printf("    + currently in %s.%s %s\n",
+    //    method->clazz->descriptor, method->name, method->signature);
+    //printf("    + dvmAsmInstructionStart = %p\n", dvmAsmInstructionStart);
+    //printf("    + next handler for 0x%02x = %p\n",
+    //    rINST & 0xff, dvmAsmInstructionStart + (rINST & 0xff) * 64);
+}
+
+/*
+ * Dump the StackSaveArea for the specified frame pointer.
+ */
+void dvmDumpFp(void* fp, StackSaveArea* otherSaveArea)
+{
+    StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+    printf("StackSaveArea for fp %p [%p/%p]:\n", fp, saveArea, otherSaveArea);
+#ifdef EASY_GDB
+    printf("  prevSave=%p, prevFrame=%p savedPc=%p meth=%p curPc=%p\n",
+        saveArea->prevSave, saveArea->prevFrame, saveArea->savedPc,
+        saveArea->method, saveArea->xtra.currentPc);
+#else
+    printf("  prevFrame=%p savedPc=%p meth=%p curPc=%p fp[0]=0x%08x\n",
+        saveArea->prevFrame, saveArea->savedPc,
+        saveArea->method, saveArea->xtra.currentPc,
+        *(u4*)fp);
+#endif
+}
+
+/*
+ * Does the bulk of the work for common_printMethod().
+ */
+void dvmMterpPrintMethod(Method* method)
+{
+    /*
+     * It is a direct (non-virtual) method if it is static, private,
+     * or a constructor.
+     */
+    bool isDirect = 
+        ((method->accessFlags & (ACC_STATIC|ACC_PRIVATE)) != 0) ||
+        (method->name[0] == '<');
+
+    char* desc = dexProtoCopyMethodDescriptor(&method->prototype);  /* heap copy; freed below */
+        
+    printf("<%c:%s.%s %s> ",
+            isDirect ? 'D' : 'V',
+            method->clazz->descriptor,
+            method->name,
+            desc);
+
+    free(desc);
+}
diff --git a/vm/mterp/armv5/entry.S b/vm/mterp/armv5/entry.S
new file mode 100644
index 0000000..a1e3b4e
--- /dev/null
+++ b/vm/mterp/armv5/entry.S
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+/*
+ * We don't have formal stack frames, so gdb scans upward in the code
+ * to find the start of the function (a label with the %function type),
+ * and then looks at the next few instructions to figure out what
+ * got pushed onto the stack.  From this it figures out how to restore
+ * the registers, including PC, for the previous stack frame.  If gdb
+ * sees a non-function label, it stops scanning, so either we need to
+ * have nothing but assembler-local labels between the entry point and
+ * the break, or we need to fake it out.
+ *
+ * When this is defined, we add some stuff to make gdb less confused.
+ */
+#define ASSIST_DEBUGGER 1
+
+    .text
+    .align  2
+    .global dvmMterpStdRun
+    .type   dvmMterpStdRun, %function
+
+/*
+ * On entry:
+ *  r0  MterpGlue* glue
+ *
+ * This function returns a boolean "changeInterp" value.  The return comes
+ * via a call to dvmMterpStdBail().
+ */
+dvmMterpStdRun:
+#define MTERP_ENTRY1 \
+    .save {r4-r10,fp,lr}; \
+    stmfd   sp!, {r4-r10,fp,lr}         @ save 9 regs
+#define MTERP_ENTRY2 \
+    .pad    #4; \
+    sub     sp, sp, #4                  @ align 64
+
+    .fnstart
+    MTERP_ENTRY1
+    MTERP_ENTRY2
+
+    /* save stack pointer so dvmMterpStdBail can restore it on exit */
+    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return
+
+    /* set up "named" registers, figure out entry point */
+    mov     rGLUE, r0                   @ set rGLUE
+    ldrb    r1, [r0, #offGlue_entryPoint]   @ InterpEntry enum is char
+    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
+    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
+    cmp     r1, #kInterpEntryInstr      @ usual case?
+    bne     .Lnot_instr                 @ no, handle it
+
+    /* start executing the instruction at rPC */
+    FETCH_INST()                        @ load rINST from rPC
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+.Lnot_instr:
+    cmp     r1, #kInterpEntryReturn     @ were we returning from a method?
+    beq     common_returnFromMethod
+
+.Lnot_return:
+    cmp     r1, #kInterpEntryThrow      @ were we throwing an exception?
+    beq     common_exceptionThrown
+
+.Lbad_arg:
+    ldr     r0, strBadEntryPoint
+    @ r1 holds value of entryPoint
+    bl      printf
+    bl      dvmAbort
+    .fnend
+
+
+    .global dvmMterpStdBail
+    .type   dvmMterpStdBail, %function
+
+/*
+ * Restore the stack pointer and PC from the save point established on entry.
+ * This is essentially the same as a longjmp, but should be cheaper.  The
+ * last instruction causes us to return to whoever called dvmMterpStdRun.
+ *
+ * We pushed some registers on the stack in dvmMterpStdRun, then saved
+ * SP and LR.  Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ *  r0  MterpGlue* glue
+ *  r1  bool changeInterp
+ */
+dvmMterpStdBail:
+    ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
+    mov     r0, r1                          @ return the changeInterp value
+    add     sp, sp, #4                      @ un-align 64
+    ldmfd   sp!, {r4-r10,fp,pc}             @ restore 9 regs
+
+
+/*
+ * String references.
+ */
+strBadEntryPoint:
+    .word   .LstrBadEntryPoint
+
diff --git a/vm/mterp/armv5/footer.S b/vm/mterp/armv5/footer.S
new file mode 100644
index 0000000..5efb24a
--- /dev/null
+++ b/vm/mterp/armv5/footer.S
@@ -0,0 +1,714 @@
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align  2
+
+/*
+ * Common code when a backward branch is taken.
+ *
+ * On entry:
+ *  r9 is PC adjustment *in bytes*
+ */
+common_backwardBranch:
+    mov     r0, #kInterpEntryInstr
+    bl      common_periodicChecks
+    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/*
+ * Need to see if the thread needs to be suspended or debugger/profiler
+ * activity has begun.
+ *
+ * TODO: if JDWP isn't running, zero out pDebuggerActive pointer so we don't
+ * have to do the second ldr.
+ *
+ * TODO: reduce this so we're just checking a single location.
+ *
+ * On entry:
+ *  r0 is reentry type, e.g. kInterpEntryInstr
+ *  r9 is trampoline PC adjustment *in bytes*
+ */
+common_periodicChecks:
+    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount
+
+#if defined(WITH_DEBUGGER)
+    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
+#endif
+#if defined(WITH_PROFILER)
+    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
+#endif
+
+    ldr     r3, [r3]                    @ r3<- suspendCount (int)
+
+#if defined(WITH_DEBUGGER)
+    ldrb    r1, [r1]                    @ r1<- debuggerActive (boolean)
+#endif
+#if defined (WITH_PROFILER)
+    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
+#endif
+
+    cmp     r3, #0                      @ suspend pending?
+    bne     2f                          @ yes, check suspend
+
+#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
+# if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
+    orrs    r1, r1, r2                  @ r1<- r1 | r2
+    cmp     r1, #0                      @ debugger attached or profiler started?
+# elif defined(WITH_DEBUGGER)
+    cmp     r1, #0                      @ debugger attached?
+# elif defined(WITH_PROFILER)
+    cmp     r2, #0                      @ profiler started?
+# endif
+    bne     3f                          @ debugger/profiler, switch interp
+#endif
+
+    mov     pc, lr                      @ nothing to do, return
+
+2:  @ check suspend
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    b       dvmCheckSuspendPending      @ suspend if necessary, then return
+
+3:  @ debugger/profiler enabled, bail out
+    add     rPC, rPC, r9                @ update rPC
+    str     r0, [rGLUE, #offGlue_entryPoint]
+    mov     r1, #1                      @ "want switch" = true
+    b       common_gotoBail
+
+
+/*
+ * The equivalent of "goto bail", this calls through the "bail handler".
+ *
+ * State registers will be saved to the "glue" area before bailing.
+ *
+ * On entry:
+ *  r1 is "bool changeInterp", indicating if we want to switch to the
+ *     other interpreter or just bail all the way out
+ */
+common_gotoBail:
+    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
+    mov     r0, rGLUE                   @ r0<- glue ptr
+    b       dvmMterpStdBail             @ call(glue, changeInterp)
+
+    @add     r1, r1, #1                  @ using (boolean+1)
+    @add     r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
+    @bl      _longjmp                    @ does not return
+    @bl      common_abort
+
+
+/*
+ * Common code for method invocation with range.
+ *
+ * On entry:
+ *  r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodRange:
+.LinvokeNewRange:
+    @ prepare to copy args to "outs" area of current frame
+    movs    r2, rINST, lsr #8           @ r2<- AA (arg count) -- test for zero
+    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
+    beq     .LinvokeArgsDone            @ if no args, skip the rest
+    FETCH(r1, 2)                        @ r1<- CCCC
+
+    @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
+    @ (very few methods have > 10 args; could unroll for common cases)
+    add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
+    sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
+1:  ldr     r1, [r3], #4                @ val = *fp++
+    subs    r2, r2, #1                  @ count--
+    str     r1, [r10], #4               @ *outs++ = val
+    bne     1b                          @ ...while count != 0
+    b       .LinvokeArgsDone
+
+/*
+ * Common code for method invocation without range.
+ *
+ * On entry:
+ *  r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodNoRange:
+.LinvokeNewNoRange:
+    @ prepare to copy args to "outs" area of current frame
+    movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
+    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
+    beq     .LinvokeArgsDone            @ if no args, skip the rest
+    FETCH(r1, 2)                        @ r1<- GFED
+
+    @ r0=methodToCall, r1=GFED, r2=count, r10=outs
+.LinvokeNonRange:
+    rsb     r2, r2, #5                  @ r2<- 5-r2
+    add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
+    bl      common_abort                @ (skipped due to ARM prefetch)
+5:  and     ip, rINST, #0x0f00          @ isolate A
+    ldr     r3, [rFP, ip, lsr #6]       @ r3<- vA (shift right 8, left 2)
+    mov     r0, r0                      @ nop
+    str     r3, [r10, #-4]!             @ *--outs = vA
+4:  and     ip, r1, #0xf000             @ isolate G
+    ldr     r3, [rFP, ip, lsr #10]      @ r3<- vG (shift right 12, left 2)
+    mov     r0, r0                      @ nop
+    str     r3, [r10, #-4]!             @ *--outs = vG
+3:  and     ip, r1, #0x0f00             @ isolate F
+    ldr     r3, [rFP, ip, lsr #6]       @ r3<- vF
+    mov     r0, r0                      @ nop
+    str     r3, [r10, #-4]!             @ *--outs = vF
+2:  and     ip, r1, #0x00f0             @ isolate E
+    ldr     r3, [rFP, ip, lsr #2]       @ r3<- vE
+    mov     r0, r0                      @ nop
+    str     r3, [r10, #-4]!             @ *--outs = vE
+1:  and     ip, r1, #0x000f             @ isolate D
+    ldr     r3, [rFP, ip, lsl #2]       @ r3<- vD
+    mov     r0, r0                      @ nop
+    str     r3, [r10, #-4]!             @ *--outs = vD
+0:  @ fall through to .LinvokeArgsDone
+
+.LinvokeArgsDone: @ r0=methodToCall
+    @ find space for the new stack frame, check for overflow
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    ldrh    r2, [r0, #offMethod_registersSize]  @ r2<- methodToCall->regsSize
+    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
+    sub     r1, r1, r2, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
+@    bl      common_dumpRegs
+    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
+    sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
+    cmp     r3, r9                      @ bottom < interpStackEnd?
+    blt     .LstackOverflow             @ yes, this frame will overflow stack
+
+    @ set up newSaveArea
+#ifdef EASY_GDB
+    SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
+    str     ip, [r10, #offStackSaveArea_prevSave]
+#endif
+    str     rFP, [r10, #offStackSaveArea_prevFrame]
+    str     rPC, [r10, #offStackSaveArea_savedPc]
+    str     r0, [r10, #offStackSaveArea_method]
+
+    ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
+    tst     r3, #ACC_NATIVE
+    bne     .LinvokeNative
+
+    /*
+    stmfd   sp!, {r0-r3}
+    bl      common_printNewline
+    mov     r0, rFP
+    mov     r1, #0
+    bl      dvmDumpFp
+    ldmfd   sp!, {r0-r3}
+    stmfd   sp!, {r0-r3}
+    mov     r0, r1
+    mov     r1, r10
+    bl      dvmDumpFp
+    bl      common_printNewline
+    ldmfd   sp!, {r0-r3}
+    */
+
+    @ Update "glue" values for the new method
+    @ r0=methodToCall, r1=newFp
+    ldr     r3, [r0, #offMethod_clazz]      @ r3<- method->clazz
+    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
+    ldr     r3, [r3, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+    ldr     rPC, [r0, #offMethod_insns]     @ rPC<- method->insns
+    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
+    FETCH_INST()                            @ load rINST from rPC
+    mov     rFP, r1                         @ fp = newFp
+    GET_INST_OPCODE(ip)                     @ extract opcode from rINST
+    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
+    GOTO_OPCODE(ip)                         @ jump to next instruction
+
+.LinvokeNative:
+    @ Prep for the native call
+    @ r0=methodToCall, r1=newFp, r10=newSaveArea
+    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
+    ldr     r9, [r3, #offThread_jniLocal_nextEntry] @ r9<- thread->refNext
+    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r9, [r10, #offStackSaveArea_localRefTop] @newFp->localRefTop=refNext
+    mov     r9, r3                      @ r9<- glue->self (preserve)
+
+    mov     r2, r0                      @ r2<- methodToCall
+    mov     r0, r1                      @ r0<- newFp (points to args)
+    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
+
+#ifdef ASSIST_DEBUGGER
+    /* insert fake function header to help gdb find the stack frame */
+    b       .Lskip
+    .type   dalvik_mterp, %function
+dalvik_mterp:
+    .fnstart
+    MTERP_ENTRY1
+    MTERP_ENTRY2
+.Lskip:
+#endif
+
+    mov     lr, pc                      @ set return addr
+    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+
+    @ native return; r9=self, r10=newSaveArea
+    @ equivalent to dvmPopJniLocals
+    ldr     r0, [r10, #offStackSaveArea_localRefTop] @ r0<- newSave->localRefTop
+    ldr     r1, [r9, #offThread_exception] @ check for exception
+    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    cmp     r1, #0                      @ null?
+    str     r0, [r9, #offThread_jniLocal_nextEntry] @ self->refNext<- r0
+    bne     common_exceptionThrown      @ no, handle exception
+
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+.LstackOverflow:
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
+    bl      dvmHandleStackOverflow
+    b       common_exceptionThrown
+#ifdef ASSIST_DEBUGGER
+    .fnend
+#endif
+
+
+    /*
+     * Common code for method invocation, calling through "glue code".
+     *
+     * TODO: now that we have range and non-range invoke handlers, this
+     *       needs to be split into two.  Maybe just create entry points
+     *       that set r9 and jump here?
+     *
+     * On entry:
+     *  r0 is "Method* methodToCall", the method we're trying to call
+     *  r9 is "bool methodCallRange", indicating if this is a /range variant
+     */
+     .if    0
+.LinvokeOld:
+    sub     sp, sp, #8                  @ space for args + pad
+    FETCH(ip, 2)                        @ ip<- FEDC or CCCC
+    mov     r2, r0                      @ A2<- methodToCall
+    mov     r0, rGLUE                   @ A0<- glue
+    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
+    mov     r1, r9                      @ A1<- methodCallRange
+    mov     r3, rINST, lsr #8           @ A3<- AA
+    str     ip, [sp, #0]                @ A4<- ip
+    bl      dvmMterp_invokeMethod       @ call the C invokeMethod
+    add     sp, sp, #8                  @ remove arg area
+    b       common_resumeAfterGlueCall  @ continue to next instruction
+    .endif
+
+
+
+/*
+ * Common code for handling a return instruction.
+ *
+ * This does not return.
+ */
+common_returnFromMethod:
+.LreturnNew:
+    mov     r0, #kInterpEntryReturn
+    mov     r9, #0
+    bl      common_periodicChecks
+
+    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
+    ldr     rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
+    ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
+                                        @ r2<- method we're returning to
+    cmp     r2, #0                      @ is this a break frame?
+    mov     r1, #0                      @ "want switch" = false
+    beq     common_gotoBail             @ break frame, bail out completely
+
+    ldr     rPC, [r0, #offStackSaveArea_savedPc] @ pc = saveArea->savedPc
+    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
+    str     r2, [rGLUE, #offGlue_method]    @ glue->method = newSave->method
+    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
+    ldr     r1, [r2, #offMethod_clazz]      @ r1<- method->clazz
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    ldr     r1, [r1, #offClassObject_pDvmDex]   @ r1<- method->clazz->pDvmDex
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r1, [rGLUE, #offGlue_methodClassDex]
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Return handling, calls through "glue code".
+     */
+     .if    0
+.LreturnOld:
+    SAVE_PC_FP_TO_GLUE()                @ export state
+    mov     r0, rGLUE                   @ arg to function
+    bl      dvmMterp_returnFromMethod
+    b       common_resumeAfterGlueCall
+    .endif
+
+
+/*
+ * Somebody has thrown an exception.  Handle it.
+ *
+ * If the exception processing code returns to us (instead of falling
+ * out of the interpreter), continue with whatever the next instruction
+ * now happens to be.
+ *
+ * This does not return.
+ */
+common_exceptionThrown:
+.LexceptionNew:
+    mov     r0, #kInterpEntryThrow
+    mov     r9, #0
+    bl      common_periodicChecks
+
+    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
+    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
+    mov     r1, r10                     @ r1<- self
+    mov     r0, r9                      @ r0<- exception
+    bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
+    mov     r3, #0                      @ r3<- NULL
+    str     r3, [r10, #offThread_exception] @ self->exception = NULL
+
+    /* set up args and a local for "&fp" */
+    /* (str sp, [sp, #-4]!  would be perfect here, but is discouraged) */
+    str     rFP, [sp, #-4]!             @ *--sp = fp
+    mov     ip, sp                      @ ip<- &fp
+    mov     r3, #0                      @ r3<- false
+    str     ip, [sp, #-4]!              @ *--sp = &fp
+    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
+    mov     r0, r10                     @ r0<- self
+    ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
+    mov     r2, r9                      @ r2<- exception
+    sub     r1, rPC, r1                 @ r1<- pc - method->insns
+    mov     r1, r1, asr #1              @ r1<- offset in code units
+
+    /* call, r0 gets catchRelPc (a code-unit offset) */
+    bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)
+    ldr     rFP, [sp, #4]               @ retrieve the updated rFP
+    cmp     r0, #0                      @ is catchRelPc < 0?
+    add     sp, sp, #8                  @ restore stack
+    bmi     .LnotCaughtLocally
+
+    /* fix stack overflow if necessary; must preserve r0 */
+    ldrb    r1, [r10, #offThread_stackOverflowed]
+    cmp     r1, #0                      @ did we overflow earlier?
+    beq     1f                          @ no, skip ahead
+    mov     r9, r0                      @ r9<- r0 (save it)
+    mov     r0, r10                     @ r0<- self
+    bl      dvmCleanupStackOverflow     @ call(self)
+    mov     r0, r9                      @ r0<- r9 (restore it)
+    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
+1:
+
+    /* adjust locals to match self->curFrame and updated PC */
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
+    ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
+    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
+    ldr     r2, [r1, #offMethod_clazz]      @ r2<- method->clazz
+    ldr     r3, [r1, #offMethod_insns]      @ r3<- method->insns
+    ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
+    add     rPC, r3, r0, asl #1             @ rPC<- method->insns + catchRelPc
+    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...
+
+    /* release the tracked alloc on the exception */
+    mov     r0, r9                      @ r0<- exception
+    mov     r1, r10                     @ r1<- self
+    bl      dvmReleaseTrackedAlloc      @ release the exception
+
+    /* restore the exception if the handler wants it */
+    FETCH_INST()                        @ load rINST from rPC
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
+    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+.LnotCaughtLocally: @ r9=exception, r10=self
+    /* fix stack overflow if necessary */
+    ldrb    r1, [r10, #offThread_stackOverflowed]
+    cmp     r1, #0                      @ did we overflow earlier?
+    movne   r0, r10                     @ if yes: r0<- self
+    blne    dvmCleanupStackOverflow     @ if yes: call(self)
+
+    @ may want to show "not caught locally" debug messages here
+#if DVM_SHOW_EXCEPTION >= 2
+    /* call __android_log_print(prio, tag, format, ...) */
+    /* "Exception %s from %s:%d not caught locally" */
+    @ dvmLineNumFromPC(method, pc - method->insns)
+    ldr     r0, [rGLUE, #offGlue_method]
+    ldr     r1, [r0, #offMethod_insns]
+    sub     r1, rPC, r1
+    asr     r1, r1, #1
+    bl      dvmLineNumFromPC
+    str     r0, [sp, #-4]!
+    @ dvmGetMethodSourceFile(method)
+    ldr     r0, [rGLUE, #offGlue_method]
+    bl      dvmGetMethodSourceFile
+    str     r0, [sp, #-4]!
+    @ exception->clazz->descriptor
+    ldr     r3, [r9, #offObject_clazz]
+    ldr     r3, [r3, #offClassObject_descriptor]
+    @
+    ldr     r2, strExceptionNotCaughtLocally
+    ldr     r1, strLogTag
+    mov     r0, #3                      @ LOG_DEBUG
+    bl      __android_log_print
+#endif
+    str     r9, [r10, #offThread_exception] @ restore exception
+    mov     r0, r9                      @ r0<- exception
+    mov     r1, r10                     @ r1<- self
+    bl      dvmReleaseTrackedAlloc      @ release the exception
+    mov     r1, #0                      @ "want switch" = false
+    b       common_gotoBail             @ bail out
+
+
+    /*
+     * Exception handling, calls through "glue code".
+     */
+    .if     0
+.LexceptionOld:
+    SAVE_PC_FP_TO_GLUE()                @ export state
+    mov     r0, rGLUE                   @ arg to function
+    bl      dvmMterp_exceptionThrown
+    b       common_resumeAfterGlueCall
+    .endif
+
+
+/*
+ * After returning from a "glued" function, pull out the updated
+ * values and start executing at the next instruction.
+ */
+common_resumeAfterGlueCall:
+    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
+    FETCH_INST()                        @ load rINST from rPC
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/*
+ * Invalid array index.
+ */
+common_errArrayIndex:
+    EXPORT_PC()
+    ldr     r0, strArrayIndexException
+    mov     r1, #0
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+/*
+ * Invalid array value.
+ */
+common_errArrayStore:
+    EXPORT_PC()
+    ldr     r0, strArrayStoreException
+    mov     r1, #0
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+/*
+ * Integer divide or mod by zero.
+ */
+common_errDivideByZero:
+    EXPORT_PC()
+    ldr     r0, strArithmeticException
+    ldr     r1, strDivideByZero
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+/*
+ * Attempt to allocate an array with a negative size.
+ */
+common_errNegativeArraySize:
+    EXPORT_PC()
+    ldr     r0, strNegativeArraySizeException
+    mov     r1, #0
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+/*
+ * Invocation of a non-existent method.
+ */
+common_errNoSuchMethod:
+    EXPORT_PC()
+    ldr     r0, strNoSuchMethodError
+    mov     r1, #0
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+/*
+ * We encountered a null object when we weren't expecting one.  We
+ * export the PC, throw a NullPointerException, and goto the exception
+ * processing code.
+ */
+common_errNullObject:
+    EXPORT_PC()
+    ldr     r0, strNullPointerException
+    mov     r1, #0
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+/*
+ * For debugging, cause an immediate fault.  The source address will
+ * be in lr (use a bl instruction to jump here).
+ */
+common_abort:
+    ldr     pc, .LdeadFood
+.LdeadFood:
+    .word   0xdeadf00d
+
+/*
+ * Spit out a "we were here", preserving all registers.  (The attempt
+ * to save ip won't work, but we need to save an even number of
+ * registers for EABI 64-bit stack alignment.)
+ */
+    .macro  SQUEAK num
+common_squeak\num:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    ldr     r0, strSqueak
+    mov     r1, #\num
+    bl      printf
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+    .endm
+
+    SQUEAK  0
+    SQUEAK  1
+    SQUEAK  2
+    SQUEAK  3
+    SQUEAK  4
+    SQUEAK  5
+
+/*
+ * Spit out the number in r0, preserving registers.
+ */
+common_printNum:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    mov     r1, r0
+    ldr     r0, strSqueak
+    bl      printf
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+
+/*
+ * Print a newline, preserving registers.
+ */
+common_printNewline:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    ldr     r0, strNewline
+    bl      printf
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+
+    /*
+     * Print the 32-bit quantity in r0 as a hex value, preserving registers.
+     */
+common_printHex:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    mov     r1, r0
+    ldr     r0, strPrintHex
+    bl      printf
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+
+/*
+ * Print the 64-bit quantity in r0-r1, preserving registers.
+ */
+common_printLong:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    mov     r3, r1
+    mov     r2, r0
+    ldr     r0, strPrintLong
+    bl      printf
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+
+/*
+ * Print full method info.  Pass the Method* in r0.  Preserves regs.
+ */
+common_printMethod:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    bl      dvmMterpPrintMethod
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+
+/*
+ * Call a C helper function that dumps regs and possibly some
+ * additional info.  Requires the C function to be compiled in.
+ */
+    .if     0
+common_dumpRegs:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    bl      dvmMterpDumpArmRegs
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+    .endif
+
+
+/*
+ * String references, must be close to the code that uses them.
+ */
+    .align  2
+strArithmeticException:
+    .word   .LstrArithmeticException
+strArrayIndexException:
+    .word   .LstrArrayIndexException
+strArrayStoreException:
+    .word   .LstrArrayStoreException
+strDivideByZero:
+    .word   .LstrDivideByZero
+strNegativeArraySizeException:
+    .word   .LstrNegativeArraySizeException
+strNoSuchMethodError:
+    .word   .LstrNoSuchMethodError
+strNullPointerException:
+    .word   .LstrNullPointerException
+
+strLogTag:
+    .word   .LstrLogTag
+strExceptionNotCaughtLocally:
+    .word   .LstrExceptionNotCaughtLocally
+
+strNewline:
+    .word   .LstrNewline
+strSqueak:
+    .word   .LstrSqueak
+strPrintHex:
+    .word   .LstrPrintHex
+strPrintLong:
+    .word   .LstrPrintLong
+
+/*
+ * Zero-terminated ASCII string data.
+ *
+ * On ARM we have two choices: do like gcc does, and LDR from a .word
+ * with the address, or use an ADR pseudo-op to get the address
+ * directly.  ADR saves 4 bytes and an indirection, but it's using a
+ * PC-relative addressing mode and hence has a limited range, which
+ * makes it not work well with mergeable string sections.
+ */
+    .section .rodata.str1.4,"aMS",%progbits,1
+
+.LstrBadEntryPoint:
+    .asciz  "Bad entry point %d\n"
+.LstrArithmeticException:
+    .asciz  "Ljava/lang/ArithmeticException;"
+.LstrArrayIndexException:
+    .asciz  "Ljava/lang/ArrayIndexOutOfBoundsException;"
+.LstrArrayStoreException:
+    .asciz  "Ljava/lang/ArrayStoreException;"
+.LstrClassCastException:
+    .asciz  "Ljava/lang/ClassCastException;"
+.LstrDivideByZero:
+    .asciz  "divide by zero"
+.LstrFilledNewArrayNotImpl:
+    .asciz  "filled-new-array only implemented for 'int'"
+.LstrInternalError:
+    .asciz  "Ljava/lang/InternalError;"
+.LstrInstantiationError:
+    .asciz  "Ljava/lang/InstantiationError;"
+.LstrNegativeArraySizeException:
+    .asciz  "Ljava/lang/NegativeArraySizeException;"
+.LstrNoSuchMethodError:
+    .asciz  "Ljava/lang/NoSuchMethodError;"
+.LstrNullPointerException:
+    .asciz  "Ljava/lang/NullPointerException;"
+
+.LstrLogTag:
+    .asciz  "mterp"
+.LstrExceptionNotCaughtLocally:
+    .asciz  "Exception %s from %s:%d not caught locally\n"
+
+.LstrNewline:
+    .asciz  "\n"
+.LstrSqueak:
+    .asciz  "<%d>"
+.LstrPrintHex:
+    .asciz  "<0x%x>"
+.LstrPrintLong:
+    .asciz  "<%lld>"
+
diff --git a/vm/mterp/armv5/header.S b/vm/mterp/armv5/header.S
new file mode 100644
index 0000000..460930d
--- /dev/null
+++ b/vm/mterp/armv5/header.S
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ARMv5 definitions and declarations.
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them.
+
+Stack is "full descending".  Only the arguments that don't fit in the first 4
+registers are placed on the stack.  "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+Mterp and ARM notes:
+
+The following registers have fixed assignments:
+
+  reg nick      purpose
+  r4  rPC       interpreted program counter, used for fetching instructions
+  r5  rFP       interpreted frame pointer, used for accessing locals and args
+  r6  rGLUE     MterpGlue pointer
+  r7  rIBASE    interpreted instruction base pointer, used for computed goto
+  r8  rINST     first 16-bit code unit of current instruction
+
+Macros are provided for common operations.  Each macro MUST emit only
+one instruction to make instruction-counting easier.  They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC     r4
+#define rFP     r5
+#define rGLUE   r6
+#define rIBASE  r7
+#define rINST   r8
+
+/* save/restore the PC and/or FP from the glue struct */
+#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
+#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
+#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
+#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
+#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
+#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}
+
+/*
+ * "export" the PC to the stack frame, f/b/o future exception objects.  Must
+ * be done *before* something calls dvmThrowException.
+ *
+ * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
+ * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
+ *
+ * It's okay to do this more than once.
+ */
+#define EXPORT_PC() \
+    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]
+
+/*
+ * Given a frame pointer, find the stack save area.
+ *
+ * In C this is "((StackSaveArea*)(_fp) -1)".
+ */
+#define SAVEAREA_FROM_FP(_reg, _fpreg) \
+    sub     _reg, _fpreg, #sizeofStackSaveArea
+
+/*
+ * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
+ */
+#define FETCH_INST()            ldrh    rINST, [rPC]
+
+/*
+ * Fetch the next instruction from the specified offset.  Advances rPC
+ * to point to the next instruction.  "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss.  (This also implies that it must come after
+ * EXPORT_PC().)
+ */
+#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #(_count*2)]!
+
+/*
+ * Fetch the next instruction from an offset specified by _reg.  Updates
+ * rPC to point to the next instruction.  "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ *
+ * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
+ * bits that hold the shift distance are used for the half/byte/sign flags.
+ * In some cases we can pre-double _reg for free, so we require a byte offset
+ * here.
+ */
+#define FETCH_ADVANCE_INST_RB(_reg) ldrh    rINST, [rPC, _reg]!
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC.  The
+ * "_count" value is in 16-bit code units.  Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
+#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]
+
+/*
+ * Fetch one byte from an offset past the current PC.  Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+#define GET_INST_OPCODE(_reg)   and     _reg, rINST, #255
+
+/*
+ * Begin executing the opcode in _reg.
+ */
+#define GOTO_OPCODE(_reg)       add     pc, rIBASE, _reg, lsl #${handler_size_bits}
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(_reg, _vreg)   ldr     _reg, [rFP, _vreg, lsl #2]
+#define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../common/asm-constants.h"
+
diff --git a/vm/mterp/armv5/stub.S b/vm/mterp/armv5/stub.S
new file mode 100644
index 0000000..7607f09
--- /dev/null
+++ b/vm/mterp/armv5/stub.S
@@ -0,0 +1,9 @@
+    /* (stub) */
+    SAVE_PC_FP_TO_GLUE()            @ only need to export these two
+    mov     r0, rGLUE               @ glue is first arg to function
+    bl      dvmMterp_${opcode}      @ call
+    LOAD_PC_FP_FROM_GLUE()          @ retrieve updated values
+    FETCH_INST()                    @ load next instruction from rPC
+    GET_INST_OPCODE(ip)             @ ...trim down to just the opcode
+    GOTO_OPCODE(ip)                 @ ...and jump to the handler
+
diff --git a/vm/mterp/armv5/unop.S b/vm/mterp/armv5/unop.S
new file mode 100644
index 0000000..12d8206
--- /dev/null
+++ b/vm/mterp/armv5/unop.S
@@ -0,0 +1,21 @@
+%default {"preinstr":""}
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0".
+     * This could be an ARM instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r3)                    @ r0<- vB
+    and     r9, r9, #15
+    $preinstr                           @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    $instr                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 9-10 instructions */
diff --git a/vm/mterp/armv5/unopNarrower.S b/vm/mterp/armv5/unopNarrower.S
new file mode 100644
index 0000000..f1ad902
--- /dev/null
+++ b/vm/mterp/armv5/unopNarrower.S
@@ -0,0 +1,24 @@
+%default {"preinstr":""}
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op r0/r1", where
+     * "result" is a 32-bit quantity in r0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     *
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     */
+    /* unop vA, vB */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
+    and     r9, r9, #15
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-11 instructions */
diff --git a/vm/mterp/armv5/unopWide.S b/vm/mterp/armv5/unopWide.S
new file mode 100644
index 0000000..e097317
--- /dev/null
+++ b/vm/mterp/armv5/unopWide.S
@@ -0,0 +1,23 @@
+%default {"preinstr":""}
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0/r1".
+     * This could be an ARM instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    $preinstr                           @ optional op; may set condition codes
+    $instr                              @ r0/r1<- op, r2-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-13 instructions */
+
diff --git a/vm/mterp/armv5/unopWider.S b/vm/mterp/armv5/unopWider.S
new file mode 100644
index 0000000..df1baea
--- /dev/null
+++ b/vm/mterp/armv5/unopWider.S
@@ -0,0 +1,21 @@
+%default {"preinstr":""}
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op r0", where
+     * "result" is a 64-bit quantity in r0/r1.
+     *
+     * For: int-to-long, int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r3)                    @ r0<- vB
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    $preinstr                           @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    $instr                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-11 instructions */
diff --git a/vm/mterp/armv5/unused.S b/vm/mterp/armv5/unused.S
new file mode 100644
index 0000000..1c82919
--- /dev/null
+++ b/vm/mterp/armv5/unused.S
@@ -0,0 +1,2 @@
+    bl      common_abort
+
diff --git a/vm/mterp/armv5/zcmp.S b/vm/mterp/armv5/zcmp.S
new file mode 100644
index 0000000..7942632
--- /dev/null
+++ b/vm/mterp/armv5/zcmp.S
@@ -0,0 +1,22 @@
+%verify "branch taken"
+%verify "branch not taken"
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    GET_VREG(r2, r0)                    @ r2<- vAA
+    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
+    cmp     r2, #0                      @ compare (vAA, 0)
+    b${revcmp}  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
diff --git a/vm/mterp/c/OP_ADD_DOUBLE.c b/vm/mterp/c/OP_ADD_DOUBLE.c
new file mode 100644
index 0000000..571aeb8
--- /dev/null
+++ b/vm/mterp/c/OP_ADD_DOUBLE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_DOUBLE(OP_ADD_DOUBLE, "add", +)
+OP_END
diff --git a/vm/mterp/c/OP_ADD_DOUBLE_2ADDR.c b/vm/mterp/c/OP_ADD_DOUBLE_2ADDR.c
new file mode 100644
index 0000000..af952cb
--- /dev/null
+++ b/vm/mterp/c/OP_ADD_DOUBLE_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_DOUBLE_2ADDR(OP_ADD_DOUBLE_2ADDR, "add", +)
+OP_END
diff --git a/vm/mterp/c/OP_ADD_FLOAT.c b/vm/mterp/c/OP_ADD_FLOAT.c
new file mode 100644
index 0000000..dab7d33
--- /dev/null
+++ b/vm/mterp/c/OP_ADD_FLOAT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_FLOAT(OP_ADD_FLOAT, "add", +)
+OP_END
diff --git a/vm/mterp/c/OP_ADD_FLOAT_2ADDR.c b/vm/mterp/c/OP_ADD_FLOAT_2ADDR.c
new file mode 100644
index 0000000..a068fd0
--- /dev/null
+++ b/vm/mterp/c/OP_ADD_FLOAT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_FLOAT_2ADDR(OP_ADD_FLOAT_2ADDR, "add", +)
+OP_END
diff --git a/vm/mterp/c/OP_ADD_INT.c b/vm/mterp/c/OP_ADD_INT.c
new file mode 100644
index 0000000..dda2cb8
--- /dev/null
+++ b/vm/mterp/c/OP_ADD_INT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT(OP_ADD_INT, "add", +, false)
+OP_END
diff --git a/vm/mterp/c/OP_ADD_INT_2ADDR.c b/vm/mterp/c/OP_ADD_INT_2ADDR.c
new file mode 100644
index 0000000..29f32dd
--- /dev/null
+++ b/vm/mterp/c/OP_ADD_INT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_2ADDR(OP_ADD_INT_2ADDR, "add", +, false)
+OP_END
diff --git a/vm/mterp/c/OP_ADD_INT_LIT16.c b/vm/mterp/c/OP_ADD_INT_LIT16.c
new file mode 100644
index 0000000..578bd50
--- /dev/null
+++ b/vm/mterp/c/OP_ADD_INT_LIT16.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT16(OP_ADD_INT_LIT16, "add", (s4), +, false)
+OP_END
diff --git a/vm/mterp/c/OP_ADD_INT_LIT8.c b/vm/mterp/c/OP_ADD_INT_LIT8.c
new file mode 100644
index 0000000..a25b5f6
--- /dev/null
+++ b/vm/mterp/c/OP_ADD_INT_LIT8.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT8(OP_ADD_INT_LIT8,   "add", +, false)
+OP_END
diff --git a/vm/mterp/c/OP_ADD_LONG.c b/vm/mterp/c/OP_ADD_LONG.c
new file mode 100644
index 0000000..24a12bd
--- /dev/null
+++ b/vm/mterp/c/OP_ADD_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG(OP_ADD_LONG, "add", +, false)
+OP_END
diff --git a/vm/mterp/c/OP_ADD_LONG_2ADDR.c b/vm/mterp/c/OP_ADD_LONG_2ADDR.c
new file mode 100644
index 0000000..0b7f740
--- /dev/null
+++ b/vm/mterp/c/OP_ADD_LONG_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG_2ADDR(OP_ADD_LONG_2ADDR, "add", +, false)
+OP_END
diff --git a/vm/mterp/c/OP_AGET.c b/vm/mterp/c/OP_AGET.c
new file mode 100644
index 0000000..766beaf
--- /dev/null
+++ b/vm/mterp/c/OP_AGET.c
@@ -0,0 +1,2 @@
+HANDLE_OP_AGET(OP_AGET, "", u4, )
+OP_END
diff --git a/vm/mterp/c/OP_AGET_BOOLEAN.c b/vm/mterp/c/OP_AGET_BOOLEAN.c
new file mode 100644
index 0000000..d63bc10
--- /dev/null
+++ b/vm/mterp/c/OP_AGET_BOOLEAN.c
@@ -0,0 +1,2 @@
+HANDLE_OP_AGET(OP_AGET_BOOLEAN, "-boolean", u1, )
+OP_END
diff --git a/vm/mterp/c/OP_AGET_BYTE.c b/vm/mterp/c/OP_AGET_BYTE.c
new file mode 100644
index 0000000..61ecc05
--- /dev/null
+++ b/vm/mterp/c/OP_AGET_BYTE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_AGET(OP_AGET_BYTE, "-byte", s1, )
+OP_END
diff --git a/vm/mterp/c/OP_AGET_CHAR.c b/vm/mterp/c/OP_AGET_CHAR.c
new file mode 100644
index 0000000..55e16ef
--- /dev/null
+++ b/vm/mterp/c/OP_AGET_CHAR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_AGET(OP_AGET_CHAR, "-char", u2, )
+OP_END
diff --git a/vm/mterp/c/OP_AGET_OBJECT.c b/vm/mterp/c/OP_AGET_OBJECT.c
new file mode 100644
index 0000000..903637c
--- /dev/null
+++ b/vm/mterp/c/OP_AGET_OBJECT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_AGET(OP_AGET_OBJECT, "-object", u4, )
+OP_END
diff --git a/vm/mterp/c/OP_AGET_SHORT.c b/vm/mterp/c/OP_AGET_SHORT.c
new file mode 100644
index 0000000..176b4a6
--- /dev/null
+++ b/vm/mterp/c/OP_AGET_SHORT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_AGET(OP_AGET_SHORT, "-short", s2, )
+OP_END
diff --git a/vm/mterp/c/OP_AGET_WIDE.c b/vm/mterp/c/OP_AGET_WIDE.c
new file mode 100644
index 0000000..e7974cb
--- /dev/null
+++ b/vm/mterp/c/OP_AGET_WIDE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_AGET(OP_AGET_WIDE, "-wide", s8, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_AND_INT.c b/vm/mterp/c/OP_AND_INT.c
new file mode 100644
index 0000000..779b2d3
--- /dev/null
+++ b/vm/mterp/c/OP_AND_INT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT(OP_AND_INT, "and", &, false)
+OP_END
diff --git a/vm/mterp/c/OP_AND_INT_2ADDR.c b/vm/mterp/c/OP_AND_INT_2ADDR.c
new file mode 100644
index 0000000..4330c35
--- /dev/null
+++ b/vm/mterp/c/OP_AND_INT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_2ADDR(OP_AND_INT_2ADDR, "and", &, false)
+OP_END
diff --git a/vm/mterp/c/OP_AND_INT_LIT16.c b/vm/mterp/c/OP_AND_INT_LIT16.c
new file mode 100644
index 0000000..01f48ba
--- /dev/null
+++ b/vm/mterp/c/OP_AND_INT_LIT16.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT16(OP_AND_INT_LIT16, "and", (s4), &, false)
+OP_END
diff --git a/vm/mterp/c/OP_AND_INT_LIT8.c b/vm/mterp/c/OP_AND_INT_LIT8.c
new file mode 100644
index 0000000..7bcfa0a
--- /dev/null
+++ b/vm/mterp/c/OP_AND_INT_LIT8.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT8(OP_AND_INT_LIT8,   "and", &, false)
+OP_END
diff --git a/vm/mterp/c/OP_AND_LONG.c b/vm/mterp/c/OP_AND_LONG.c
new file mode 100644
index 0000000..a92a5d4
--- /dev/null
+++ b/vm/mterp/c/OP_AND_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG(OP_AND_LONG, "and", &, false)
+OP_END
diff --git a/vm/mterp/c/OP_AND_LONG_2ADDR.c b/vm/mterp/c/OP_AND_LONG_2ADDR.c
new file mode 100644
index 0000000..8161b8c
--- /dev/null
+++ b/vm/mterp/c/OP_AND_LONG_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG_2ADDR(OP_AND_LONG_2ADDR, "and", &, false)
+OP_END
diff --git a/vm/mterp/c/OP_APUT.c b/vm/mterp/c/OP_APUT.c
new file mode 100644
index 0000000..07d3e04
--- /dev/null
+++ b/vm/mterp/c/OP_APUT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_APUT(OP_APUT, "", u4, )
+OP_END
diff --git a/vm/mterp/c/OP_APUT_BOOLEAN.c b/vm/mterp/c/OP_APUT_BOOLEAN.c
new file mode 100644
index 0000000..fc69147
--- /dev/null
+++ b/vm/mterp/c/OP_APUT_BOOLEAN.c
@@ -0,0 +1,2 @@
+HANDLE_OP_APUT(OP_APUT_BOOLEAN, "-boolean", u1, )
+OP_END
diff --git a/vm/mterp/c/OP_APUT_BYTE.c b/vm/mterp/c/OP_APUT_BYTE.c
new file mode 100644
index 0000000..45aeb0b
--- /dev/null
+++ b/vm/mterp/c/OP_APUT_BYTE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_APUT(OP_APUT_BYTE, "-byte", s1, )
+OP_END
diff --git a/vm/mterp/c/OP_APUT_CHAR.c b/vm/mterp/c/OP_APUT_CHAR.c
new file mode 100644
index 0000000..1553c27
--- /dev/null
+++ b/vm/mterp/c/OP_APUT_CHAR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_APUT(OP_APUT_CHAR, "-char", u2, )
+OP_END
diff --git a/vm/mterp/c/OP_APUT_OBJECT.c b/vm/mterp/c/OP_APUT_OBJECT.c
new file mode 100644
index 0000000..6533b7d
--- /dev/null
+++ b/vm/mterp/c/OP_APUT_OBJECT.c
@@ -0,0 +1,39 @@
+HANDLE_OPCODE(OP_APUT_OBJECT /*vAA, vBB, vCC*/)
+    {
+        ArrayObject* arrayObj;
+        Object* obj;
+        u2 arrayInfo;
+        EXPORT_PC();
+        vdst = INST_AA(inst);       /* AA: source value */
+        arrayInfo = FETCH(1);
+        vsrc1 = arrayInfo & 0xff;   /* BB: array ptr */
+        vsrc2 = arrayInfo >> 8;     /* CC: index */
+        ILOGV("|aput%s v%d,v%d,v%d", "-object", vdst, vsrc1, vsrc2);
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+        if (!checkForNull((Object*) arrayObj))
+            GOTO(exceptionThrown);
+        if (GET_REGISTER(vsrc2) >= arrayObj->length) {
+            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;",
+                NULL);
+            GOTO(exceptionThrown);
+        }
+        obj = (Object*) GET_REGISTER(vdst);
+        if (obj != NULL) {
+            if (!checkForNull(obj))
+                GOTO(exceptionThrown);
+            if (!dvmCanPutArrayElement(obj->clazz, arrayObj->obj.clazz)) {
+                LOGV("Can't put a '%s'(%p) into array type='%s'(%p)\n",
+                    obj->clazz->descriptor, obj,
+                    arrayObj->obj.clazz->descriptor, arrayObj);
+                //dvmDumpClass(obj->clazz);
+                //dvmDumpClass(arrayObj->obj.clazz);
+                dvmThrowException("Ljava/lang/ArrayStoreException;", NULL);
+                GOTO(exceptionThrown);
+            }
+        }
+        ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));
+        ((u4*) arrayObj->contents)[GET_REGISTER(vsrc2)] =
+            GET_REGISTER(vdst);
+    }
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_APUT_SHORT.c b/vm/mterp/c/OP_APUT_SHORT.c
new file mode 100644
index 0000000..a72b5ea
--- /dev/null
+++ b/vm/mterp/c/OP_APUT_SHORT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_APUT(OP_APUT_SHORT, "-short", s2, )
+OP_END
diff --git a/vm/mterp/c/OP_APUT_WIDE.c b/vm/mterp/c/OP_APUT_WIDE.c
new file mode 100644
index 0000000..39c8cfa
--- /dev/null
+++ b/vm/mterp/c/OP_APUT_WIDE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_APUT(OP_APUT_WIDE, "-wide", s8, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_ARRAY_LENGTH.c b/vm/mterp/c/OP_ARRAY_LENGTH.c
new file mode 100644
index 0000000..f38b057
--- /dev/null
+++ b/vm/mterp/c/OP_ARRAY_LENGTH.c
@@ -0,0 +1,15 @@
+HANDLE_OPCODE(OP_ARRAY_LENGTH /*vA, vB*/)
+    {
+        ArrayObject* arrayObj;
+
+        vdst = INST_A(inst);
+        vsrc1 = INST_B(inst);
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+        ILOGV("|array-length v%d,v%d  (%p)", vdst, vsrc1, arrayObj);
+        if (!checkForNullExportPC((Object*) arrayObj, fp, pc))
+            GOTO(exceptionThrown);
+        /* verifier guarantees this is an array reference */
+        SET_REGISTER(vdst, arrayObj->length);
+    }
+    FINISH(1);
+OP_END
diff --git a/vm/mterp/c/OP_CHECK_CAST.c b/vm/mterp/c/OP_CHECK_CAST.c
new file mode 100644
index 0000000..9bc1ecf
--- /dev/null
+++ b/vm/mterp/c/OP_CHECK_CAST.c
@@ -0,0 +1,32 @@
+HANDLE_OPCODE(OP_CHECK_CAST /*vAA, class@BBBB*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);
+        ref = FETCH(1);         /* class to check against */
+        ILOGV("|check-cast v%d,class@0x%04x", vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj != NULL) {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNull(obj))
+                GOTO(exceptionThrown);
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                clazz = dvmResolveClass(method->clazz, ref, false);
+                if (clazz == NULL)
+                    GOTO(exceptionThrown);
+            }
+            if (!dvmInstanceof(obj->clazz, clazz)) {
+                dvmThrowExceptionWithClassMessage(
+                    "Ljava/lang/ClassCastException;", obj->clazz->descriptor);
+                GOTO(exceptionThrown);
+            }
+        }
+    }
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_CMPG_DOUBLE.c b/vm/mterp/c/OP_CMPG_DOUBLE.c
new file mode 100644
index 0000000..3f4082c
--- /dev/null
+++ b/vm/mterp/c/OP_CMPG_DOUBLE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_CMPX(OP_CMPG_DOUBLE, "g-double", double, _DOUBLE, 1)
+OP_END
diff --git a/vm/mterp/c/OP_CMPG_FLOAT.c b/vm/mterp/c/OP_CMPG_FLOAT.c
new file mode 100644
index 0000000..0bba49e
--- /dev/null
+++ b/vm/mterp/c/OP_CMPG_FLOAT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_CMPX(OP_CMPG_FLOAT, "g-float", float, _FLOAT, 1)
+OP_END
diff --git a/vm/mterp/c/OP_CMPL_DOUBLE.c b/vm/mterp/c/OP_CMPL_DOUBLE.c
new file mode 100644
index 0000000..4da18b4
--- /dev/null
+++ b/vm/mterp/c/OP_CMPL_DOUBLE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_CMPX(OP_CMPL_DOUBLE, "l-double", double, _DOUBLE, -1)
+OP_END
diff --git a/vm/mterp/c/OP_CMPL_FLOAT.c b/vm/mterp/c/OP_CMPL_FLOAT.c
new file mode 100644
index 0000000..7916193
--- /dev/null
+++ b/vm/mterp/c/OP_CMPL_FLOAT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_CMPX(OP_CMPL_FLOAT, "l-float", float, _FLOAT, -1)
+OP_END
diff --git a/vm/mterp/c/OP_CMP_LONG.c b/vm/mterp/c/OP_CMP_LONG.c
new file mode 100644
index 0000000..a0e412c
--- /dev/null
+++ b/vm/mterp/c/OP_CMP_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_OP_CMPX(OP_CMP_LONG, "-long", s8, _WIDE, 0)
+OP_END
diff --git a/vm/mterp/c/OP_CONST.c b/vm/mterp/c/OP_CONST.c
new file mode 100644
index 0000000..e281a51
--- /dev/null
+++ b/vm/mterp/c/OP_CONST.c
@@ -0,0 +1,12 @@
+HANDLE_OPCODE(OP_CONST /*vAA, #+BBBBBBBB*/)
+    {
+        u4 tmp;
+
+        vdst = INST_AA(inst);
+        tmp = FETCH(1);
+        tmp |= (u4)FETCH(2) << 16;
+        ILOGV("|const v%d,#0x%08x", vdst, tmp);
+        SET_REGISTER(vdst, tmp);
+    }
+    FINISH(3);
+OP_END
diff --git a/vm/mterp/c/OP_CONST_16.c b/vm/mterp/c/OP_CONST_16.c
new file mode 100644
index 0000000..f58f50c
--- /dev/null
+++ b/vm/mterp/c/OP_CONST_16.c
@@ -0,0 +1,7 @@
+HANDLE_OPCODE(OP_CONST_16 /*vAA, #+BBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|const/16 v%d,#0x%04x", vdst, (s2)vsrc1);
+    SET_REGISTER(vdst, (s2) vsrc1);
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_CONST_4.c b/vm/mterp/c/OP_CONST_4.c
new file mode 100644
index 0000000..800ef9a
--- /dev/null
+++ b/vm/mterp/c/OP_CONST_4.c
@@ -0,0 +1,11 @@
+HANDLE_OPCODE(OP_CONST_4 /*vA, #+B*/)
+    {
+        s4 tmp;
+
+        vdst = INST_A(inst);
+        tmp = (s4) (INST_B(inst) << 28) >> 28;  // sign extend 4-bit value
+        ILOGV("|const/4 v%d,#0x%02x", vdst, (s4)tmp);
+        SET_REGISTER(vdst, tmp);
+    }
+    FINISH(1);
+OP_END
diff --git a/vm/mterp/c/OP_CONST_CLASS.c b/vm/mterp/c/OP_CONST_CLASS.c
new file mode 100644
index 0000000..d0aefa3
--- /dev/null
+++ b/vm/mterp/c/OP_CONST_CLASS.c
@@ -0,0 +1,18 @@
+HANDLE_OPCODE(OP_CONST_CLASS /*vAA, class@BBBB*/)
+    {
+        ClassObject* clazz;
+
+        vdst = INST_AA(inst);
+        ref = FETCH(1);
+        ILOGV("|const-class v%d class@0x%04x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            EXPORT_PC();
+            clazz = dvmResolveClass(method->clazz, ref, true);
+            if (clazz == NULL)
+                GOTO(exceptionThrown);
+        }
+        SET_REGISTER(vdst, (u4) clazz);
+    }
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_CONST_HIGH16.c b/vm/mterp/c/OP_CONST_HIGH16.c
new file mode 100644
index 0000000..26b22f4
--- /dev/null
+++ b/vm/mterp/c/OP_CONST_HIGH16.c
@@ -0,0 +1,7 @@
+HANDLE_OPCODE(OP_CONST_HIGH16 /*vAA, #+BBBB0000*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|const/high16 v%d,#0x%04x0000", vdst, vsrc1);
+    SET_REGISTER(vdst, vsrc1 << 16);
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_CONST_STRING.c b/vm/mterp/c/OP_CONST_STRING.c
new file mode 100644
index 0000000..c74635f
--- /dev/null
+++ b/vm/mterp/c/OP_CONST_STRING.c
@@ -0,0 +1,18 @@
+HANDLE_OPCODE(OP_CONST_STRING /*vAA, string@BBBB*/)
+    {
+        StringObject* strObj;
+
+        vdst = INST_AA(inst);
+        ref = FETCH(1);
+        ILOGV("|const-string v%d string@0x%04x", vdst, ref);
+        strObj = dvmDexGetResolvedString(methodClassDex, ref);
+        if (strObj == NULL) {
+            EXPORT_PC();
+            strObj = dvmResolveString(method->clazz, ref);
+            if (strObj == NULL)
+                GOTO(exceptionThrown);
+        }
+        SET_REGISTER(vdst, (u4) strObj);
+    }
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_CONST_STRING_JUMBO.c b/vm/mterp/c/OP_CONST_STRING_JUMBO.c
new file mode 100644
index 0000000..3afe47f
--- /dev/null
+++ b/vm/mterp/c/OP_CONST_STRING_JUMBO.c
@@ -0,0 +1,20 @@
+HANDLE_OPCODE(OP_CONST_STRING_JUMBO /*vAA, string@BBBBBBBB*/)
+    {
+        StringObject* strObj;
+        u4 tmp;
+
+        vdst = INST_AA(inst);
+        tmp = FETCH(1);
+        tmp |= (u4)FETCH(2) << 16;
+        ILOGV("|const-string/jumbo v%d string@0x%08x", vdst, tmp);
+        strObj = dvmDexGetResolvedString(methodClassDex, tmp);
+        if (strObj == NULL) {
+            EXPORT_PC();
+            strObj = dvmResolveString(method->clazz, tmp);
+            if (strObj == NULL)
+                GOTO(exceptionThrown);
+        }
+        SET_REGISTER(vdst, (u4) strObj);
+    }
+    FINISH(3);
+OP_END
diff --git a/vm/mterp/c/OP_CONST_WIDE.c b/vm/mterp/c/OP_CONST_WIDE.c
new file mode 100644
index 0000000..ccb3955
--- /dev/null
+++ b/vm/mterp/c/OP_CONST_WIDE.c
@@ -0,0 +1,14 @@
+HANDLE_OPCODE(OP_CONST_WIDE /*vAA, #+BBBBBBBBBBBBBBBB*/)
+    {
+        u8 tmp;
+
+        vdst = INST_AA(inst);
+        tmp = FETCH(1);
+        tmp |= (u8)FETCH(2) << 16;
+        tmp |= (u8)FETCH(3) << 32;
+        tmp |= (u8)FETCH(4) << 48;
+        ILOGV("|const-wide v%d,#0x%08llx", vdst, tmp);
+        SET_REGISTER_WIDE(vdst, tmp);
+    }
+    FINISH(5);
+OP_END
diff --git a/vm/mterp/c/OP_CONST_WIDE_16.c b/vm/mterp/c/OP_CONST_WIDE_16.c
new file mode 100644
index 0000000..da69f37
--- /dev/null
+++ b/vm/mterp/c/OP_CONST_WIDE_16.c
@@ -0,0 +1,7 @@
+HANDLE_OPCODE(OP_CONST_WIDE_16 /*vAA, #+BBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|const-wide/16 v%d,#0x%04x", vdst, (s2)vsrc1);
+    SET_REGISTER_WIDE(vdst, (s2)vsrc1);
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_CONST_WIDE_32.c b/vm/mterp/c/OP_CONST_WIDE_32.c
new file mode 100644
index 0000000..ad4acbb
--- /dev/null
+++ b/vm/mterp/c/OP_CONST_WIDE_32.c
@@ -0,0 +1,12 @@
+HANDLE_OPCODE(OP_CONST_WIDE_32 /*vAA, #+BBBBBBBB*/)
+    {
+        u4 tmp;
+
+        vdst = INST_AA(inst);
+        tmp = FETCH(1);
+        tmp |= (u4)FETCH(2) << 16;
+        ILOGV("|const-wide/32 v%d,#0x%08x", vdst, tmp);
+        SET_REGISTER_WIDE(vdst, (s4) tmp);
+    }
+    FINISH(3);
+OP_END
diff --git a/vm/mterp/c/OP_CONST_WIDE_HIGH16.c b/vm/mterp/c/OP_CONST_WIDE_HIGH16.c
new file mode 100644
index 0000000..bcc0664
--- /dev/null
+++ b/vm/mterp/c/OP_CONST_WIDE_HIGH16.c
@@ -0,0 +1,7 @@
+HANDLE_OPCODE(OP_CONST_WIDE_HIGH16 /*vAA, #+BBBB000000000000*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|const-wide/high16 v%d,#0x%04x000000000000", vdst, vsrc1);
+    SET_REGISTER_WIDE(vdst, ((u8) vsrc1) << 48);
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_DIV_DOUBLE.c b/vm/mterp/c/OP_DIV_DOUBLE.c
new file mode 100644
index 0000000..d6e4b55
--- /dev/null
+++ b/vm/mterp/c/OP_DIV_DOUBLE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_DOUBLE(OP_DIV_DOUBLE, "div", /)
+OP_END
diff --git a/vm/mterp/c/OP_DIV_DOUBLE_2ADDR.c b/vm/mterp/c/OP_DIV_DOUBLE_2ADDR.c
new file mode 100644
index 0000000..85a1523
--- /dev/null
+++ b/vm/mterp/c/OP_DIV_DOUBLE_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_DOUBLE_2ADDR(OP_DIV_DOUBLE_2ADDR, "div", /)
+OP_END
diff --git a/vm/mterp/c/OP_DIV_FLOAT.c b/vm/mterp/c/OP_DIV_FLOAT.c
new file mode 100644
index 0000000..2c5049b
--- /dev/null
+++ b/vm/mterp/c/OP_DIV_FLOAT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_FLOAT(OP_DIV_FLOAT, "div", /)
+OP_END
diff --git a/vm/mterp/c/OP_DIV_FLOAT_2ADDR.c b/vm/mterp/c/OP_DIV_FLOAT_2ADDR.c
new file mode 100644
index 0000000..cd7b4d9
--- /dev/null
+++ b/vm/mterp/c/OP_DIV_FLOAT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_FLOAT_2ADDR(OP_DIV_FLOAT_2ADDR, "div", /)
+OP_END
diff --git a/vm/mterp/c/OP_DIV_INT.c b/vm/mterp/c/OP_DIV_INT.c
new file mode 100644
index 0000000..70b1cb9
--- /dev/null
+++ b/vm/mterp/c/OP_DIV_INT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT(OP_DIV_INT, "div", /, true)
+OP_END
diff --git a/vm/mterp/c/OP_DIV_INT_2ADDR.c b/vm/mterp/c/OP_DIV_INT_2ADDR.c
new file mode 100644
index 0000000..0491373
--- /dev/null
+++ b/vm/mterp/c/OP_DIV_INT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_2ADDR(OP_DIV_INT_2ADDR, "div", /, true)
+OP_END
diff --git a/vm/mterp/c/OP_DIV_INT_LIT16.c b/vm/mterp/c/OP_DIV_INT_LIT16.c
new file mode 100644
index 0000000..0975d06
--- /dev/null
+++ b/vm/mterp/c/OP_DIV_INT_LIT16.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT16(OP_DIV_INT_LIT16, "div", (s4), /, true)
+OP_END
diff --git a/vm/mterp/c/OP_DIV_INT_LIT8.c b/vm/mterp/c/OP_DIV_INT_LIT8.c
new file mode 100644
index 0000000..dfd84c0
--- /dev/null
+++ b/vm/mterp/c/OP_DIV_INT_LIT8.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT8(OP_DIV_INT_LIT8,   "div", /, true)
+OP_END
diff --git a/vm/mterp/c/OP_DIV_LONG.c b/vm/mterp/c/OP_DIV_LONG.c
new file mode 100644
index 0000000..da07f1c
--- /dev/null
+++ b/vm/mterp/c/OP_DIV_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG(OP_DIV_LONG, "div", /, true)
+OP_END
diff --git a/vm/mterp/c/OP_DIV_LONG_2ADDR.c b/vm/mterp/c/OP_DIV_LONG_2ADDR.c
new file mode 100644
index 0000000..4fb234d
--- /dev/null
+++ b/vm/mterp/c/OP_DIV_LONG_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG_2ADDR(OP_DIV_LONG_2ADDR, "div", /, true)
+OP_END
diff --git a/vm/mterp/c/OP_DOUBLE_TO_FLOAT.c b/vm/mterp/c/OP_DOUBLE_TO_FLOAT.c
new file mode 100644
index 0000000..152e5fd
--- /dev/null
+++ b/vm/mterp/c/OP_DOUBLE_TO_FLOAT.c
@@ -0,0 +1,2 @@
+HANDLE_NUMCONV(OP_DOUBLE_TO_FLOAT,      "double-to-float", _DOUBLE, _FLOAT)
+OP_END
diff --git a/vm/mterp/c/OP_DOUBLE_TO_INT.c b/vm/mterp/c/OP_DOUBLE_TO_INT.c
new file mode 100644
index 0000000..e210b92
--- /dev/null
+++ b/vm/mterp/c/OP_DOUBLE_TO_INT.c
@@ -0,0 +1,3 @@
+HANDLE_FLOAT_TO_INT(OP_DOUBLE_TO_INT,   "double-to-int",
+    double, _DOUBLE, s4, _INT)
+OP_END
diff --git a/vm/mterp/c/OP_DOUBLE_TO_LONG.c b/vm/mterp/c/OP_DOUBLE_TO_LONG.c
new file mode 100644
index 0000000..44d548c
--- /dev/null
+++ b/vm/mterp/c/OP_DOUBLE_TO_LONG.c
@@ -0,0 +1,3 @@
+HANDLE_FLOAT_TO_INT(OP_DOUBLE_TO_LONG,  "double-to-long",
+    double, _DOUBLE, s8, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_EXECUTE_INLINE.c b/vm/mterp/c/OP_EXECUTE_INLINE.c
new file mode 100644
index 0000000..0081dfe
--- /dev/null
+++ b/vm/mterp/c/OP_EXECUTE_INLINE.c
@@ -0,0 +1,58 @@
+HANDLE_OPCODE(OP_EXECUTE_INLINE /*vB, {vD, vE, vF, vG}, inline@CCCC*/)
+    {
+        /*
+         * This has the same form as other method calls, but we ignore
+         * the 5th argument (vA).  This is chiefly because the first four
+         * arguments to a function on ARM are in registers.
+         *
+         * We only set the arguments that are actually used, leaving
+         * the rest uninitialized.  We're assuming that, if the method
+         * needs them, they'll be specified in the call.
+         *
+         * This annoys gcc when optimizations are enabled, causing a
+         * "may be used uninitialized" warning.  We can quiet the warnings
+         * for a slight penalty (5%: 373ns vs. 393ns on empty method).  Note
+         * that valgrind is perfectly happy with this arrangement, because
+         * the uninitialized values are never actually used.
+         */
+        u4 arg0, arg1, arg2, arg3;
+        //arg0 = arg1 = arg2 = arg3 = 0;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_B(inst);       /* #of args */
+        ref = FETCH(1);             /* inline call "ref" */
+        vdst = FETCH(2);            /* 0-4 register indices */
+        ILOGV("|execute-inline args=%d @%d {regs=0x%04x}",
+            vsrc1, ref, vdst);
+
+        assert((vdst >> 16) == 0);  // 16-bit type -or- high 16 bits clear
+        assert(vsrc1 <= 4);
+
+        switch (vsrc1) {
+        case 4:
+            arg3 = GET_REGISTER(vdst >> 12);
+            /* fall through */
+        case 3:
+            arg2 = GET_REGISTER((vdst & 0x0f00) >> 8);
+            /* fall through */
+        case 2:
+            arg1 = GET_REGISTER((vdst & 0x00f0) >> 4);
+            /* fall through */
+        case 1:
+            arg0 = GET_REGISTER(vdst & 0x0f);
+            /* fall through */
+        default:        // case 0
+            ;
+        }
+
+#if INTERP_TYPE == INTERP_DBG
+        if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
+            GOTO(exceptionThrown);
+#else
+        if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
+            GOTO(exceptionThrown);
+#endif
+    }
+    FINISH(3);
+OP_END
diff --git a/vm/mterp/c/OP_FILLED_NEW_ARRAY.c b/vm/mterp/c/OP_FILLED_NEW_ARRAY.c
new file mode 100644
index 0000000..b925847
--- /dev/null
+++ b/vm/mterp/c/OP_FILLED_NEW_ARRAY.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY /*vB, {vD, vE, vF, vG, vA}, class@CCCC*/)
+    GOTO(filledNewArray, false);
+OP_END
diff --git a/vm/mterp/c/OP_FILLED_NEW_ARRAY_RANGE.c b/vm/mterp/c/OP_FILLED_NEW_ARRAY_RANGE.c
new file mode 100644
index 0000000..714e248
--- /dev/null
+++ b/vm/mterp/c/OP_FILLED_NEW_ARRAY_RANGE.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_RANGE /*{vCCCC..v(CCCC+AA-1)}, class@BBBB*/)
+    GOTO(filledNewArray, true);
+OP_END
diff --git a/vm/mterp/c/OP_FILL_ARRAY_DATA.c b/vm/mterp/c/OP_FILL_ARRAY_DATA.c
new file mode 100644
index 0000000..22d9d66
--- /dev/null
+++ b/vm/mterp/c/OP_FILL_ARRAY_DATA.c
@@ -0,0 +1,28 @@
+HANDLE_OPCODE(OP_FILL_ARRAY_DATA)   /*vAA, +BBBBBBBB*/
+    {
+        const u2* arrayData;
+        s4 offset;
+        ArrayObject* arrayObj;
+
+        EXPORT_PC();
+        vsrc1 = INST_AA(inst);
+        offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+        ILOGV("|fill-array-data v%d +0x%04x", vsrc1, offset);
+        arrayData = pc + offset;       // offset in 16-bit units
+#ifndef NDEBUG
+        if (arrayData < method->insns ||
+            arrayData >= method->insns + dvmGetMethodInsnsSize(method))
+        {
+            /* should have been caught in verifier */
+            dvmThrowException("Ljava/lang/InternalError;", 
+                              "bad fill array data");
+            GOTO(exceptionThrown);
+        }
+#endif
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+        if (!dvmInterpHandleFillArrayData(arrayObj, arrayData)) {
+            GOTO(exceptionThrown);
+        }
+        FINISH(3);
+    }
+OP_END
diff --git a/vm/mterp/c/OP_FLOAT_TO_DOUBLE.c b/vm/mterp/c/OP_FLOAT_TO_DOUBLE.c
new file mode 100644
index 0000000..ea5e7a6
--- /dev/null
+++ b/vm/mterp/c/OP_FLOAT_TO_DOUBLE.c
@@ -0,0 +1,2 @@
+HANDLE_NUMCONV(OP_FLOAT_TO_DOUBLE,      "float-to-double", _FLOAT, _DOUBLE)
+OP_END
diff --git a/vm/mterp/c/OP_FLOAT_TO_INT.c b/vm/mterp/c/OP_FLOAT_TO_INT.c
new file mode 100644
index 0000000..15522f8
--- /dev/null
+++ b/vm/mterp/c/OP_FLOAT_TO_INT.c
@@ -0,0 +1,3 @@
+HANDLE_FLOAT_TO_INT(OP_FLOAT_TO_INT,    "float-to-int",
+    float, _FLOAT, s4, _INT)
+OP_END
diff --git a/vm/mterp/c/OP_FLOAT_TO_LONG.c b/vm/mterp/c/OP_FLOAT_TO_LONG.c
new file mode 100644
index 0000000..03bd30d
--- /dev/null
+++ b/vm/mterp/c/OP_FLOAT_TO_LONG.c
@@ -0,0 +1,3 @@
+HANDLE_FLOAT_TO_INT(OP_FLOAT_TO_LONG,   "float-to-long",
+    float, _FLOAT, s8, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_GOTO.c b/vm/mterp/c/OP_GOTO.c
new file mode 100644
index 0000000..eed7b9f
--- /dev/null
+++ b/vm/mterp/c/OP_GOTO.c
@@ -0,0 +1,11 @@
+HANDLE_OPCODE(OP_GOTO /*+AA*/)
+    vdst = INST_AA(inst);
+    if ((s1)vdst < 0)
+        ILOGV("|goto -0x%02x", -((s1)vdst));
+    else
+        ILOGV("|goto +0x%02x", ((s1)vdst));
+    ILOGV("> branch taken");
+    if ((s1)vdst < 0)
+        PERIODIC_CHECKS(kInterpEntryInstr, (s1)vdst);
+    FINISH((s1)vdst);
+OP_END
diff --git a/vm/mterp/c/OP_GOTO_16.c b/vm/mterp/c/OP_GOTO_16.c
new file mode 100644
index 0000000..afdccb3
--- /dev/null
+++ b/vm/mterp/c/OP_GOTO_16.c
@@ -0,0 +1,14 @@
+HANDLE_OPCODE(OP_GOTO_16 /*+AAAA*/)
+    {
+        s4 offset = (s2) FETCH(1);          /* sign-extend next code unit */
+
+        if (offset < 0)
+            ILOGV("|goto/16 -0x%04x", -offset);
+        else
+            ILOGV("|goto/16 +0x%04x", offset);
+        ILOGV("> branch taken");
+        if (offset < 0)
+            PERIODIC_CHECKS(kInterpEntryInstr, offset);
+        FINISH(offset);
+    }
+OP_END
diff --git a/vm/mterp/c/OP_GOTO_32.c b/vm/mterp/c/OP_GOTO_32.c
new file mode 100644
index 0000000..d1cee32
--- /dev/null
+++ b/vm/mterp/c/OP_GOTO_32.c
@@ -0,0 +1,15 @@
+HANDLE_OPCODE(OP_GOTO_32 /*+AAAAAAAA*/)
+    {
+        s4 offset = FETCH(1);               /* low-order 16 bits */
+        offset |= ((s4) FETCH(2)) << 16;    /* high-order 16 bits */
+
+        if (offset < 0)
+            ILOGV("|goto/32 -0x%08x", -offset);
+        else
+            ILOGV("|goto/32 +0x%08x", offset);
+        ILOGV("> branch taken");
+        if (offset <= 0)    /* allowed to branch to self */
+            PERIODIC_CHECKS(kInterpEntryInstr, offset);
+        FINISH(offset);
+    }
+OP_END
diff --git a/vm/mterp/c/OP_IF_EQ.c b/vm/mterp/c/OP_IF_EQ.c
new file mode 100644
index 0000000..2c3b9b5
--- /dev/null
+++ b/vm/mterp/c/OP_IF_EQ.c
@@ -0,0 +1,2 @@
+HANDLE_OP_IF_XX(OP_IF_EQ, "eq", ==)
+OP_END
diff --git a/vm/mterp/c/OP_IF_EQZ.c b/vm/mterp/c/OP_IF_EQZ.c
new file mode 100644
index 0000000..d2dd1aa
--- /dev/null
+++ b/vm/mterp/c/OP_IF_EQZ.c
@@ -0,0 +1,2 @@
+HANDLE_OP_IF_XXZ(OP_IF_EQZ, "eqz", ==)
+OP_END
diff --git a/vm/mterp/c/OP_IF_GE.c b/vm/mterp/c/OP_IF_GE.c
new file mode 100644
index 0000000..8aa85c4
--- /dev/null
+++ b/vm/mterp/c/OP_IF_GE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_IF_XX(OP_IF_GE, "ge", >=)
+OP_END
diff --git a/vm/mterp/c/OP_IF_GEZ.c b/vm/mterp/c/OP_IF_GEZ.c
new file mode 100644
index 0000000..8c4b78a
--- /dev/null
+++ b/vm/mterp/c/OP_IF_GEZ.c
@@ -0,0 +1,2 @@
+HANDLE_OP_IF_XXZ(OP_IF_GEZ, "gez", >=)
+OP_END
diff --git a/vm/mterp/c/OP_IF_GT.c b/vm/mterp/c/OP_IF_GT.c
new file mode 100644
index 0000000..d35eb29
--- /dev/null
+++ b/vm/mterp/c/OP_IF_GT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_IF_XX(OP_IF_GT, "gt", >)
+OP_END
diff --git a/vm/mterp/c/OP_IF_GTZ.c b/vm/mterp/c/OP_IF_GTZ.c
new file mode 100644
index 0000000..63a0073
--- /dev/null
+++ b/vm/mterp/c/OP_IF_GTZ.c
@@ -0,0 +1,2 @@
+HANDLE_OP_IF_XXZ(OP_IF_GTZ, "gtz", >)
+OP_END
diff --git a/vm/mterp/c/OP_IF_LE.c b/vm/mterp/c/OP_IF_LE.c
new file mode 100644
index 0000000..f4b213a
--- /dev/null
+++ b/vm/mterp/c/OP_IF_LE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_IF_XX(OP_IF_LE, "le", <=)
+OP_END
diff --git a/vm/mterp/c/OP_IF_LEZ.c b/vm/mterp/c/OP_IF_LEZ.c
new file mode 100644
index 0000000..1d57a50
--- /dev/null
+++ b/vm/mterp/c/OP_IF_LEZ.c
@@ -0,0 +1,2 @@
+HANDLE_OP_IF_XXZ(OP_IF_LEZ, "lez", <=)
+OP_END
diff --git a/vm/mterp/c/OP_IF_LT.c b/vm/mterp/c/OP_IF_LT.c
new file mode 100644
index 0000000..0233892
--- /dev/null
+++ b/vm/mterp/c/OP_IF_LT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_IF_XX(OP_IF_LT, "lt", <)
+OP_END
diff --git a/vm/mterp/c/OP_IF_LTZ.c b/vm/mterp/c/OP_IF_LTZ.c
new file mode 100644
index 0000000..b4b9be2
--- /dev/null
+++ b/vm/mterp/c/OP_IF_LTZ.c
@@ -0,0 +1,2 @@
+HANDLE_OP_IF_XXZ(OP_IF_LTZ, "ltz", <)
+OP_END
diff --git a/vm/mterp/c/OP_IF_NE.c b/vm/mterp/c/OP_IF_NE.c
new file mode 100644
index 0000000..8da70a5
--- /dev/null
+++ b/vm/mterp/c/OP_IF_NE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_IF_XX(OP_IF_NE, "ne", !=)
+OP_END
diff --git a/vm/mterp/c/OP_IF_NEZ.c b/vm/mterp/c/OP_IF_NEZ.c
new file mode 100644
index 0000000..209e836
--- /dev/null
+++ b/vm/mterp/c/OP_IF_NEZ.c
@@ -0,0 +1,2 @@
+HANDLE_OP_IF_XXZ(OP_IF_NEZ, "nez", !=)
+OP_END
diff --git a/vm/mterp/c/OP_IGET.c b/vm/mterp/c/OP_IGET.c
new file mode 100644
index 0000000..c6333e5
--- /dev/null
+++ b/vm/mterp/c/OP_IGET.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X(OP_IGET,                  "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IGET_BOOLEAN.c b/vm/mterp/c/OP_IGET_BOOLEAN.c
new file mode 100644
index 0000000..a5a47be
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_BOOLEAN.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X(OP_IGET_BOOLEAN,          "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IGET_BYTE.c b/vm/mterp/c/OP_IGET_BYTE.c
new file mode 100644
index 0000000..647f311
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_BYTE.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X(OP_IGET_BYTE,             "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IGET_CHAR.c b/vm/mterp/c/OP_IGET_CHAR.c
new file mode 100644
index 0000000..9a8adb0
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_CHAR.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X(OP_IGET_CHAR,             "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IGET_OBJECT.c b/vm/mterp/c/OP_IGET_OBJECT.c
new file mode 100644
index 0000000..03c9e50
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_OBJECT.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X(OP_IGET_OBJECT,           "-object", Object, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_IGET_OBJECT_QUICK.c b/vm/mterp/c/OP_IGET_OBJECT_QUICK.c
new file mode 100644
index 0000000..2ac3a54
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_OBJECT_QUICK.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_QUICK(OP_IGET_OBJECT_QUICK,   "-object", Object, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_IGET_QUICK.c b/vm/mterp/c/OP_IGET_QUICK.c
new file mode 100644
index 0000000..b5724cc
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_QUICK.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_QUICK(OP_IGET_QUICK,          "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IGET_SHORT.c b/vm/mterp/c/OP_IGET_SHORT.c
new file mode 100644
index 0000000..3e77789
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_SHORT.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X(OP_IGET_SHORT,            "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IGET_WIDE.c b/vm/mterp/c/OP_IGET_WIDE.c
new file mode 100644
index 0000000..cb1fcca
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_WIDE.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X(OP_IGET_WIDE,             "-wide", Long, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_IGET_WIDE_QUICK.c b/vm/mterp/c/OP_IGET_WIDE_QUICK.c
new file mode 100644
index 0000000..adb4fc1
--- /dev/null
+++ b/vm/mterp/c/OP_IGET_WIDE_QUICK.c
@@ -0,0 +1,2 @@
+HANDLE_IGET_X_QUICK(OP_IGET_WIDE_QUICK,     "-wide", Long, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_INSTANCE_OF.c b/vm/mterp/c/OP_INSTANCE_OF.c
new file mode 100644
index 0000000..5855b5a
--- /dev/null
+++ b/vm/mterp/c/OP_INSTANCE_OF.c
@@ -0,0 +1,30 @@
+HANDLE_OPCODE(OP_INSTANCE_OF /*vA, vB, class@CCCC*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        vdst = INST_A(inst);
+        vsrc1 = INST_B(inst);   /* object to check */
+        ref = FETCH(1);         /* class to check against */
+        ILOGV("|instance-of v%d,v%d,class@0x%04x", vdst, vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj == NULL) {
+            SET_REGISTER(vdst, 0);
+        } else {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNullExportPC(obj, fp, pc))
+                GOTO(exceptionThrown);
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                EXPORT_PC();
+                clazz = dvmResolveClass(method->clazz, ref, true);
+                if (clazz == NULL)
+                    GOTO(exceptionThrown);
+            }
+            SET_REGISTER(vdst, dvmInstanceof(obj->clazz, clazz));
+        }
+    }
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_INT_TO_BYTE.c b/vm/mterp/c/OP_INT_TO_BYTE.c
new file mode 100644
index 0000000..ea75747
--- /dev/null
+++ b/vm/mterp/c/OP_INT_TO_BYTE.c
@@ -0,0 +1,2 @@
+HANDLE_INT_TO_SMALL(OP_INT_TO_BYTE,     "byte", s1)
+OP_END
diff --git a/vm/mterp/c/OP_INT_TO_CHAR.c b/vm/mterp/c/OP_INT_TO_CHAR.c
new file mode 100644
index 0000000..45ae0df
--- /dev/null
+++ b/vm/mterp/c/OP_INT_TO_CHAR.c
@@ -0,0 +1,2 @@
+HANDLE_INT_TO_SMALL(OP_INT_TO_CHAR,     "char", u2)
+OP_END
diff --git a/vm/mterp/c/OP_INT_TO_DOUBLE.c b/vm/mterp/c/OP_INT_TO_DOUBLE.c
new file mode 100644
index 0000000..624c702
--- /dev/null
+++ b/vm/mterp/c/OP_INT_TO_DOUBLE.c
@@ -0,0 +1,2 @@
+HANDLE_NUMCONV(OP_INT_TO_DOUBLE,        "int-to-double", _INT, _DOUBLE)
+OP_END
diff --git a/vm/mterp/c/OP_INT_TO_FLOAT.c b/vm/mterp/c/OP_INT_TO_FLOAT.c
new file mode 100644
index 0000000..fd15199
--- /dev/null
+++ b/vm/mterp/c/OP_INT_TO_FLOAT.c
@@ -0,0 +1,2 @@
+HANDLE_NUMCONV(OP_INT_TO_FLOAT,         "int-to-float", _INT, _FLOAT)
+OP_END
diff --git a/vm/mterp/c/OP_INT_TO_LONG.c b/vm/mterp/c/OP_INT_TO_LONG.c
new file mode 100644
index 0000000..8bc4223
--- /dev/null
+++ b/vm/mterp/c/OP_INT_TO_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_NUMCONV(OP_INT_TO_LONG,          "int-to-long", _INT, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_INT_TO_SHORT.c b/vm/mterp/c/OP_INT_TO_SHORT.c
new file mode 100644
index 0000000..0f06739
--- /dev/null
+++ b/vm/mterp/c/OP_INT_TO_SHORT.c
@@ -0,0 +1,2 @@
+HANDLE_INT_TO_SMALL(OP_INT_TO_SHORT,    "short", s2)    /* want sign bit */
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_DIRECT.c b/vm/mterp/c/OP_INVOKE_DIRECT.c
new file mode 100644
index 0000000..2f8858f
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_DIRECT.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_DIRECT /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    GOTO(invokeDirect, false);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_DIRECT_EMPTY.c b/vm/mterp/c/OP_INVOKE_DIRECT_EMPTY.c
new file mode 100644
index 0000000..f816490
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_DIRECT_EMPTY.c
@@ -0,0 +1,4 @@
+HANDLE_OPCODE(OP_INVOKE_DIRECT_EMPTY /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    //LOGI("Ignoring empty\n");
+    FINISH(3);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_DIRECT_RANGE.c b/vm/mterp/c/OP_INVOKE_DIRECT_RANGE.c
new file mode 100644
index 0000000..7f13c5f
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_DIRECT_RANGE.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_DIRECT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    GOTO(invokeDirect, true);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_INTERFACE.c b/vm/mterp/c/OP_INVOKE_INTERFACE.c
new file mode 100644
index 0000000..f78dc7e
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_INTERFACE.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_INTERFACE /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    GOTO(invokeInterface, false);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_INTERFACE_RANGE.c b/vm/mterp/c/OP_INVOKE_INTERFACE_RANGE.c
new file mode 100644
index 0000000..6ca2d7d
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_INTERFACE_RANGE.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_INTERFACE_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    GOTO(invokeInterface, true);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_STATIC.c b/vm/mterp/c/OP_INVOKE_STATIC.c
new file mode 100644
index 0000000..b04ba95
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_STATIC.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_STATIC /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    GOTO(invokeStatic, false);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_STATIC_RANGE.c b/vm/mterp/c/OP_INVOKE_STATIC_RANGE.c
new file mode 100644
index 0000000..2a74039
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_STATIC_RANGE.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_STATIC_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    GOTO(invokeStatic, true);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_SUPER.c b/vm/mterp/c/OP_INVOKE_SUPER.c
new file mode 100644
index 0000000..22cab39
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_SUPER.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_SUPER /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    GOTO(invokeSuper, false);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_SUPER_QUICK.c b/vm/mterp/c/OP_INVOKE_SUPER_QUICK.c
new file mode 100644
index 0000000..fcd074e
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_SUPER_QUICK.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    GOTO(invokeSuperQuick, false);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_SUPER_QUICK_RANGE.c b/vm/mterp/c/OP_INVOKE_SUPER_QUICK_RANGE.c
new file mode 100644
index 0000000..cb468fd
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_SUPER_QUICK_RANGE.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    GOTO(invokeSuperQuick, true);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_SUPER_RANGE.c b/vm/mterp/c/OP_INVOKE_SUPER_RANGE.c
new file mode 100644
index 0000000..fce1ff3
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_SUPER_RANGE.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_SUPER_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    GOTO(invokeSuper, true);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_VIRTUAL.c b/vm/mterp/c/OP_INVOKE_VIRTUAL.c
new file mode 100644
index 0000000..0cef190
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_VIRTUAL.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    GOTO(invokeVirtual, false);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK.c b/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK.c
new file mode 100644
index 0000000..50862b1
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+    GOTO(invokeVirtualQuick, false);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c b/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c
new file mode 100644
index 0000000..388bc73
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK_RANGE/*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    GOTO(invokeVirtualQuick, true);
+OP_END
diff --git a/vm/mterp/c/OP_INVOKE_VIRTUAL_RANGE.c b/vm/mterp/c/OP_INVOKE_VIRTUAL_RANGE.c
new file mode 100644
index 0000000..0cac69f
--- /dev/null
+++ b/vm/mterp/c/OP_INVOKE_VIRTUAL_RANGE.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+    GOTO(invokeVirtual, true);
+OP_END
diff --git a/vm/mterp/c/OP_IPUT.c b/vm/mterp/c/OP_IPUT.c
new file mode 100644
index 0000000..9d503ef
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X(OP_IPUT,                  "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_BOOLEAN.c b/vm/mterp/c/OP_IPUT_BOOLEAN.c
new file mode 100644
index 0000000..7fe4929
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_BOOLEAN.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X(OP_IPUT_BOOLEAN,          "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_BYTE.c b/vm/mterp/c/OP_IPUT_BYTE.c
new file mode 100644
index 0000000..8a49fb7
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_BYTE.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X(OP_IPUT_BYTE,             "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_CHAR.c b/vm/mterp/c/OP_IPUT_CHAR.c
new file mode 100644
index 0000000..b2812c2
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_CHAR.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X(OP_IPUT_CHAR,             "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_OBJECT.c b/vm/mterp/c/OP_IPUT_OBJECT.c
new file mode 100644
index 0000000..799e846
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_OBJECT.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X(OP_IPUT_OBJECT,           "-object", Object, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_OBJECT_QUICK.c b/vm/mterp/c/OP_IPUT_OBJECT_QUICK.c
new file mode 100644
index 0000000..8670188
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_OBJECT_QUICK.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X_QUICK(OP_IPUT_OBJECT_QUICK,   "-object", Object, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_QUICK.c b/vm/mterp/c/OP_IPUT_QUICK.c
new file mode 100644
index 0000000..483b9b1
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_QUICK.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X_QUICK(OP_IPUT_QUICK,          "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_SHORT.c b/vm/mterp/c/OP_IPUT_SHORT.c
new file mode 100644
index 0000000..0a63ebc
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_SHORT.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X(OP_IPUT_SHORT,            "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_WIDE.c b/vm/mterp/c/OP_IPUT_WIDE.c
new file mode 100644
index 0000000..bb4926c
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_WIDE.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X(OP_IPUT_WIDE,             "-wide", Long, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_IPUT_WIDE_QUICK.c b/vm/mterp/c/OP_IPUT_WIDE_QUICK.c
new file mode 100644
index 0000000..691630b
--- /dev/null
+++ b/vm/mterp/c/OP_IPUT_WIDE_QUICK.c
@@ -0,0 +1,2 @@
+HANDLE_IPUT_X_QUICK(OP_IPUT_WIDE_QUICK,     "-wide", Long, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_LONG_TO_DOUBLE.c b/vm/mterp/c/OP_LONG_TO_DOUBLE.c
new file mode 100644
index 0000000..91b5eb2
--- /dev/null
+++ b/vm/mterp/c/OP_LONG_TO_DOUBLE.c
@@ -0,0 +1,2 @@
+HANDLE_NUMCONV(OP_LONG_TO_DOUBLE,       "long-to-double", _WIDE, _DOUBLE)
+OP_END
diff --git a/vm/mterp/c/OP_LONG_TO_FLOAT.c b/vm/mterp/c/OP_LONG_TO_FLOAT.c
new file mode 100644
index 0000000..ff1f5fb
--- /dev/null
+++ b/vm/mterp/c/OP_LONG_TO_FLOAT.c
@@ -0,0 +1,2 @@
+HANDLE_NUMCONV(OP_LONG_TO_FLOAT,        "long-to-float", _WIDE, _FLOAT)
+OP_END
diff --git a/vm/mterp/c/OP_LONG_TO_INT.c b/vm/mterp/c/OP_LONG_TO_INT.c
new file mode 100644
index 0000000..87c9a2e
--- /dev/null
+++ b/vm/mterp/c/OP_LONG_TO_INT.c
@@ -0,0 +1,2 @@
+HANDLE_NUMCONV(OP_LONG_TO_INT,          "long-to-int", _WIDE, _INT)
+OP_END
diff --git a/vm/mterp/c/OP_MONITOR_ENTER.c b/vm/mterp/c/OP_MONITOR_ENTER.c
new file mode 100644
index 0000000..57329d2
--- /dev/null
+++ b/vm/mterp/c/OP_MONITOR_ENTER.c
@@ -0,0 +1,22 @@
+HANDLE_OPCODE(OP_MONITOR_ENTER /*vAA*/)
+    {
+        Object* obj;
+
+        vsrc1 = INST_AA(inst);
+        ILOGV("|monitor-enter v%d %s(0x%08x)",
+            vsrc1, kSpacing+6, GET_REGISTER(vsrc1));
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO(exceptionThrown);
+        ILOGV("+ locking %p %s\n", obj, obj->clazz->descriptor);
+#ifdef WITH_MONITOR_TRACKING
+        EXPORT_PC();        /* need for stack trace */
+#endif
+        dvmLockObject(self, obj);
+#ifdef WITH_DEADLOCK_PREDICTION
+        if (dvmCheckException(self))
+            GOTO(exceptionThrown);
+#endif
+    }
+    FINISH(1);
+OP_END
diff --git a/vm/mterp/c/OP_MONITOR_EXIT.c b/vm/mterp/c/OP_MONITOR_EXIT.c
new file mode 100644
index 0000000..d13a95f
--- /dev/null
+++ b/vm/mterp/c/OP_MONITOR_EXIT.c
@@ -0,0 +1,30 @@
+HANDLE_OPCODE(OP_MONITOR_EXIT /*vAA*/)
+    {
+        Object* obj;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);
+        ILOGV("|monitor-exit v%d %s(0x%08x)",
+            vsrc1, kSpacing+5, GET_REGISTER(vsrc1));
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (!checkForNull(obj)) {
+            /*
+             * The exception needs to be processed at the *following*
+             * instruction, not the current instruction (see the Dalvik
+             * spec).  Because we're jumping to an exception handler,
+             * we're not actually at risk of skipping an instruction
+             * by doing so.
+             */
+            ADJUST_PC(1);           /* monitor-exit width is 1 */
+            GOTO(exceptionThrown);
+        }
+        ILOGV("+ unlocking %p %s\n", obj, obj->clazz->descriptor);
+        if (!dvmUnlockObject(self, obj)) {
+            assert(dvmCheckException(self));
+            ADJUST_PC(1);
+            GOTO(exceptionThrown);
+        }
+    }
+    FINISH(1);
+OP_END
diff --git a/vm/mterp/c/OP_MOVE.c b/vm/mterp/c/OP_MOVE.c
new file mode 100644
index 0000000..6666199
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE.c
@@ -0,0 +1,9 @@
+HANDLE_OPCODE($opcode /*vA, vB*/)
+    vdst = INST_A(inst);
+    vsrc1 = INST_B(inst);
+    ILOGV("|move%s v%d,v%d %s(v%d=0x%08x)",
+        (INST_INST(inst) == OP_MOVE) ? "" : "-object", vdst, vsrc1,
+        kSpacing, vdst, GET_REGISTER(vsrc1));
+    SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+    FINISH(1);
+OP_END
diff --git a/vm/mterp/c/OP_MOVE_16.c b/vm/mterp/c/OP_MOVE_16.c
new file mode 100644
index 0000000..53af5d5
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE_16.c
@@ -0,0 +1,9 @@
+HANDLE_OPCODE($opcode /*vAAAA, vBBBB*/)
+    vdst = FETCH(1);
+    vsrc1 = FETCH(2);
+    ILOGV("|move%s/16 v%d,v%d %s(v%d=0x%08x)",
+        (INST_INST(inst) == OP_MOVE_16) ? "" : "-object", vdst, vsrc1,
+        kSpacing, vdst, GET_REGISTER(vsrc1));
+    SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+    FINISH(3);
+OP_END
diff --git a/vm/mterp/c/OP_MOVE_EXCEPTION.c b/vm/mterp/c/OP_MOVE_EXCEPTION.c
new file mode 100644
index 0000000..86587ca
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE_EXCEPTION.c
@@ -0,0 +1,8 @@
+HANDLE_OPCODE(OP_MOVE_EXCEPTION /*vAA*/)
+    vdst = INST_AA(inst);
+    ILOGV("|move-exception v%d", vdst);
+    assert(self->exception != NULL);
+    SET_REGISTER(vdst, (u4)self->exception);
+    dvmClearException(self);
+    FINISH(1);
+OP_END
diff --git a/vm/mterp/c/OP_MOVE_FROM16.c b/vm/mterp/c/OP_MOVE_FROM16.c
new file mode 100644
index 0000000..59fc285
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE_FROM16.c
@@ -0,0 +1,9 @@
+HANDLE_OPCODE($opcode /*vAA, vBBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|move%s/from16 v%d,v%d %s(v%d=0x%08x)",
+        (INST_INST(inst) == OP_MOVE_FROM16) ? "" : "-object", vdst, vsrc1,
+        kSpacing, vdst, GET_REGISTER(vsrc1));
+    SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_MOVE_OBJECT.c b/vm/mterp/c/OP_MOVE_OBJECT.c
new file mode 100644
index 0000000..bc03c80
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE_OBJECT.c
@@ -0,0 +1,2 @@
+%include "c/OP_MOVE.c"
+//OP_END
diff --git a/vm/mterp/c/OP_MOVE_OBJECT_16.c b/vm/mterp/c/OP_MOVE_OBJECT_16.c
new file mode 100644
index 0000000..336a17e
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE_OBJECT_16.c
@@ -0,0 +1,2 @@
+%include "c/OP_MOVE_16.c"
+//OP_END
diff --git a/vm/mterp/c/OP_MOVE_OBJECT_FROM16.c b/vm/mterp/c/OP_MOVE_OBJECT_FROM16.c
new file mode 100644
index 0000000..c65ecf9
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE_OBJECT_FROM16.c
@@ -0,0 +1,2 @@
+%include "c/OP_MOVE_FROM16.c"
+//OP_END
diff --git a/vm/mterp/c/OP_MOVE_RESULT.c b/vm/mterp/c/OP_MOVE_RESULT.c
new file mode 100644
index 0000000..ddf535b
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE_RESULT.c
@@ -0,0 +1,8 @@
+HANDLE_OPCODE($opcode /*vAA*/)
+    vdst = INST_AA(inst);
+    ILOGV("|move-result%s v%d %s(v%d=0x%08x)",
+         (INST_INST(inst) == OP_MOVE_RESULT) ? "" : "-object",
+         vdst, kSpacing+4, vdst,retval.i);
+    SET_REGISTER(vdst, retval.i);
+    FINISH(1);
+OP_END
diff --git a/vm/mterp/c/OP_MOVE_RESULT_OBJECT.c b/vm/mterp/c/OP_MOVE_RESULT_OBJECT.c
new file mode 100644
index 0000000..a52f7c2
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE_RESULT_OBJECT.c
@@ -0,0 +1,2 @@
+%include "c/OP_MOVE_RESULT.c"
+//OP_END
diff --git a/vm/mterp/c/OP_MOVE_RESULT_WIDE.c b/vm/mterp/c/OP_MOVE_RESULT_WIDE.c
new file mode 100644
index 0000000..f6ec8d9
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE_RESULT_WIDE.c
@@ -0,0 +1,6 @@
+HANDLE_OPCODE(OP_MOVE_RESULT_WIDE /*vAA*/)
+    vdst = INST_AA(inst);
+    ILOGV("|move-result-wide v%d %s(0x%08llx)", vdst, kSpacing, retval.j);
+    SET_REGISTER_WIDE(vdst, retval.j);
+    FINISH(1);
+OP_END
diff --git a/vm/mterp/c/OP_MOVE_WIDE.c b/vm/mterp/c/OP_MOVE_WIDE.c
new file mode 100644
index 0000000..9ee323d
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE_WIDE.c
@@ -0,0 +1,10 @@
+HANDLE_OPCODE(OP_MOVE_WIDE /*vA, vB*/)
+    /* IMPORTANT: must correctly handle overlapping registers, e.g. both
+     * "move-wide v6, v7" and "move-wide v7, v6" */
+    vdst = INST_A(inst);
+    vsrc1 = INST_B(inst);
+    ILOGV("|move-wide v%d,v%d %s(v%d=0x%08llx)", vdst, vsrc1,
+        kSpacing+5, vdst, GET_REGISTER_WIDE(vsrc1));
+    SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+    FINISH(1);
+OP_END
diff --git a/vm/mterp/c/OP_MOVE_WIDE_16.c b/vm/mterp/c/OP_MOVE_WIDE_16.c
new file mode 100644
index 0000000..e3d0e16
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE_WIDE_16.c
@@ -0,0 +1,8 @@
+HANDLE_OPCODE(OP_MOVE_WIDE_16 /*vAAAA, vBBBB*/)
+    vdst = FETCH(1);
+    vsrc1 = FETCH(2);
+    ILOGV("|move-wide/16 v%d,v%d %s(v%d=0x%08llx)", vdst, vsrc1,
+        kSpacing+8, vdst, GET_REGISTER_WIDE(vsrc1));
+    SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+    FINISH(3);
+OP_END
diff --git a/vm/mterp/c/OP_MOVE_WIDE_FROM16.c b/vm/mterp/c/OP_MOVE_WIDE_FROM16.c
new file mode 100644
index 0000000..cdbaa2e
--- /dev/null
+++ b/vm/mterp/c/OP_MOVE_WIDE_FROM16.c
@@ -0,0 +1,8 @@
+HANDLE_OPCODE(OP_MOVE_WIDE_FROM16 /*vAA, vBBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|move-wide/from16 v%d,v%d  (v%d=0x%08llx)", vdst, vsrc1,
+        vdst, GET_REGISTER_WIDE(vsrc1));
+    SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_MUL_DOUBLE.c b/vm/mterp/c/OP_MUL_DOUBLE.c
new file mode 100644
index 0000000..3e65efa
--- /dev/null
+++ b/vm/mterp/c/OP_MUL_DOUBLE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_DOUBLE(OP_MUL_DOUBLE, "mul", *)
+OP_END
diff --git a/vm/mterp/c/OP_MUL_DOUBLE_2ADDR.c b/vm/mterp/c/OP_MUL_DOUBLE_2ADDR.c
new file mode 100644
index 0000000..905b6a7
--- /dev/null
+++ b/vm/mterp/c/OP_MUL_DOUBLE_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_DOUBLE_2ADDR(OP_MUL_DOUBLE_2ADDR, "mul", *)
+OP_END
diff --git a/vm/mterp/c/OP_MUL_FLOAT.c b/vm/mterp/c/OP_MUL_FLOAT.c
new file mode 100644
index 0000000..310495c
--- /dev/null
+++ b/vm/mterp/c/OP_MUL_FLOAT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_FLOAT(OP_MUL_FLOAT, "mul", *)
+OP_END
diff --git a/vm/mterp/c/OP_MUL_FLOAT_2ADDR.c b/vm/mterp/c/OP_MUL_FLOAT_2ADDR.c
new file mode 100644
index 0000000..03623ca
--- /dev/null
+++ b/vm/mterp/c/OP_MUL_FLOAT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_FLOAT_2ADDR(OP_MUL_FLOAT_2ADDR, "mul", *)
+OP_END
diff --git a/vm/mterp/c/OP_MUL_INT.c b/vm/mterp/c/OP_MUL_INT.c
new file mode 100644
index 0000000..19c2c48
--- /dev/null
+++ b/vm/mterp/c/OP_MUL_INT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT(OP_MUL_INT, "mul", *, false)
+OP_END
diff --git a/vm/mterp/c/OP_MUL_INT_2ADDR.c b/vm/mterp/c/OP_MUL_INT_2ADDR.c
new file mode 100644
index 0000000..d9619a6
--- /dev/null
+++ b/vm/mterp/c/OP_MUL_INT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_2ADDR(OP_MUL_INT_2ADDR, "mul", *, false)
+OP_END
diff --git a/vm/mterp/c/OP_MUL_INT_LIT16.c b/vm/mterp/c/OP_MUL_INT_LIT16.c
new file mode 100644
index 0000000..b8eb4ba
--- /dev/null
+++ b/vm/mterp/c/OP_MUL_INT_LIT16.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT16(OP_MUL_INT_LIT16, "mul", (s4), *, false)
+OP_END
diff --git a/vm/mterp/c/OP_MUL_INT_LIT8.c b/vm/mterp/c/OP_MUL_INT_LIT8.c
new file mode 100644
index 0000000..0d3a9a5
--- /dev/null
+++ b/vm/mterp/c/OP_MUL_INT_LIT8.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT8(OP_MUL_INT_LIT8,   "mul", *, false)
+OP_END
diff --git a/vm/mterp/c/OP_MUL_LONG.c b/vm/mterp/c/OP_MUL_LONG.c
new file mode 100644
index 0000000..145acbc
--- /dev/null
+++ b/vm/mterp/c/OP_MUL_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG(OP_MUL_LONG, "mul", *, false)
+OP_END
diff --git a/vm/mterp/c/OP_MUL_LONG_2ADDR.c b/vm/mterp/c/OP_MUL_LONG_2ADDR.c
new file mode 100644
index 0000000..9cebcaa
--- /dev/null
+++ b/vm/mterp/c/OP_MUL_LONG_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG_2ADDR(OP_MUL_LONG_2ADDR, "mul", *, false)
+OP_END
diff --git a/vm/mterp/c/OP_NEG_DOUBLE.c b/vm/mterp/c/OP_NEG_DOUBLE.c
new file mode 100644
index 0000000..805082c
--- /dev/null
+++ b/vm/mterp/c/OP_NEG_DOUBLE.c
@@ -0,0 +1,2 @@
+HANDLE_UNOP(OP_NEG_DOUBLE, "neg-double", -, , _DOUBLE)
+OP_END
diff --git a/vm/mterp/c/OP_NEG_FLOAT.c b/vm/mterp/c/OP_NEG_FLOAT.c
new file mode 100644
index 0000000..00e14f5
--- /dev/null
+++ b/vm/mterp/c/OP_NEG_FLOAT.c
@@ -0,0 +1,2 @@
+HANDLE_UNOP(OP_NEG_FLOAT, "neg-float", -, , _FLOAT)
+OP_END
diff --git a/vm/mterp/c/OP_NEG_INT.c b/vm/mterp/c/OP_NEG_INT.c
new file mode 100644
index 0000000..9b97bef
--- /dev/null
+++ b/vm/mterp/c/OP_NEG_INT.c
@@ -0,0 +1,2 @@
+HANDLE_UNOP(OP_NEG_INT, "neg-int", -, , )
+OP_END
diff --git a/vm/mterp/c/OP_NEG_LONG.c b/vm/mterp/c/OP_NEG_LONG.c
new file mode 100644
index 0000000..52d553a
--- /dev/null
+++ b/vm/mterp/c/OP_NEG_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_UNOP(OP_NEG_LONG, "neg-long", -, , _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_NEW_ARRAY.c b/vm/mterp/c/OP_NEW_ARRAY.c
new file mode 100644
index 0000000..dd20505
--- /dev/null
+++ b/vm/mterp/c/OP_NEW_ARRAY.c
@@ -0,0 +1,36 @@
+HANDLE_OPCODE(OP_NEW_ARRAY /*vA, vB, class@CCCC*/)
+    {
+        ClassObject* arrayClass;
+        ArrayObject* newArray;
+        s4 length;
+
+        EXPORT_PC();
+
+        vdst = INST_A(inst);
+        vsrc1 = INST_B(inst);       /* length reg */
+        ref = FETCH(1);
+        ILOGV("|new-array v%d,v%d,class@0x%04x  (%d elements)",
+            vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
+        length = (s4) GET_REGISTER(vsrc1);
+        if (length < 0) {
+            dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+            GOTO(exceptionThrown);
+        }
+        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (arrayClass == NULL) {
+            arrayClass = dvmResolveClass(method->clazz, ref, false);
+            if (arrayClass == NULL)
+                GOTO(exceptionThrown);
+        }
+        /* verifier guarantees this is an array class */
+        assert(dvmIsArrayClass(arrayClass));
+        assert(dvmIsClassInitialized(arrayClass));
+
+        newArray = dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
+        if (newArray == NULL)
+            GOTO(exceptionThrown);
+        SET_REGISTER(vdst, (u4) newArray);
+    }
+    FINISH(2);
+OP_END
+
diff --git a/vm/mterp/c/OP_NEW_INSTANCE.c b/vm/mterp/c/OP_NEW_INSTANCE.c
new file mode 100644
index 0000000..86da6e3
--- /dev/null
+++ b/vm/mterp/c/OP_NEW_INSTANCE.c
@@ -0,0 +1,43 @@
+HANDLE_OPCODE(OP_NEW_INSTANCE /*vAA, class@BBBB*/)
+    {
+        ClassObject* clazz;
+        Object* newObj;
+
+        EXPORT_PC();
+
+        vdst = INST_AA(inst);
+        ref = FETCH(1);
+        ILOGV("|new-instance v%d,class@0x%04x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            clazz = dvmResolveClass(method->clazz, ref, false);
+            if (clazz == NULL)
+                GOTO(exceptionThrown);
+        }
+
+        if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz))
+            GOTO(exceptionThrown);
+
+        /*
+         * Note: the verifier can ensure that this never happens, allowing us
+         * to remove the check.  However, the spec requires we throw the
+         * exception at runtime, not verify time, so the verifier would
+         * need to replace the new-instance call with a magic "throw
+         * InstantiationError" instruction.
+         *
+         * Since this relies on the verifier, which is optional, we would
+         * also need a "new-instance-quick" instruction to identify instances
+         * that don't require the check.
+         */
+        if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
+            dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationError;",
+                clazz->descriptor);
+            GOTO(exceptionThrown);
+        }
+        newObj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+        if (newObj == NULL)
+            GOTO(exceptionThrown);
+        SET_REGISTER(vdst, (u4) newObj);
+    }
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_NOP.c b/vm/mterp/c/OP_NOP.c
new file mode 100644
index 0000000..d9fd744
--- /dev/null
+++ b/vm/mterp/c/OP_NOP.c
@@ -0,0 +1,3 @@
+HANDLE_OPCODE(OP_NOP)
+    FINISH(1);
+OP_END
diff --git a/vm/mterp/c/OP_NOT_INT.c b/vm/mterp/c/OP_NOT_INT.c
new file mode 100644
index 0000000..e585f62
--- /dev/null
+++ b/vm/mterp/c/OP_NOT_INT.c
@@ -0,0 +1,2 @@
+HANDLE_UNOP(OP_NOT_INT, "not-int", , ^ 0xffffffff, )
+OP_END
diff --git a/vm/mterp/c/OP_NOT_LONG.c b/vm/mterp/c/OP_NOT_LONG.c
new file mode 100644
index 0000000..4aafe8c
--- /dev/null
+++ b/vm/mterp/c/OP_NOT_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_UNOP(OP_NOT_LONG, "not-long", , & 0xffffffffffffffffULL, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_OR_INT.c b/vm/mterp/c/OP_OR_INT.c
new file mode 100644
index 0000000..f1a74b1
--- /dev/null
+++ b/vm/mterp/c/OP_OR_INT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT(OP_OR_INT,  "or",  |, false)
+OP_END
diff --git a/vm/mterp/c/OP_OR_INT_2ADDR.c b/vm/mterp/c/OP_OR_INT_2ADDR.c
new file mode 100644
index 0000000..f53f06d
--- /dev/null
+++ b/vm/mterp/c/OP_OR_INT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_2ADDR(OP_OR_INT_2ADDR,  "or", |, false)
+OP_END
diff --git a/vm/mterp/c/OP_OR_INT_LIT16.c b/vm/mterp/c/OP_OR_INT_LIT16.c
new file mode 100644
index 0000000..8db4686
--- /dev/null
+++ b/vm/mterp/c/OP_OR_INT_LIT16.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT16(OP_OR_INT_LIT16,  "or",  (s4), |, false)
+OP_END
diff --git a/vm/mterp/c/OP_OR_INT_LIT8.c b/vm/mterp/c/OP_OR_INT_LIT8.c
new file mode 100644
index 0000000..0a31a25
--- /dev/null
+++ b/vm/mterp/c/OP_OR_INT_LIT8.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT8(OP_OR_INT_LIT8,    "or",  |, false)
+OP_END
diff --git a/vm/mterp/c/OP_OR_LONG.c b/vm/mterp/c/OP_OR_LONG.c
new file mode 100644
index 0000000..8d977a0
--- /dev/null
+++ b/vm/mterp/c/OP_OR_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG(OP_OR_LONG,  "or", |, false)
+OP_END
diff --git a/vm/mterp/c/OP_OR_LONG_2ADDR.c b/vm/mterp/c/OP_OR_LONG_2ADDR.c
new file mode 100644
index 0000000..5977d49
--- /dev/null
+++ b/vm/mterp/c/OP_OR_LONG_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG_2ADDR(OP_OR_LONG_2ADDR,  "or", |, false)
+OP_END
diff --git a/vm/mterp/c/OP_PACKED_SWITCH.c b/vm/mterp/c/OP_PACKED_SWITCH.c
new file mode 100644
index 0000000..5e490cf
--- /dev/null
+++ b/vm/mterp/c/OP_PACKED_SWITCH.c
@@ -0,0 +1,29 @@
+HANDLE_OPCODE(OP_PACKED_SWITCH /*vAA, +BBBB*/)
+    {
+        const u2* switchData;
+        u4 testVal;
+        s4 offset;
+
+        vsrc1 = INST_AA(inst);
+        offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+        ILOGV("|packed-switch v%d +0x%04x", vsrc1, vsrc2);
+        switchData = pc + offset;       // offset in 16-bit units
+#ifndef NDEBUG
+        if (switchData < method->insns ||
+            switchData >= method->insns + dvmGetMethodInsnsSize(method))
+        {
+            /* should have been caught in verifier */
+            EXPORT_PC();
+            dvmThrowException("Ljava/lang/InternalError;", "bad packed switch");
+            GOTO(exceptionThrown);
+        }
+#endif
+        testVal = GET_REGISTER(vsrc1);
+
+        offset = dvmInterpHandlePackedSwitch(switchData, testVal);
+        ILOGV("> branch taken (0x%04x)\n", offset);
+        if (offset <= 0)  /* uncommon */
+            PERIODIC_CHECKS(kInterpEntryInstr, offset);
+        FINISH(offset);
+    }
+OP_END
diff --git a/vm/mterp/c/OP_REM_DOUBLE.c b/vm/mterp/c/OP_REM_DOUBLE.c
new file mode 100644
index 0000000..343e25e
--- /dev/null
+++ b/vm/mterp/c/OP_REM_DOUBLE.c
@@ -0,0 +1,13 @@
+HANDLE_OPCODE(OP_REM_DOUBLE /*vAA, vBB, vCC*/)
+    {
+        u2 srcRegs;
+        vdst = INST_AA(inst);
+        srcRegs = FETCH(1);
+        vsrc1 = srcRegs & 0xff;
+        vsrc2 = srcRegs >> 8;
+        ILOGV("|%s-double v%d,v%d,v%d", "mod", vdst, vsrc1, vsrc2);
+        SET_REGISTER_DOUBLE(vdst,
+            fmod(GET_REGISTER_DOUBLE(vsrc1), GET_REGISTER_DOUBLE(vsrc2)));
+    }
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_REM_DOUBLE_2ADDR.c b/vm/mterp/c/OP_REM_DOUBLE_2ADDR.c
new file mode 100644
index 0000000..392eacf
--- /dev/null
+++ b/vm/mterp/c/OP_REM_DOUBLE_2ADDR.c
@@ -0,0 +1,8 @@
+HANDLE_OPCODE(OP_REM_DOUBLE_2ADDR /*vA, vB*/)
+    vdst = INST_A(inst);
+    vsrc1 = INST_B(inst);
+    ILOGV("|%s-double-2addr v%d,v%d", "mod", vdst, vsrc1);
+    SET_REGISTER_DOUBLE(vdst,
+        fmod(GET_REGISTER_DOUBLE(vdst), GET_REGISTER_DOUBLE(vsrc1)));
+    FINISH(1);
+OP_END
diff --git a/vm/mterp/c/OP_REM_FLOAT.c b/vm/mterp/c/OP_REM_FLOAT.c
new file mode 100644
index 0000000..9604b30
--- /dev/null
+++ b/vm/mterp/c/OP_REM_FLOAT.c
@@ -0,0 +1,13 @@
+HANDLE_OPCODE(OP_REM_FLOAT /*vAA, vBB, vCC*/)
+    {
+        u2 srcRegs;
+        vdst = INST_AA(inst);
+        srcRegs = FETCH(1);
+        vsrc1 = srcRegs & 0xff;
+        vsrc2 = srcRegs >> 8;
+        ILOGV("|%s-float v%d,v%d,v%d", "mod", vdst, vsrc1, vsrc2);
+        SET_REGISTER_FLOAT(vdst,
+            fmodf(GET_REGISTER_FLOAT(vsrc1), GET_REGISTER_FLOAT(vsrc2)));
+    }
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_REM_FLOAT_2ADDR.c b/vm/mterp/c/OP_REM_FLOAT_2ADDR.c
new file mode 100644
index 0000000..87bb31e
--- /dev/null
+++ b/vm/mterp/c/OP_REM_FLOAT_2ADDR.c
@@ -0,0 +1,8 @@
+HANDLE_OPCODE(OP_REM_FLOAT_2ADDR /*vA, vB*/)
+    vdst = INST_A(inst);
+    vsrc1 = INST_B(inst);
+    ILOGV("|%s-float-2addr v%d,v%d", "mod", vdst, vsrc1);
+    SET_REGISTER_FLOAT(vdst,
+        fmodf(GET_REGISTER_FLOAT(vdst), GET_REGISTER_FLOAT(vsrc1)));
+    FINISH(1);
+OP_END
diff --git a/vm/mterp/c/OP_REM_INT.c b/vm/mterp/c/OP_REM_INT.c
new file mode 100644
index 0000000..fb277bd
--- /dev/null
+++ b/vm/mterp/c/OP_REM_INT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT(OP_REM_INT, "rem", %, true)
+OP_END
diff --git a/vm/mterp/c/OP_REM_INT_2ADDR.c b/vm/mterp/c/OP_REM_INT_2ADDR.c
new file mode 100644
index 0000000..dca7647
--- /dev/null
+++ b/vm/mterp/c/OP_REM_INT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_2ADDR(OP_REM_INT_2ADDR, "rem", %, true)
+OP_END
diff --git a/vm/mterp/c/OP_REM_INT_LIT16.c b/vm/mterp/c/OP_REM_INT_LIT16.c
new file mode 100644
index 0000000..69e9b4e
--- /dev/null
+++ b/vm/mterp/c/OP_REM_INT_LIT16.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT16(OP_REM_INT_LIT16, "rem", (s4), %, true)
+OP_END
diff --git a/vm/mterp/c/OP_REM_INT_LIT8.c b/vm/mterp/c/OP_REM_INT_LIT8.c
new file mode 100644
index 0000000..49cc2f6
--- /dev/null
+++ b/vm/mterp/c/OP_REM_INT_LIT8.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT8(OP_REM_INT_LIT8,   "rem", %, true)
+OP_END
diff --git a/vm/mterp/c/OP_REM_LONG.c b/vm/mterp/c/OP_REM_LONG.c
new file mode 100644
index 0000000..04ccc1b
--- /dev/null
+++ b/vm/mterp/c/OP_REM_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG(OP_REM_LONG, "rem", %, true)
+OP_END
diff --git a/vm/mterp/c/OP_REM_LONG_2ADDR.c b/vm/mterp/c/OP_REM_LONG_2ADDR.c
new file mode 100644
index 0000000..5879dd6
--- /dev/null
+++ b/vm/mterp/c/OP_REM_LONG_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG_2ADDR(OP_REM_LONG_2ADDR, "rem", %, true)
+OP_END
diff --git a/vm/mterp/c/OP_RETURN.c b/vm/mterp/c/OP_RETURN.c
new file mode 100644
index 0000000..6d5ffa3
--- /dev/null
+++ b/vm/mterp/c/OP_RETURN.c
@@ -0,0 +1,7 @@
+HANDLE_OPCODE($opcode /*vAA*/)
+    vsrc1 = INST_AA(inst);
+    ILOGV("|return%s v%d",
+        (INST_INST(inst) == OP_RETURN) ? "" : "-object", vsrc1);
+    retval.i = GET_REGISTER(vsrc1);
+    GOTO(returnFromMethod);
+OP_END
diff --git a/vm/mterp/c/OP_RETURN_OBJECT.c b/vm/mterp/c/OP_RETURN_OBJECT.c
new file mode 100644
index 0000000..49bf01e
--- /dev/null
+++ b/vm/mterp/c/OP_RETURN_OBJECT.c
@@ -0,0 +1,2 @@
+%include "c/OP_RETURN.c"
+//OP_END
diff --git a/vm/mterp/c/OP_RETURN_VOID.c b/vm/mterp/c/OP_RETURN_VOID.c
new file mode 100644
index 0000000..38649e9
--- /dev/null
+++ b/vm/mterp/c/OP_RETURN_VOID.c
@@ -0,0 +1,7 @@
+HANDLE_OPCODE(OP_RETURN_VOID /**/)
+    ILOGV("|return-void");
+#ifndef NDEBUG
+    retval.j = 0xababababULL;    // placate valgrind
+#endif
+    GOTO(returnFromMethod);
+OP_END
diff --git a/vm/mterp/c/OP_RETURN_WIDE.c b/vm/mterp/c/OP_RETURN_WIDE.c
new file mode 100644
index 0000000..5bd9849
--- /dev/null
+++ b/vm/mterp/c/OP_RETURN_WIDE.c
@@ -0,0 +1,6 @@
+HANDLE_OPCODE(OP_RETURN_WIDE /*vAA*/)
+    vsrc1 = INST_AA(inst);
+    ILOGV("|return-wide v%d", vsrc1);
+    retval.j = GET_REGISTER_WIDE(vsrc1);
+    GOTO(returnFromMethod);
+OP_END
diff --git a/vm/mterp/c/OP_RSUB_INT.c b/vm/mterp/c/OP_RSUB_INT.c
new file mode 100644
index 0000000..336ca55
--- /dev/null
+++ b/vm/mterp/c/OP_RSUB_INT.c
@@ -0,0 +1,10 @@
+HANDLE_OPCODE(OP_RSUB_INT /*vA, vB, #+CCCC*/)
+    {
+        vdst = INST_A(inst);
+        vsrc1 = INST_B(inst);
+        vsrc2 = FETCH(1);
+        ILOGV("|rsub-int v%d,v%d,#+0x%04x", vdst, vsrc1, vsrc2);
+        SET_REGISTER(vdst, (s2) vsrc2 - (s4) GET_REGISTER(vsrc1));
+    }
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_RSUB_INT_LIT8.c b/vm/mterp/c/OP_RSUB_INT_LIT8.c
new file mode 100644
index 0000000..742854b
--- /dev/null
+++ b/vm/mterp/c/OP_RSUB_INT_LIT8.c
@@ -0,0 +1,12 @@
+HANDLE_OPCODE(OP_RSUB_INT_LIT8 /*vAA, vBB, #+CC*/)
+    {
+        u2 litInfo;
+        vdst = INST_AA(inst);
+        litInfo = FETCH(1);
+        vsrc1 = litInfo & 0xff;
+        vsrc2 = litInfo >> 8;
+        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", "rsub", vdst, vsrc1, vsrc2);
+        SET_REGISTER(vdst, (s1) vsrc2 - (s4) GET_REGISTER(vsrc1));
+    }
+    FINISH(2);
+OP_END
diff --git a/vm/mterp/c/OP_SGET.c b/vm/mterp/c/OP_SGET.c
new file mode 100644
index 0000000..5297cd7
--- /dev/null
+++ b/vm/mterp/c/OP_SGET.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X(OP_SGET,                  "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SGET_BOOLEAN.c b/vm/mterp/c/OP_SGET_BOOLEAN.c
new file mode 100644
index 0000000..7c5d45e
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_BOOLEAN.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X(OP_SGET_BOOLEAN,          "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SGET_BYTE.c b/vm/mterp/c/OP_SGET_BYTE.c
new file mode 100644
index 0000000..b37cab4
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_BYTE.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X(OP_SGET_BYTE,             "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SGET_CHAR.c b/vm/mterp/c/OP_SGET_CHAR.c
new file mode 100644
index 0000000..7ede5ec
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_CHAR.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X(OP_SGET_CHAR,             "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SGET_OBJECT.c b/vm/mterp/c/OP_SGET_OBJECT.c
new file mode 100644
index 0000000..9f3b63d
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_OBJECT.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X(OP_SGET_OBJECT,           "-object", Object, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_SGET_SHORT.c b/vm/mterp/c/OP_SGET_SHORT.c
new file mode 100644
index 0000000..cd1fe4c
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_SHORT.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X(OP_SGET_SHORT,            "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SGET_WIDE.c b/vm/mterp/c/OP_SGET_WIDE.c
new file mode 100644
index 0000000..817c6e7
--- /dev/null
+++ b/vm/mterp/c/OP_SGET_WIDE.c
@@ -0,0 +1,2 @@
+HANDLE_SGET_X(OP_SGET_WIDE,             "-wide", Long, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_SHL_INT.c b/vm/mterp/c/OP_SHL_INT.c
new file mode 100644
index 0000000..e32af49
--- /dev/null
+++ b/vm/mterp/c/OP_SHL_INT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_INT(OP_SHL_INT, "shl", (s4), <<)
+OP_END
diff --git a/vm/mterp/c/OP_SHL_INT_2ADDR.c b/vm/mterp/c/OP_SHL_INT_2ADDR.c
new file mode 100644
index 0000000..c5f5399
--- /dev/null
+++ b/vm/mterp/c/OP_SHL_INT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_INT_2ADDR(OP_SHL_INT_2ADDR, "shl", (s4), <<)
+OP_END
diff --git a/vm/mterp/c/OP_SHL_INT_LIT8.c b/vm/mterp/c/OP_SHL_INT_LIT8.c
new file mode 100644
index 0000000..009d14e
--- /dev/null
+++ b/vm/mterp/c/OP_SHL_INT_LIT8.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_INT_LIT8(OP_SHL_INT_LIT8,   "shl", (s4), <<)
+OP_END
diff --git a/vm/mterp/c/OP_SHL_LONG.c b/vm/mterp/c/OP_SHL_LONG.c
new file mode 100644
index 0000000..f6b502a
--- /dev/null
+++ b/vm/mterp/c/OP_SHL_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_LONG(OP_SHL_LONG, "shl", (s8), <<)
+OP_END
diff --git a/vm/mterp/c/OP_SHL_LONG_2ADDR.c b/vm/mterp/c/OP_SHL_LONG_2ADDR.c
new file mode 100644
index 0000000..b8a9954
--- /dev/null
+++ b/vm/mterp/c/OP_SHL_LONG_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_LONG_2ADDR(OP_SHL_LONG_2ADDR, "shl", (s8), <<)
+OP_END
diff --git a/vm/mterp/c/OP_SHR_INT.c b/vm/mterp/c/OP_SHR_INT.c
new file mode 100644
index 0000000..3834824
--- /dev/null
+++ b/vm/mterp/c/OP_SHR_INT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_INT(OP_SHR_INT, "shr", (s4), >>)
+OP_END
diff --git a/vm/mterp/c/OP_SHR_INT_2ADDR.c b/vm/mterp/c/OP_SHR_INT_2ADDR.c
new file mode 100644
index 0000000..c76c178
--- /dev/null
+++ b/vm/mterp/c/OP_SHR_INT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_INT_2ADDR(OP_SHR_INT_2ADDR, "shr", (s4), >>)
+OP_END
diff --git a/vm/mterp/c/OP_SHR_INT_LIT8.c b/vm/mterp/c/OP_SHR_INT_LIT8.c
new file mode 100644
index 0000000..e2657d7
--- /dev/null
+++ b/vm/mterp/c/OP_SHR_INT_LIT8.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_INT_LIT8(OP_SHR_INT_LIT8,   "shr", (s4), >>)
+OP_END
diff --git a/vm/mterp/c/OP_SHR_LONG.c b/vm/mterp/c/OP_SHR_LONG.c
new file mode 100644
index 0000000..357a666
--- /dev/null
+++ b/vm/mterp/c/OP_SHR_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_LONG(OP_SHR_LONG, "shr", (s8), >>)
+OP_END
diff --git a/vm/mterp/c/OP_SHR_LONG_2ADDR.c b/vm/mterp/c/OP_SHR_LONG_2ADDR.c
new file mode 100644
index 0000000..43e27ea
--- /dev/null
+++ b/vm/mterp/c/OP_SHR_LONG_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_LONG_2ADDR(OP_SHR_LONG_2ADDR, "shr", (s8), >>)
+OP_END
diff --git a/vm/mterp/c/OP_SPARSE_SWITCH.c b/vm/mterp/c/OP_SPARSE_SWITCH.c
new file mode 100644
index 0000000..4886830
--- /dev/null
+++ b/vm/mterp/c/OP_SPARSE_SWITCH.c
@@ -0,0 +1,29 @@
+HANDLE_OPCODE(OP_SPARSE_SWITCH /*vAA, +BBBB*/)
+    {
+        const u2* switchData;
+        u4 testVal;
+        s4 offset;
+
+        vsrc1 = INST_AA(inst);
+        offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+        ILOGV("|sparse-switch v%d +0x%04x", vsrc1, vsrc2);
+        switchData = pc + offset;       // offset in 16-bit units
+#ifndef NDEBUG
+        if (switchData < method->insns ||
+            switchData >= method->insns + dvmGetMethodInsnsSize(method))
+        {
+            /* should have been caught in verifier */
+            EXPORT_PC();
+            dvmThrowException("Ljava/lang/InternalError;", "bad sparse switch");
+            GOTO(exceptionThrown);
+        }
+#endif
+        testVal = GET_REGISTER(vsrc1);
+
+        offset = dvmInterpHandleSparseSwitch(switchData, testVal);
+        ILOGV("> branch taken (0x%04x)\n", offset);
+        if (offset <= 0)  /* uncommon */
+            PERIODIC_CHECKS(kInterpEntryInstr, offset);
+        FINISH(offset);
+    }
+OP_END
diff --git a/vm/mterp/c/OP_SPUT.c b/vm/mterp/c/OP_SPUT.c
new file mode 100644
index 0000000..286e64c
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X(OP_SPUT,                  "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_BOOLEAN.c b/vm/mterp/c/OP_SPUT_BOOLEAN.c
new file mode 100644
index 0000000..55ceb11
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_BOOLEAN.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X(OP_SPUT_BOOLEAN,          "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_BYTE.c b/vm/mterp/c/OP_SPUT_BYTE.c
new file mode 100644
index 0000000..d242fe1
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_BYTE.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X(OP_SPUT_BYTE,             "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_CHAR.c b/vm/mterp/c/OP_SPUT_CHAR.c
new file mode 100644
index 0000000..18a2f06
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_CHAR.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X(OP_SPUT_CHAR,             "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_OBJECT.c b/vm/mterp/c/OP_SPUT_OBJECT.c
new file mode 100644
index 0000000..fb223d6
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_OBJECT.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X(OP_SPUT_OBJECT,           "-object", Object, _AS_OBJECT)
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_SHORT.c b/vm/mterp/c/OP_SPUT_SHORT.c
new file mode 100644
index 0000000..c6cd8d6
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_SHORT.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X(OP_SPUT_SHORT,            "", Int, )
+OP_END
diff --git a/vm/mterp/c/OP_SPUT_WIDE.c b/vm/mterp/c/OP_SPUT_WIDE.c
new file mode 100644
index 0000000..0c74651
--- /dev/null
+++ b/vm/mterp/c/OP_SPUT_WIDE.c
@@ -0,0 +1,2 @@
+HANDLE_SPUT_X(OP_SPUT_WIDE,             "-wide", Long, _WIDE)
+OP_END
diff --git a/vm/mterp/c/OP_SUB_DOUBLE.c b/vm/mterp/c/OP_SUB_DOUBLE.c
new file mode 100644
index 0000000..64a112d
--- /dev/null
+++ b/vm/mterp/c/OP_SUB_DOUBLE.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_DOUBLE(OP_SUB_DOUBLE, "sub", -)
+OP_END
diff --git a/vm/mterp/c/OP_SUB_DOUBLE_2ADDR.c b/vm/mterp/c/OP_SUB_DOUBLE_2ADDR.c
new file mode 100644
index 0000000..5870400
--- /dev/null
+++ b/vm/mterp/c/OP_SUB_DOUBLE_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_DOUBLE_2ADDR(OP_SUB_DOUBLE_2ADDR, "sub", -)
+OP_END
diff --git a/vm/mterp/c/OP_SUB_FLOAT.c b/vm/mterp/c/OP_SUB_FLOAT.c
new file mode 100644
index 0000000..96c5fbd
--- /dev/null
+++ b/vm/mterp/c/OP_SUB_FLOAT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_FLOAT(OP_SUB_FLOAT, "sub", -)
+OP_END
diff --git a/vm/mterp/c/OP_SUB_FLOAT_2ADDR.c b/vm/mterp/c/OP_SUB_FLOAT_2ADDR.c
new file mode 100644
index 0000000..802935c
--- /dev/null
+++ b/vm/mterp/c/OP_SUB_FLOAT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_FLOAT_2ADDR(OP_SUB_FLOAT_2ADDR, "sub", -)
+OP_END
diff --git a/vm/mterp/c/OP_SUB_INT.c b/vm/mterp/c/OP_SUB_INT.c
new file mode 100644
index 0000000..f16524b
--- /dev/null
+++ b/vm/mterp/c/OP_SUB_INT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT(OP_SUB_INT, "sub", -, false)
+OP_END
diff --git a/vm/mterp/c/OP_SUB_INT_2ADDR.c b/vm/mterp/c/OP_SUB_INT_2ADDR.c
new file mode 100644
index 0000000..744eb58
--- /dev/null
+++ b/vm/mterp/c/OP_SUB_INT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_2ADDR(OP_SUB_INT_2ADDR, "sub", -, false)
+OP_END
diff --git a/vm/mterp/c/OP_SUB_LONG.c b/vm/mterp/c/OP_SUB_LONG.c
new file mode 100644
index 0000000..07f782c
--- /dev/null
+++ b/vm/mterp/c/OP_SUB_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG(OP_SUB_LONG, "sub", -, false)
+OP_END
diff --git a/vm/mterp/c/OP_SUB_LONG_2ADDR.c b/vm/mterp/c/OP_SUB_LONG_2ADDR.c
new file mode 100644
index 0000000..09c4653
--- /dev/null
+++ b/vm/mterp/c/OP_SUB_LONG_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG_2ADDR(OP_SUB_LONG_2ADDR, "sub", -, false)
+OP_END
diff --git a/vm/mterp/c/OP_THROW.c b/vm/mterp/c/OP_THROW.c
new file mode 100644
index 0000000..12e002a
--- /dev/null
+++ b/vm/mterp/c/OP_THROW.c
@@ -0,0 +1,17 @@
+HANDLE_OPCODE(OP_THROW /*vAA*/)
+    {
+        Object* obj;
+
+        vsrc1 = INST_AA(inst);
+        ILOGV("|throw v%d  (%p)", vsrc1, (void*)GET_REGISTER(vsrc1));
+        obj = (Object*) GET_REGISTER(vsrc1);
+        if (!checkForNullExportPC(obj, fp, pc)) {
+            /* will throw a null pointer exception */
+            LOGVV("Bad exception\n");
+        } else {
+            /* use the requested exception */
+            dvmSetException(self, obj);
+        }
+        GOTO(exceptionThrown);
+    }
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_3E.c b/vm/mterp/c/OP_UNUSED_3E.c
new file mode 100644
index 0000000..9ecf8e3
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_3E.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_3E)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_3F.c b/vm/mterp/c/OP_UNUSED_3F.c
new file mode 100644
index 0000000..9d1d68d
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_3F.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_3F)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_40.c b/vm/mterp/c/OP_UNUSED_40.c
new file mode 100644
index 0000000..f73a59c
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_40.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_40)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_41.c b/vm/mterp/c/OP_UNUSED_41.c
new file mode 100644
index 0000000..38747e6
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_41.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_41)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_42.c b/vm/mterp/c/OP_UNUSED_42.c
new file mode 100644
index 0000000..154d293
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_42.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_42)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_43.c b/vm/mterp/c/OP_UNUSED_43.c
new file mode 100644
index 0000000..c7e702c
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_43.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_43)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_73.c b/vm/mterp/c/OP_UNUSED_73.c
new file mode 100644
index 0000000..85aa95f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_73.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_73)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_79.c b/vm/mterp/c/OP_UNUSED_79.c
new file mode 100644
index 0000000..1fa86e9
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_79.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_79)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_7A.c b/vm/mterp/c/OP_UNUSED_7A.c
new file mode 100644
index 0000000..beab006
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_7A.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_7A)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E3.c b/vm/mterp/c/OP_UNUSED_E3.c
new file mode 100644
index 0000000..d52836b
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E3.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E3)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E4.c b/vm/mterp/c/OP_UNUSED_E4.c
new file mode 100644
index 0000000..30a714f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E4.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E4)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E5.c b/vm/mterp/c/OP_UNUSED_E5.c
new file mode 100644
index 0000000..0cc6ff9
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E5.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E5)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E6.c b/vm/mterp/c/OP_UNUSED_E6.c
new file mode 100644
index 0000000..255dcd1
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E6.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E6)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E7.c b/vm/mterp/c/OP_UNUSED_E7.c
new file mode 100644
index 0000000..b910f71
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E7.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E7)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E8.c b/vm/mterp/c/OP_UNUSED_E8.c
new file mode 100644
index 0000000..ba11d34
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E8.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E8)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_E9.c b/vm/mterp/c/OP_UNUSED_E9.c
new file mode 100644
index 0000000..6f4d8fb
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_E9.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_E9)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_EA.c b/vm/mterp/c/OP_UNUSED_EA.c
new file mode 100644
index 0000000..8456083
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_EA.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_EA)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_EB.c b/vm/mterp/c/OP_UNUSED_EB.c
new file mode 100644
index 0000000..3525262
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_EB.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_EB)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_EC.c b/vm/mterp/c/OP_UNUSED_EC.c
new file mode 100644
index 0000000..fcb8c2e
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_EC.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_EC)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_ED.c b/vm/mterp/c/OP_UNUSED_ED.c
new file mode 100644
index 0000000..c11348f
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_ED.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_ED)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_EF.c b/vm/mterp/c/OP_UNUSED_EF.c
new file mode 100644
index 0000000..c5e1863
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_EF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_EF)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_F1.c b/vm/mterp/c/OP_UNUSED_F1.c
new file mode 100644
index 0000000..af26195
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_F1.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_F1)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_FC.c b/vm/mterp/c/OP_UNUSED_FC.c
new file mode 100644
index 0000000..4b49684
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_FC.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_FC)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_FD.c b/vm/mterp/c/OP_UNUSED_FD.c
new file mode 100644
index 0000000..c10efaf
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_FD.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_FD)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_FE.c b/vm/mterp/c/OP_UNUSED_FE.c
new file mode 100644
index 0000000..7b097d9
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_FE.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_FE)
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_FF.c b/vm/mterp/c/OP_UNUSED_FF.c
new file mode 100644
index 0000000..ed9e11c
--- /dev/null
+++ b/vm/mterp/c/OP_UNUSED_FF.c
@@ -0,0 +1,2 @@
+HANDLE_OPCODE(OP_UNUSED_FF)
+OP_END
diff --git a/vm/mterp/c/OP_USHR_INT.c b/vm/mterp/c/OP_USHR_INT.c
new file mode 100644
index 0000000..7596c94
--- /dev/null
+++ b/vm/mterp/c/OP_USHR_INT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_INT(OP_USHR_INT, "ushr", (u4), >>)
+OP_END
diff --git a/vm/mterp/c/OP_USHR_INT_2ADDR.c b/vm/mterp/c/OP_USHR_INT_2ADDR.c
new file mode 100644
index 0000000..5fa2b94
--- /dev/null
+++ b/vm/mterp/c/OP_USHR_INT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_INT_2ADDR(OP_USHR_INT_2ADDR, "ushr", (u4), >>)
+OP_END
diff --git a/vm/mterp/c/OP_USHR_INT_LIT8.c b/vm/mterp/c/OP_USHR_INT_LIT8.c
new file mode 100644
index 0000000..0d325d7
--- /dev/null
+++ b/vm/mterp/c/OP_USHR_INT_LIT8.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_INT_LIT8(OP_USHR_INT_LIT8,  "ushr", (u4), >>)
+OP_END
diff --git a/vm/mterp/c/OP_USHR_LONG.c b/vm/mterp/c/OP_USHR_LONG.c
new file mode 100644
index 0000000..9b7e757
--- /dev/null
+++ b/vm/mterp/c/OP_USHR_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_LONG(OP_USHR_LONG, "ushr", (u8), >>)
+OP_END
diff --git a/vm/mterp/c/OP_USHR_LONG_2ADDR.c b/vm/mterp/c/OP_USHR_LONG_2ADDR.c
new file mode 100644
index 0000000..4ac0598
--- /dev/null
+++ b/vm/mterp/c/OP_USHR_LONG_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_SHX_LONG_2ADDR(OP_USHR_LONG_2ADDR, "ushr", (u8), >>)
+OP_END
diff --git a/vm/mterp/c/OP_XOR_INT.c b/vm/mterp/c/OP_XOR_INT.c
new file mode 100644
index 0000000..76b7791
--- /dev/null
+++ b/vm/mterp/c/OP_XOR_INT.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT(OP_XOR_INT, "xor", ^, false)
+OP_END
diff --git a/vm/mterp/c/OP_XOR_INT_2ADDR.c b/vm/mterp/c/OP_XOR_INT_2ADDR.c
new file mode 100644
index 0000000..9b8ff1d
--- /dev/null
+++ b/vm/mterp/c/OP_XOR_INT_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_2ADDR(OP_XOR_INT_2ADDR, "xor", ^, false)
+OP_END
diff --git a/vm/mterp/c/OP_XOR_INT_LIT16.c b/vm/mterp/c/OP_XOR_INT_LIT16.c
new file mode 100644
index 0000000..b9d3127
--- /dev/null
+++ b/vm/mterp/c/OP_XOR_INT_LIT16.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT16(OP_XOR_INT_LIT16, "xor", (s4), ^, false)
+OP_END
diff --git a/vm/mterp/c/OP_XOR_INT_LIT8.c b/vm/mterp/c/OP_XOR_INT_LIT8.c
new file mode 100644
index 0000000..11c641a
--- /dev/null
+++ b/vm/mterp/c/OP_XOR_INT_LIT8.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_INT_LIT8(OP_XOR_INT_LIT8,   "xor", ^, false)
+OP_END
diff --git a/vm/mterp/c/OP_XOR_LONG.c b/vm/mterp/c/OP_XOR_LONG.c
new file mode 100644
index 0000000..289af20
--- /dev/null
+++ b/vm/mterp/c/OP_XOR_LONG.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG(OP_XOR_LONG, "xor", ^, false)
+OP_END
diff --git a/vm/mterp/c/OP_XOR_LONG_2ADDR.c b/vm/mterp/c/OP_XOR_LONG_2ADDR.c
new file mode 100644
index 0000000..8e4976e
--- /dev/null
+++ b/vm/mterp/c/OP_XOR_LONG_2ADDR.c
@@ -0,0 +1,2 @@
+HANDLE_OP_X_LONG_2ADDR(OP_XOR_LONG_2ADDR, "xor", ^, false)
+OP_END
diff --git a/vm/mterp/c/footer.c b/vm/mterp/c/footer.c
new file mode 100644
index 0000000..89e7347
--- /dev/null
+++ b/vm/mterp/c/footer.c
@@ -0,0 +1,977 @@
+/*
+ * C footer.  This has some common code shared by the various targets.
+ */
+
+#define GOTO_TARGET(_target, ...)                                           \
+    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) {              \
+        u2 ref, vsrc1, vsrc2, vdst;                                         \
+        u2 inst = FETCH(0);                                                 \
+        const Method* methodToCall;                                         \
+        StackSaveArea* debugSaveArea;
+
+#define GOTO_TARGET_END }
+
+
+/*
+ * Everything from here on is a "goto target".  In the basic interpreter
+ * we jump into these targets and then jump directly to the handler for
+ * next instruction.  Here, these are subroutines that return to the caller.
+ */
+
+GOTO_TARGET(filledNewArray, bool methodCallRange)
+    {
+        ClassObject* arrayClass;
+        ArrayObject* newArray;
+        int* contents;
+        char typeCh;
+        int i;
+        u4 arg5;
+
+        EXPORT_PC();
+
+        ref = FETCH(1);             /* class ref */
+        vdst = FETCH(2);            /* first 4 regs -or- range base */
+
+        if (methodCallRange) {
+            vsrc1 = INST_AA(inst);  /* #of elements */
+            arg5 = -1;              /* silence compiler warning */
+            ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+        } else {
+            arg5 = INST_A(inst);
+            vsrc1 = INST_B(inst);   /* #of elements */
+            ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1, ref, vdst, arg5);
+        }
+
+        /*
+         * Resolve the array class.
+         */
+        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (arrayClass == NULL) {
+            arrayClass = dvmResolveClass(method->clazz, ref, false);
+            if (arrayClass == NULL)
+                GOTO(exceptionThrown);
+        }
+        /*
+        if (!dvmIsArrayClass(arrayClass)) {
+            dvmThrowException("Ljava/lang/RuntimeError;",
+                "filled-new-array needs array class");
+            GOTO(exceptionThrown);
+        }
+        */
+        /* verifier guarantees this is an array class */
+        assert(dvmIsArrayClass(arrayClass));
+        assert(dvmIsClassInitialized(arrayClass));
+
+        /*
+         * Create an array of the specified type.
+         */
+        LOGVV("+++ filled-new-array type is '%s'\n", arrayClass->descriptor);
+        typeCh = arrayClass->descriptor[1];
+        if (typeCh == 'D' || typeCh == 'J') {
+            /* category 2 primitives not allowed */
+            dvmThrowException("Ljava/lang/RuntimeError;",
+                "bad filled array req");
+            GOTO(exceptionThrown);
+        } else if (typeCh == 'L' || typeCh == '[') {
+            /* create array of objects or array of arrays */
+            /* TODO: need some work in the verifier before we allow this */
+            LOGE("fnao not implemented\n");
+            dvmThrowException("Ljava/lang/InternalError;",
+                "filled-new-array not implemented for reference types");
+            GOTO(exceptionThrown);
+        } else if (typeCh != 'I') {
+            /* TODO: requires multiple "fill in" loops with different widths */
+            LOGE("non-int not implemented\n");
+            dvmThrowException("Ljava/lang/InternalError;",
+                "filled-new-array not implemented for anything but 'int'");
+            GOTO(exceptionThrown);
+        }
+
+        assert(strchr("BCIFZ", typeCh) != NULL);
+        newArray = dvmAllocPrimitiveArray(arrayClass->descriptor[1], vsrc1,
+                    ALLOC_DONT_TRACK);
+        if (newArray == NULL)
+            GOTO(exceptionThrown);
+
+        /*
+         * Fill in the elements.  It's legal for vsrc1 to be zero.
+         */
+        contents = (int*) newArray->contents;
+        if (methodCallRange) {
+            for (i = 0; i < vsrc1; i++)
+                contents[i] = GET_REGISTER(vdst+i);
+        } else {
+            assert(vsrc1 <= 5);
+            if (vsrc1 == 5) {
+                contents[4] = GET_REGISTER(arg5);
+                vsrc1--;
+            }
+            for (i = 0; i < vsrc1; i++) {
+                contents[i] = GET_REGISTER(vdst & 0x0f);
+                vdst >>= 4;
+            }
+        }
+
+        retval.l = newArray;
+    }
+    FINISH(3);
+GOTO_TARGET_END
+
+
+GOTO_TARGET(invokeVirtual, bool methodCallRange)
+    {
+        Method* baseMethod;
+        Object* thisPtr;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert(vsrc1 > 0);
+            ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert((vsrc1>>4) > 0);
+            ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+
+        if (!checkForNull(thisPtr))
+            GOTO(exceptionThrown);
+
+        /*
+         * Resolve the method.  This is the correct method for the static
+         * type of the object.  We also verify access permissions here.
+         */
+        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (baseMethod == NULL) {
+            baseMethod = dvmResolveMethod(method->clazz, ref, METHOD_VIRTUAL);
+            if (baseMethod == NULL) {
+                ILOGV("+ unknown method or access denied\n");
+                GOTO(exceptionThrown);
+            }
+        }
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method.
+         */
+        assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
+        methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            /*
+             * This can happen if you create two classes, Base and Sub, where
+             * Sub is a sub-class of Base.  Declare a protected abstract
+             * method foo() in Base, and invoke foo() from a method in Base.
+             * Base is an "abstract base class" and is never instantiated
+             * directly.  Now, override foo() in Sub, and use Sub.  This
+             * works fine unless Sub stops providing an implementation of
+             * the method.
+             */
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+
+        LOGVV("+++ base=%s.%s virtual[%d]=%s.%s\n",
+            baseMethod->clazz->descriptor, baseMethod->name,
+            (u4) baseMethod->methodIndex,
+            methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+#if 0
+        if (vsrc1 != methodToCall->insSize) {
+            LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s\n",
+                baseMethod->clazz->descriptor, baseMethod->name,
+                (u4) baseMethod->methodIndex,
+                methodToCall->clazz->descriptor, methodToCall->name);
+            //dvmDumpClass(baseMethod->clazz);
+            //dvmDumpClass(methodToCall->clazz);
+            dvmDumpAllClasses(0);
+        }
+#endif
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuper, bool methodCallRange)
+    {
+        Method* baseMethod;
+        u2 thisReg;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisReg = vdst & 0x0f;
+        }
+        /* impossible in well-formed code, but we must check nevertheless */
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            GOTO(exceptionThrown);
+
+        /*
+         * Resolve the method.  This is the correct method for the static
+         * type of the object.  We also verify access permissions here.
+         * The first arg to dvmResolveMethod() is just the referring class
+         * (used for class loaders and such), so we don't want to pass
+         * the superclass into the resolution call.
+         */
+        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (baseMethod == NULL) {
+            baseMethod = dvmResolveMethod(method->clazz, ref, METHOD_VIRTUAL);
+            if (baseMethod == NULL) {
+                ILOGV("+ unknown method or access denied\n");
+                GOTO(exceptionThrown);
+            }
+        }
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method's class.
+         *
+         * We're using the current method's class' superclass, not the
+         * superclass of "this".  This is because we might be executing
+         * in a method inherited from a superclass, and we want to run
+         * in that class' superclass.
+         */
+        if (baseMethod->methodIndex >= method->clazz->super->vtableCount) {
+            /*
+             * Method does not exist in the superclass.  Could happen if
+             * superclass gets updated.
+             */
+            dvmThrowException("Ljava/lang/NoSuchMethodError;",
+                baseMethod->name);
+            GOTO(exceptionThrown);
+        }
+        methodToCall = method->clazz->super->vtable[baseMethod->methodIndex];
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+        LOGVV("+++ base=%s.%s super-virtual=%s.%s\n",
+            baseMethod->clazz->descriptor, baseMethod->name,
+            methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeInterface, bool methodCallRange)
+    {
+        Object* thisPtr;
+        ClassObject* thisClass;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert(vsrc1 > 0);
+            ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert((vsrc1>>4) > 0);
+            ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+
+        if (!checkForNull(thisPtr))
+            GOTO(exceptionThrown);
+        thisClass = thisPtr->clazz;
+
+        /*
+         * Given a class and a method index, find the Method* with the
+         * actual code we want to execute.
+         */
+        methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, method,
+                        methodClassDex);
+        if (methodToCall == NULL) {
+            assert(dvmCheckException(self));
+            GOTO(exceptionThrown);
+        }
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeDirect, bool methodCallRange)
+    {
+        u2 thisReg;
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        EXPORT_PC();
+
+        if (methodCallRange) {
+            ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisReg = vdst & 0x0f;
+        }
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            GOTO(exceptionThrown);
+
+        methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (methodToCall == NULL) {
+            methodToCall = dvmResolveMethod(method->clazz, ref, METHOD_DIRECT);
+            if (methodToCall == NULL) {
+                ILOGV("+ unknown direct method\n");     // should be impossible
+                GOTO(exceptionThrown);
+            }
+        }
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeStatic, bool methodCallRange)
+    vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+    ref = FETCH(1);             /* method ref */
+    vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+    EXPORT_PC();
+
+    if (methodCallRange)
+        ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+            vsrc1, ref, vdst, vdst+vsrc1-1);
+    else
+        ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+            vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+    if (methodToCall == NULL) {
+        methodToCall = dvmResolveMethod(method->clazz, ref, METHOD_STATIC);
+        if (methodToCall == NULL) {
+            ILOGV("+ unknown method\n");
+            GOTO(exceptionThrown);
+        }
+    }
+    GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+    {
+        Object* thisPtr;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* vtable index */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert(vsrc1 > 0);
+            ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert((vsrc1>>4) > 0);
+            ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+
+        if (!checkForNull(thisPtr))
+            GOTO(exceptionThrown);
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method.
+         */
+        assert(ref < thisPtr->clazz->vtableCount);
+        methodToCall = thisPtr->clazz->vtable[ref];
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+
+        LOGVV("+++ virtual[%d]=%s.%s\n",
+            ref, methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+    {
+        u2 thisReg;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* vtable index */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisReg = vdst & 0x0f;
+        }
+        /* impossible in well-formed code, but we must check nevertheless */
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            GOTO(exceptionThrown);
+
+#if 0   /* impossible in optimized + verified code */
+        if (ref >= method->clazz->super->vtableCount) {
+            dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(ref < method->clazz->super->vtableCount);
+#endif
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method's class.
+         *
+         * We're using the current method's class' superclass, not the
+         * superclass of "this".  This is because we might be executing
+         * in a method inherited from a superclass, and we want to run
+         * in the method's class' superclass.
+         */
+        methodToCall = method->clazz->super->vtable[ref];
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+        LOGVV("+++ super-virtual[%d]=%s.%s\n",
+            ref, methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+
+
+    /*
+     * General handling for return-void, return, and return-wide.  Put the
+     * return value in "retval" before jumping here.
+     */
+GOTO_TARGET(returnFromMethod)
+    {
+        StackSaveArea* saveArea;
+
+        /*
+         * We must do this BEFORE we pop the previous stack frame off, so
+         * that the GC can see the return value (if any) in the local vars.
+         *
+         * Since this is now an interpreter switch point, we must do it before
+         * we do anything at all.
+         */
+        PERIODIC_CHECKS(kInterpEntryReturn, 0);
+
+        ILOGV("> retval=0x%llx (leaving %s.%s %s)",
+            retval.j, method->clazz->descriptor, method->name,
+            method->signature);
+        //DUMP_REGS(method, fp);
+
+        saveArea = SAVEAREA_FROM_FP(fp);
+
+#ifdef EASY_GDB
+        debugSaveArea = saveArea;
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+        TRACE_METHOD_EXIT(self, method);
+#endif
+
+        /* back up to previous frame and see if we hit a break */
+        fp = saveArea->prevFrame;
+        assert(fp != NULL);
+        if (dvmIsBreakFrame(fp)) {
+            /* bail without popping the method frame from stack */
+            LOGVV("+++ returned into break frame\n");
+            GOTO_BAIL(false);
+        }
+
+        /* update thread FP, and reset local variables */
+        self->curFrame = fp;
+        method =
+#undef method       // ARRGH!
+            SAVEAREA_FROM_FP(fp)->method;
+#define method glue->method
+        //methodClass = method->clazz;
+        methodClassDex = method->clazz->pDvmDex;
+        pc = saveArea->savedPc;
+        ILOGD("> (return to %s.%s %s)", method->clazz->descriptor,
+            method->name, method->signature);
+
+        /* use FINISH on the caller's invoke instruction */
+        //u2 invokeInstr = INST_INST(FETCH(0));
+        if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+            invokeInstr <= OP_INVOKE_INTERFACE*/)
+        {
+            FINISH(3);
+        } else {
+            //LOGE("Unknown invoke instr %02x at %d\n",
+            //    invokeInstr, (int) (pc - method->insns));
+            assert(false);
+        }
+    }
+GOTO_TARGET_END
+
+
+    /*
+     * Jump here when the code throws an exception.
+     *
+     * By the time we get here, the Throwable has been created and the stack
+     * trace has been saved off.
+     */
+GOTO_TARGET(exceptionThrown)
+    {
+        Object* exception;
+        int catchRelPc;
+
+        /*
+         * Since this is now an interpreter switch point, we must do it before
+         * we do anything at all.
+         */
+        PERIODIC_CHECKS(kInterpEntryThrow, 0);
+
+        /*
+         * We save off the exception and clear the exception status.  While
+         * processing the exception we might need to load some Throwable
+         * classes, and we don't want class loader exceptions to get
+         * confused with this one.
+         */
+        assert(dvmCheckException(self));
+        exception = dvmGetException(self);
+        /* pin the Throwable so a GC during handler lookup can't reclaim it */
+        dvmAddTrackedAlloc(exception, self);
+        dvmClearException(self);
+
+        LOGV("Handling exception %s at %s:%d\n",
+            exception->clazz->descriptor, method->name,
+            dvmLineNumFromPC(method, pc - method->insns));
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+        /*
+         * Tell the debugger about it.
+         *
+         * TODO: if the exception was thrown by interpreted code, control
+         * fell through native, and then back to us, we will report the
+         * exception at the point of the throw and again here.  We can avoid
+         * this by not reporting exceptions when we jump here directly from
+         * the native call code above, but then we won't report exceptions
+         * that were thrown *from* the JNI code (as opposed to *through* it).
+         *
+         * The correct solution is probably to ignore from-native exceptions
+         * here, and have the JNI exception code do the reporting to the
+         * debugger.
+         */
+        if (gDvm.debuggerActive) {
+            void* catchFrame;
+            /*
+             * NOTE(review): this first lookup passes "true" where the later
+             * one passes "false" -- presumably a scan-only search that does
+             * not unroll the stack; confirm against dvmFindCatchBlock().
+             */
+            catchRelPc = dvmFindCatchBlock(self, pc - method->insns,
+                        exception, true, &catchFrame);
+            dvmDbgPostException(fp, pc - method->insns, catchFrame, catchRelPc,
+                exception);
+        }
+#endif
+
+        /*
+         * We need to unroll to the catch block or the nearest "break"
+         * frame.
+         *
+         * A break frame could indicate that we have reached an intermediate
+         * native call, or have gone off the top of the stack and the thread
+         * needs to exit.  Either way, we return from here, leaving the
+         * exception raised.
+         *
+         * If we do find a catch block, we want to transfer execution to
+         * that point.
+         */
+        /* unrolling search: on success "fp" is updated to the catch frame */
+        catchRelPc = dvmFindCatchBlock(self, pc - method->insns,
+                    exception, false, (void*)&fp);
+
+        /*
+         * Restore the stack bounds after an overflow.  This isn't going to
+         * be correct in all circumstances, e.g. if JNI code devours the
+         * exception this won't happen until some other exception gets
+         * thrown.  If the code keeps pushing the stack bounds we'll end
+         * up aborting the VM.
+         */
+        if (self->stackOverflowed)
+            dvmCleanupStackOverflow(self);
+
+        if (catchRelPc < 0) {
+            /* falling through to JNI code or off the bottom of the stack */
+#if DVM_SHOW_EXCEPTION >= 2
+            LOGD("Exception %s from %s:%d not caught locally\n",
+                exception->clazz->descriptor, dvmGetMethodSourceFile(method),
+                dvmLineNumFromPC(method, pc - method->insns));
+#endif
+            /* re-raise for the caller and drop our GC pin before bailing */
+            dvmSetException(self, exception);
+            dvmReleaseTrackedAlloc(exception, self);
+            GOTO_BAIL(false);
+        }
+
+#if DVM_SHOW_EXCEPTION >= 3
+        {
+            /*
+             * "method" is macro-remapped to glue->method; undo the mapping
+             * so we can read the StackSaveArea's "method" struct field.
+             */
+            const Method* catchMethod =
+#undef method
+                SAVEAREA_FROM_FP(fp)->method;
+#define method glue->method
+            LOGD("Exception %s thrown from %s:%d to %s:%d\n",
+                exception->clazz->descriptor, dvmGetMethodSourceFile(method),
+                dvmLineNumFromPC(method, pc - method->insns),
+                dvmGetMethodSourceFile(catchMethod),
+                dvmLineNumFromPC(catchMethod, catchRelPc));
+        }
+#endif
+
+        /*
+         * Adjust local variables to match self->curFrame and the
+         * updated PC.
+         */
+        //fp = (u4*) self->curFrame;
+        /* same #undef dance as above to reach the struct field */
+        method =
+#undef method
+            SAVEAREA_FROM_FP(fp)->method;
+#define method glue->method
+        //methodClass = method->clazz;
+        methodClassDex = method->clazz->pDvmDex;
+        pc = method->insns + catchRelPc;
+        ILOGV("> pc <-- %s.%s %s", method->clazz->descriptor, method->name,
+            method->signature);
+        DUMP_REGS(method, fp, false);               // show all regs
+
+        /*
+         * Restore the exception if the handler wants it.
+         *
+         * The Dalvik spec mandates that, if an exception handler wants to
+         * do something with the exception, the first instruction executed
+         * must be "move-exception".  We can pass the exception along
+         * through the thread struct, and let the move-exception instruction
+         * clear it for us.
+         *
+         * If the handler doesn't call move-exception, we don't want to
+         * finish here with an exception still pending.
+         */
+        if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
+            dvmSetException(self, exception);
+
+        dvmReleaseTrackedAlloc(exception, self);
+        FINISH(0);
+    }
+GOTO_TARGET_END
+
+
+    /*
+     * General handling for invoke-{virtual,super,direct,static,interface},
+     * including "quick" variants.
+     *
+     * Set "methodToCall" to the Method we're calling, and "methodCallRange"
+     * depending on whether this is a "/range" instruction.
+     *
+     * For a range call:
+     *  "vsrc1" holds the argument count (8 bits)
+     *  "vdst" holds the first argument in the range
+     * For a non-range call:
+     *  "vsrc1" holds the argument count (4 bits) and the 5th argument index
+     *  "vdst" holds four 4-bit register indices
+     *
+     * The caller must EXPORT_PC before jumping here, because any method
+     * call can throw a stack overflow exception.
+     */
+GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
+    u2 count, u2 regs)
+    {
+        /*
+         * Repopulate the locals the copied opcode code below expects from
+         * the explicit arguments of this (formerly goto-target) function.
+         */
+        vsrc1 = count; vdst = regs; methodToCall = _methodToCall;  /* ADDED */
+
+        //printf("range=%d call=%p count=%d regs=0x%04x\n",
+        //    methodCallRange, methodToCall, count, regs);
+        //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
+        //    methodToCall->name, methodToCall->signature);
+
+        u4* outs;
+        int i;
+
+        /*
+         * Copy args.  This may corrupt vsrc1/vdst.
+         */
+        if (methodCallRange) {
+            // could use memcpy or a "Duff's device"; most functions have
+            // so few args it won't matter much
+            assert(vsrc1 <= method->outsSize);
+            assert(vsrc1 == methodToCall->insSize);
+            outs = OUTS_FROM_FP(fp, vsrc1);
+            for (i = 0; i < vsrc1; i++)
+                outs[i] = GET_REGISTER(vdst+i);
+        } else {
+            /* non-range form: arg count in high nibble of vsrc1,
+             * 5th register index (if any) in its low nibble */
+            u4 count = vsrc1 >> 4;
+
+            assert(count <= method->outsSize);
+            assert(count == methodToCall->insSize);
+            assert(count <= 5);
+
+            outs = OUTS_FROM_FP(fp, count);
+#if 0
+            if (count == 5) {
+                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+                count--;
+            }
+            for (i = 0; i < (int) count; i++) {
+                outs[i] = GET_REGISTER(vdst & 0x0f);
+                vdst >>= 4;
+            }
+#else
+            // This version executes fewer instructions but is larger
+            // overall.  Seems to be a teensy bit faster.
+            assert((vdst >> 16) == 0);  // 16 bits -or- high 16 bits clear
+            /* every case deliberately falls through to the next */
+            switch (count) {
+            case 5:
+                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+            case 4:
+                outs[3] = GET_REGISTER(vdst >> 12);
+            case 3:
+                outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
+            case 2:
+                outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
+            case 1:
+                outs[0] = GET_REGISTER(vdst & 0x0f);
+            default:
+                ;
+            }
+#endif
+        }
+    }
+
+    /*
+     * (This was originally a "goto" target; I've kept it separate from the
+     * stuff above in case we want to refactor things again.)
+     *
+     * At this point, we have the arguments stored in the "outs" area of
+     * the current method's stack frame, and the method to call in
+     * "methodToCall".  Push a new stack frame.
+     */
+    {
+        StackSaveArea* newSaveArea;
+        u4* newFp;
+
+        ILOGV("> %s%s.%s %s",
+            dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
+            methodToCall->clazz->descriptor, methodToCall->name,
+            methodToCall->signature);
+
+        /* interp stack grows toward lower addresses: the callee's
+         * registers sit just below the caller's save area */
+        newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
+        newSaveArea = SAVEAREA_FROM_FP(newFp);
+
+        /* verify that we have enough space */
+        if (true) {
+            u1* bottom;
+            bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
+            if (bottom < self->interpStackEnd) {
+                /* stack overflow */
+                LOGV("Stack overflow on method call (start=%p end=%p newBot=%p size=%d '%s')\n",
+                    self->interpStackStart, self->interpStackEnd, bottom,
+                    self->interpStackSize, methodToCall->name);
+                dvmHandleStackOverflow(self);
+                assert(dvmCheckException(self));
+                GOTO(exceptionThrown);
+            }
+            //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p\n",
+            //    fp, newFp, newSaveArea, bottom);
+        }
+
+#ifdef LOG_INSTR
+        if (methodToCall->registersSize > methodToCall->insSize) {
+            /*
+             * This makes valgrind quiet when we print registers that
+             * haven't been initialized.  Turn it off when the debug
+             * messages are disabled -- we want valgrind to report any
+             * used-before-initialized issues.
+             */
+            memset(newFp, 0xcc,
+                (methodToCall->registersSize - methodToCall->insSize) * 4);
+        }
+#endif
+
+#ifdef EASY_GDB
+        newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
+#endif
+        newSaveArea->prevFrame = fp;
+        newSaveArea->savedPc = pc;
+        /* "method" is macro-remapped to glue->method; undo the mapping so
+         * the assignment hits the StackSaveArea's "method" struct field */
+#undef method
+        newSaveArea->method = methodToCall;
+#define method glue->method
+
+        if (!dvmIsNativeMethod(methodToCall)) {
+            /*
+             * "Call" interpreted code.  Reposition the PC, update the
+             * frame pointer and other local state, and continue.
+             */
+            method = methodToCall;
+            methodClassDex = method->clazz->pDvmDex;
+            pc = methodToCall->insns;
+            fp = self->curFrame = newFp;
+#ifdef EASY_GDB
+            debugSaveArea = SAVEAREA_FROM_FP(newFp);
+#endif
+#if INTERP_TYPE == INTERP_DBG
+            debugIsMethodEntry = true;              // profiling, debugging
+#endif
+            ILOGD("> pc <-- %s.%s %s", method->clazz->descriptor, method->name,
+                method->signature);
+            DUMP_REGS(method, fp, true);            // show input args
+            FINISH(0);                              // jump to method start
+        } else {
+            /* set this up for JNI locals, even if not a JNI native */
+            newSaveArea->xtra.localRefTop = self->jniLocalRefTable.nextEntry;
+
+            self->curFrame = newFp;
+
+            DUMP_REGS(methodToCall, newFp, true);   // show input args
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+            if (gDvm.debuggerActive) {
+                dvmDbgPostLocationEvent(methodToCall, -1,
+                    dvmGetThisPtr(method, fp), DBG_METHOD_ENTRY);
+            }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+            TRACE_METHOD_ENTER(self, methodToCall);
+#endif
+
+            ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
+                methodToCall->name, methodToCall->signature);
+
+            /*
+             * Jump through native call bridge.  Because we leave no
+             * space for locals on native calls, "newFp" points directly
+             * to the method arguments.
+             */
+            (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+            if (gDvm.debuggerActive) {
+                dvmDbgPostLocationEvent(methodToCall, -1,
+                    dvmGetThisPtr(method, fp), DBG_METHOD_EXIT);
+            }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+            TRACE_METHOD_EXIT(self, methodToCall);
+#endif
+
+            /* pop frame off */
+            dvmPopJniLocals(self, newSaveArea);
+            self->curFrame = fp;
+
+            /*
+             * If the native code threw an exception, or interpreted code
+             * invoked by the native call threw one and nobody has cleared
+             * it, jump to our local exception handling.
+             */
+            if (dvmCheckException(self)) {
+                LOGV("Exception thrown by/below native code\n");
+                GOTO(exceptionThrown);
+            }
+
+            ILOGD("> retval=0x%llx (leaving native)", retval.j);
+            ILOGD("> (return from native %s.%s to %s.%s %s)",
+                methodToCall->clazz->descriptor, methodToCall->name,
+                method->clazz->descriptor, method->name,
+                method->signature);
+
+            /* advance past the 3-unit invoke instruction in the caller */
+            //u2 invokeInstr = INST_INST(FETCH(0));
+            if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+                invokeInstr <= OP_INVOKE_INTERFACE*/)
+            {
+                FINISH(3);
+            } else {
+                //LOGE("Unknown invoke instr %02x at %d\n",
+                //    invokeInstr, (int) (pc - method->insns));
+                assert(false);
+            }
+        }
+    }
+    assert(false);      // should not get here
+GOTO_TARGET_END
+
+
+/* undefine "magic" name remapping */
+#undef retval
+#undef pc
+#undef fp
+#undef method
+#undef methodClassDex
+#undef self
+#undef debugTrackedRefStart
diff --git a/vm/mterp/c/header.c b/vm/mterp/c/header.c
new file mode 100644
index 0000000..1c3b20e
--- /dev/null
+++ b/vm/mterp/c/header.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h>                   // needed for fmod, fmodf
+
+
+/*
+ * Forward-declare the out-of-line "goto target" handlers.  Each expands to
+ * a dvmMterp_<target>() function taking the glue pointer plus any extra
+ * arguments (uses the GCC ", ##__VA_ARGS__" comma-swallowing extension so
+ * the argument list may be empty).
+ */
+#define GOTO_TARGET_DECL(_target, ...)                                      \
+    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+
+/* one declaration per target referenced by the GOTO() macro */
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+    u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
diff --git a/vm/mterp/c/opcommon.c b/vm/mterp/c/opcommon.c
new file mode 100644
index 0000000..c3f824d
--- /dev/null
+++ b/vm/mterp/c/opcommon.c
@@ -0,0 +1,627 @@
+/*
+ * Redefine what used to be local variable accesses into MterpGlue struct
+ * references.  (These are undefined down in "footer.c".)
+ */
+#define retval                  glue->retval
+#define pc                      glue->pc
+#define fp                      glue->fp
+#define method                  glue->method
+#define methodClassDex          glue->methodClassDex
+#define self                    glue->self
+//#define entryPoint              glue->entryPoint
+#define debugTrackedRefStart    glue->debugTrackedRefStart
+
+
+/*
+ * Replace the opcode definition macros.  Here, each opcode is a separate
+ * function that takes a "glue" argument and returns void.  We can't declare
+ * these "static" because they may be called from an assembly stub.
+ */
+#undef HANDLE_OPCODE
+#undef OP_END
+#undef FINISH
+
+/*
+ * Opens dvmMterp_<op>(): declares the decode temporaries and fetches the
+ * current instruction word from "pc" (via the glue remapping above).
+ */
+#define HANDLE_OPCODE(_op)                                                  \
+    void dvmMterp_##_op(MterpGlue* glue) {                                  \
+        u2 ref, vsrc1, vsrc2, vdst;                                         \
+        u2 inst = FETCH(0);
+
+/* closes the handler function opened by HANDLE_OPCODE */
+#define OP_END }
+
+/*
+ * Like standard FINISH, but don't reload "inst", and return to caller
+ * when done.
+ */
+#define FINISH(_offset) {                                                   \
+        ADJUST_PC(_offset);                                                 \
+        CHECK_DEBUG_AND_PROF();                                             \
+        CHECK_TRACKED_REFS();                                               \
+        return;                                                             \
+    }
+
+
+/*
+ * The "goto label" statements turn into function calls followed by
+ * return statements.  Some of the functions take arguments.
+ */
+#define GOTO(_target, ...)                                                  \
+    do {                                                                    \
+        dvmMterp_##_target(glue, ## __VA_ARGS__);                           \
+        return;                                                             \
+    } while(false)
+
+/*
+ * As a special case, "goto bail" turns into a longjmp.  "_switch" should be
+ * "true" if we need to switch to the other interpreter upon our return.
+ *
+ * NOTE(review): code using GOTO_BAIL assumes dvmMterpStdBail() does not
+ * return (per the longjmp comment above) -- confirm against its definition.
+ */
+#define GOTO_BAIL(_switch)                                                  \
+    dvmMterpStdBail(glue, _switch);
+
+/* for now, mterp is always a "standard" interpreter */
+#define INTERP_TYPE INTERP_STD
+
+/*
+ * Periodic checks macro, slightly modified.
+ * Runs the quick suspend check; if an interpreter switch is needed, records
+ * the entry point + adjusted PC in the glue and bails out via longjmp.
+ */
+#define PERIODIC_CHECKS(_entryPoint, _pcadj) {                              \
+        dvmCheckSuspendQuick(self);                                         \
+        if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
+            ADJUST_PC(_pcadj);                                              \
+            glue->entryPoint = _entryPoint;                                 \
+            LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n",              \
+                glue->self->threadId, (_entryPoint), (_pcadj));             \
+            GOTO_BAIL(true);                                                \
+        }                                                                   \
+    }
+
+
+/*
+ * ===========================================================================
+ *
+ * What follows are the "common" opcode definitions copied & pasted from the
+ * basic interpreter.  The only changes that need to be made to the original
+ * sources are:
+ *  - replace "goto exceptionThrown" with "GOTO(exceptionThrown)"
+ *
+ * ===========================================================================
+ */
+
+
+/* numeric conversion vA <- (totype) vB, e.g. int-to-long, float-to-double */
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype)                \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        SET_REGISTER##_totype(vdst,                                         \
+            GET_REGISTER##_fromtype(vsrc1));                                \
+        FINISH(1);
+
+/*
+ * Float/double to int/long with explicit clamping of +/-inf and NaN.
+ *
+ * NOTE(review): "(_tovtype) 1 << (bits - 1)" left-shifts into the sign bit,
+ * which is formally undefined in C; it yields the intended minimum value on
+ * the two's-complement targets this code runs on -- revisit if porting.
+ * Also, the first "result = (_tovtype) val;" is a dead store: every branch
+ * below (including the final else) reassigns result.
+ */
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype,       \
+        _tovtype, _tortype)                                                 \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+    {                                                                       \
+        /* spec defines specific handling for +/- inf and NaN values */     \
+        _fromvtype val;                                                     \
+        _tovtype intMin, intMax, result;                                    \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        val = GET_REGISTER##_fromrtype(vsrc1);                              \
+        intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1);                 \
+        intMax = ~intMin;                                                   \
+        result = (_tovtype) val;                                            \
+        if (val >= intMax)          /* +inf */                              \
+            result = intMax;                                                \
+        else if (val <= intMin)     /* -inf */                              \
+            result = intMin;                                                \
+        else if (val != val)        /* NaN */                               \
+            result = 0;                                                     \
+        else                                                                \
+            result = (_tovtype) val;                                        \
+        SET_REGISTER##_tortype(vdst, result);                               \
+    }                                                                       \
+    FINISH(1);
+
+/* narrowing int conversion vA <- (_type) vB (int-to-byte/char/short) */
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type)                        \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1);                \
+        SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1));                    \
+        FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+/* cmp-kind vAA <- compare(vBB, vCC); _nanVal is stored for unordered NaN */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal)          \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        int result;                                                         \
+        u2 regs;                                                            \
+        _varType val1, val2;                                                \
+        vdst = INST_AA(inst);                                               \
+        regs = FETCH(1);                                                    \
+        vsrc1 = regs & 0xff;                                                \
+        vsrc2 = regs >> 8;                                                  \
+        ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);         \
+        val1 = GET_REGISTER##_type(vsrc1);                                  \
+        val2 = GET_REGISTER##_type(vsrc2);                                  \
+        if (val1 == val2)                                                   \
+            result = 0;                                                     \
+        else if (val1 < val2)                                               \
+            result = -1;                                                    \
+        else if (val1 > val2)                                               \
+            result = 1;                                                     \
+        else                                                                \
+            result = (_nanVal);                                             \
+        ILOGV("+ result=%d\n", result);                                     \
+        SET_REGISTER(vdst, result);                                         \
+    }                                                                       \
+    FINISH(2);
+
+/* if-test vA, vB: backward branches run the periodic (suspend) checks */
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp)                             \
+    HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/)                                \
+        vsrc1 = INST_A(inst);                                               \
+        vsrc2 = INST_B(inst);                                               \
+        if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) {       \
+            int branchOffset = (s2)FETCH(1);    /* sign-extended */         \
+            ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2,        \
+                branchOffset);                                              \
+            ILOGV("> branch taken");                                        \
+            if (branchOffset < 0)                                           \
+                PERIODIC_CHECKS(kInterpEntryInstr, branchOffset);           \
+            FINISH(branchOffset);                                           \
+        } else {                                                            \
+            ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2);             \
+            FINISH(2);                                                      \
+        }
+
+/* if-testz vAA: compare a single register against zero */
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp)                            \
+    HANDLE_OPCODE(_opcode /*vAA, +BBBB*/)                                   \
+        vsrc1 = INST_AA(inst);                                              \
+        if ((s4) GET_REGISTER(vsrc1) _cmp 0) {                              \
+            int branchOffset = (s2)FETCH(1);    /* sign-extended */         \
+            ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset);    \
+            ILOGV("> branch taken");                                        \
+            if (branchOffset < 0)                                           \
+                PERIODIC_CHECKS(kInterpEntryInstr, branchOffset);           \
+            FINISH(branchOffset);                                           \
+        } else {                                                            \
+            ILOGV("|if-%s v%d,-", (_opname), vsrc1);                        \
+            FINISH(2);                                                      \
+        }
+
+/* unary op vA <- _pfx vB _sfx (e.g. neg via "-", not via "^ ~0") */
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type)                    \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx);    \
+        FINISH(1);
+
+/*
+ * Binary int op: vAA <- vBB _op vCC.  When _chkdiv is set, a zero divisor
+ * throws ArithmeticException instead of executing the (undefined) division.
+ */
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv)                     \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1);                   \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER(vsrc2) == 0) {                                 \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2));         \
+    }                                                                       \
+    FINISH(2);
+
+/* int shift: the count register is masked to its low 5 bits (& 0x1f) */
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op)                     \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1);                   \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f));    \
+    }                                                                       \
+    FINISH(2);
+
+/* int op with 16-bit literal, sign-extended via the (s2) casts */
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _cast, _op, _chkdiv)        \
+    HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/)                               \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        vsrc2 = FETCH(1);                                                   \
+        ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x",                             \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        if (_chkdiv) {                                                      \
+            if ((s2) vsrc2 == 0) {                                          \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (s2) vsrc2);                      \
+        FINISH(2);
+
+/* int op with 8-bit literal, sign-extended via the (s1) casts */
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv)                \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/)                               \
+    {                                                                       \
+        u2 litInfo;                                                         \
+        vdst = INST_AA(inst);                                               \
+        litInfo = FETCH(1);                                                 \
+        vsrc1 = litInfo & 0xff;                                             \
+        vsrc2 = litInfo >> 8;       /* constant */                          \
+        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x",                              \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        if (_chkdiv) {                                                      \
+            if ((s1) vsrc2 == 0) {                                          \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2);                       \
+    }                                                                       \
+    FINISH(2);
+
+/* int shift with 8-bit literal count, masked to the low 5 bits */
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op)                \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/)                               \
+    {                                                                       \
+        u2 litInfo;                                                         \
+        vdst = INST_AA(inst);                                               \
+        litInfo = FETCH(1);                                                 \
+        vsrc1 = litInfo & 0xff;                                             \
+        vsrc2 = litInfo >> 8;       /* constant */                          \
+        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x",                              \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f));                  \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv)               \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1);             \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER(vsrc1) == 0) {                                 \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1));          \
+        FINISH(1);
+
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op)               \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1);             \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f));     \
+        FINISH(1);
+
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv)                    \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);       \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER_WIDE(vsrc2) == 0) {                            \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER_WIDE(vdst,                                             \
+            (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op)                    \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);       \
+        SET_REGISTER_WIDE(vdst,                                             \
+            _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv)              \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1);            \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER_WIDE(vsrc1) == 0) {                            \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER_WIDE(vdst,                                             \
+            (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1)); \
+        FINISH(1);
+
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op)              \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1);            \
+        SET_REGISTER_WIDE(vdst,                                             \
+            _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+        FINISH(1);
+
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op)                            \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);      \
+        SET_REGISTER_FLOAT(vdst,                                            \
+            GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2));       \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op)                           \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);     \
+        SET_REGISTER_DOUBLE(vdst,                                           \
+            GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2));     \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op)                      \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1);           \
+        SET_REGISTER_FLOAT(vdst,                                            \
+            GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1));        \
+        FINISH(1);
+
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op)                     \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1);          \
+        SET_REGISTER_DOUBLE(vdst,                                           \
+            GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1));      \
+        FINISH(1);
+
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        ArrayObject* arrayObj;                                              \
+        u2 arrayInfo;                                                       \
+        EXPORT_PC();                                                        \
+        vdst = INST_AA(inst);                                               \
+        arrayInfo = FETCH(1);                                               \
+        vsrc1 = arrayInfo & 0xff;    /* array ptr */                        \
+        vsrc2 = arrayInfo >> 8;      /* index */                            \
+        ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);        \
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);                      \
+        if (!checkForNull((Object*) arrayObj))                              \
+            GOTO(exceptionThrown);                                          \
+        if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
+            LOGV("Invalid array access: %p %d (len=%d)\n",                  \
+                arrayObj, vsrc2, arrayObj->length);                         \
+            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+                NULL);                                                      \
+            GOTO(exceptionThrown);                                          \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)]);            \
+        ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));  \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        ArrayObject* arrayObj;                                              \
+        u2 arrayInfo;                                                       \
+        EXPORT_PC();                                                        \
+        vdst = INST_AA(inst);       /* AA: source value */                  \
+        arrayInfo = FETCH(1);                                               \
+        vsrc1 = arrayInfo & 0xff;   /* BB: array ptr */                     \
+        vsrc2 = arrayInfo >> 8;     /* CC: index */                         \
+        ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);        \
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);                      \
+        if (!checkForNull((Object*) arrayObj))                              \
+            GOTO(exceptionThrown);                                          \
+        if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
+            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+                NULL);                                                      \
+            GOTO(exceptionThrown);                                          \
+        }                                                                   \
+        ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+        ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)] =                \
+            GET_REGISTER##_regsize(vdst);                                   \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits.  Consider:
+ *   short foo = -1  (sets a 32-bit register to 0xffffffff)
+ *   iput-quick foo  (writes all 32 bits to the field)
+ *   short bar = 1   (sets a 32-bit register to 0x00000001)
+ *   iput-short      (writes the low 16 bits to the field)
+ *   iget-quick foo  (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field.  This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time.  On
+ * a device with a 16-bit data bus this is sub-optimal.  (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO(exceptionThrown);                                          \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(method->clazz, ref);               \
+            if (ifield == NULL)                                             \
+                GOTO(exceptionThrown);                                      \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        Object* obj;                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field offset */                          \
+        ILOGV("|iget%s-quick v%d,v%d,field@+%u",                            \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNullExportPC(obj, fp, pc))                             \
+            GOTO(exceptionThrown);                                          \
+        SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref));        \
+        ILOGV("+ IGETQ %d=0x%08llx", ref,                                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO(exceptionThrown);                                          \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(method->clazz, ref);               \
+            if (ifield == NULL)                                             \
+                GOTO(exceptionThrown);                                      \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        Object* obj;                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field offset */                          \
+        ILOGV("|iput%s-quick v%d,v%d,field@+%u",                            \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNullExportPC(obj, fp, pc))                             \
+            GOTO(exceptionThrown);                                          \
+        dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst));        \
+        ILOGV("+ IPUTQ %d=0x%08llx", ref,                                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        vdst = INST_AA(inst);                                               \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref);           \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(method->clazz, ref);             \
+            if (sfield == NULL)                                             \
+                GOTO(exceptionThrown);                                      \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        vdst = INST_AA(inst);                                               \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref);           \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(method->clazz, ref);             \
+            if (sfield == NULL)                                             \
+                GOTO(exceptionThrown);                                      \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+
diff --git a/vm/mterp/common/asm-constants.h b/vm/mterp/common/asm-constants.h
new file mode 100644
index 0000000..d7a975f
--- /dev/null
+++ b/vm/mterp/common/asm-constants.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Constants used by the assembler and verified by the C compiler.
+ */
+
+#if defined(ASM_DEF_VERIFY)
+  /*
+   * Generate C fragments that verify values; assumes "bool failed" exists.
+   * These are all constant expressions, so on success these will compile
+   * down to nothing.
+   */
+# define MTERP_OFFSET(_name, _type, _field, _offset)                        \
+    if (offsetof(_type, _field) != _offset) {                               \
+        LOGE("Bad asm offset %s (%d), should be %d\n",                      \
+            #_name, _offset, (int) offsetof(_type, _field));                \
+        failed = true;                                                      \
+    }
+# define MTERP_SIZEOF(_name, _type, _size)                                  \
+    if (sizeof(_type) != (_size)) {                                         \
+        LOGE("Bad asm sizeof %s (%d), should be %d\n",                      \
+            #_name, (_size), (int) sizeof(_type));                          \
+        failed = true;                                                      \
+    }
+# define MTERP_CONSTANT(_name, _value)                                      \
+    if ((_name) != (_value)) {                                              \
+        LOGE("Bad asm constant %s (%d), should be %d\n",                    \
+            #_name, (_value), (_name));                                     \
+        failed = true;                                                      \
+    }
+#else
+  /* generate constant labels for the assembly output */
+# define MTERP_OFFSET(name, type, field, offset)    name = offset
+# define MTERP_SIZEOF(name, type, size)             name = size
+# define MTERP_CONSTANT(name, value)                name = value
+#endif
+
+/*
+ * Platform dependencies.  Some platforms require 64-bit alignment of 64-bit
+ * data structures.  Some versions of gcc will hold small enumerated types
+ * in a char instead of an int.
+ */
+#if defined(__ARM_EABI__)
+# define MTERP_NO_UNALIGN_64
+#endif
+#if defined(__ARM_EABI__)           // TODO: should be gcc version?
+# define MTERP_SMALL_ENUM   1
+#else
+# define MTERP_SMALL_ENUM   4
+#endif
+
+/*
+ * This file must only contain the following kinds of statements:
+ *
+ *  MTERP_OFFSET(name, StructType, fieldname, offset)
+ *
+ *   Declares that the expected offset of StructType.fieldname is "offset".
+ *   This will break whenever the contents of StructType are rearranged.
+ *
+ *  MTERP_SIZEOF(name, Type, size)
+ *
+ *   Declares that the expected size of Type is "size".
+ *
+ *  MTERP_CONSTANT(name, value)
+ *
+ *   Declares that the expected value of "name" is "value".  Useful for
+ *   enumerations and defined constants that are inaccessible to the
+ *   assembly source.  (Note this assumes you will use the same name in
+ *   both C and assembly, which is good practice.)
+ *
+ * In all cases the "name" field is the label you will use in the assembler.
+ *
+ * The "value" field must always be an actual number, not a symbol, unless
+ * you are sure that the symbol's value will be visible to both C and
+ * assembly sources.  There may be restrictions on the possible range of
+ * values (which are usually provided as immediate operands), so it's best
+ * to restrict numbers assuming a signed 8-bit field.
+ *
+ * On the assembly side, these just become "name=value" constants.  On the
+ * C side, these turn into assertions that cause the VM to abort if the
+ * values are incorrect.
+ */
+
+/* globals (sanity check for LDR vs LDRB) */
+MTERP_SIZEOF(sizeofGlobal_debuggerActive, gDvm.debuggerActive, MTERP_SMALL_ENUM)
+#if defined(WITH_PROFILER)
+MTERP_SIZEOF(sizeofGlobal_activeProfilers, gDvm.activeProfilers, 4)
+#endif
+
+/* MterpGlue fields */
+MTERP_OFFSET(offGlue_pc,                MterpGlue, pc, 0)
+MTERP_OFFSET(offGlue_fp,                MterpGlue, fp, 4)
+MTERP_OFFSET(offGlue_retval,            MterpGlue, retval, 8)
+MTERP_OFFSET(offGlue_method,            MterpGlue, method, 16)
+MTERP_OFFSET(offGlue_methodClassDex,    MterpGlue, methodClassDex, 20)
+MTERP_OFFSET(offGlue_self,              MterpGlue, self, 24)
+MTERP_OFFSET(offGlue_bailPtr,           MterpGlue, bailPtr, 28)
+MTERP_OFFSET(offGlue_interpStackEnd,    MterpGlue, interpStackEnd, 32)
+MTERP_OFFSET(offGlue_pSelfSuspendCount, MterpGlue, pSelfSuspendCount, 36)
+#if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
+MTERP_OFFSET(offGlue_pDebuggerActive,   MterpGlue, pDebuggerActive, 40)
+MTERP_OFFSET(offGlue_pActiveProfilers,  MterpGlue, pActiveProfilers, 44)
+MTERP_OFFSET(offGlue_entryPoint,        MterpGlue, entryPoint, 48)
+#elif defined(WITH_DEBUGGER)
+MTERP_OFFSET(offGlue_pDebuggerActive,   MterpGlue, pDebuggerActive, 40)
+MTERP_OFFSET(offGlue_entryPoint,        MterpGlue, entryPoint, 44)
+#elif defined(WITH_PROFILER)
+MTERP_OFFSET(offGlue_pActiveProfilers,  MterpGlue, pActiveProfilers, 40)
+MTERP_OFFSET(offGlue_entryPoint,        MterpGlue, entryPoint, 44)
+#else
+MTERP_OFFSET(offGlue_entryPoint,        MterpGlue, entryPoint, 40)
+#endif
+/* make sure all JValue union members are stored at the same offset */
+MTERP_OFFSET(offGlue_retval_z,          MterpGlue, retval.z, 8)
+MTERP_OFFSET(offGlue_retval_i,          MterpGlue, retval.i, 8)
+MTERP_OFFSET(offGlue_retval_j,          MterpGlue, retval.j, 8)
+MTERP_OFFSET(offGlue_retval_l,          MterpGlue, retval.l, 8)
+
+/* DvmDex fields */
+MTERP_OFFSET(offDvmDex_pResStrings,     DvmDex, pResStrings, 8)
+MTERP_OFFSET(offDvmDex_pResClasses,     DvmDex, pResClasses, 12)
+MTERP_OFFSET(offDvmDex_pResMethods,     DvmDex, pResMethods, 16)
+MTERP_OFFSET(offDvmDex_pResFields,      DvmDex, pResFields, 20)
+MTERP_OFFSET(offDvmDex_pInterfaceCache, DvmDex, pInterfaceCache, 24)
+
+/* StackSaveArea fields */
+#ifdef EASY_GDB
+MTERP_OFFSET(offStackSaveArea_prevSave, StackSaveArea, prevSave, 0)
+MTERP_OFFSET(offStackSaveArea_prevFrame, StackSaveArea, prevFrame, 4)
+MTERP_OFFSET(offStackSaveArea_savedPc,  StackSaveArea, savedPc, 8)
+MTERP_OFFSET(offStackSaveArea_method,   StackSaveArea, method, 12)
+MTERP_OFFSET(offStackSaveArea_currentPc, StackSaveArea, xtra.currentPc, 16)
+MTERP_OFFSET(offStackSaveArea_localRefTop, StackSaveArea, xtra.localRefTop, 16)
+MTERP_SIZEOF(sizeofStackSaveArea,       StackSaveArea, 20)
+#else
+MTERP_OFFSET(offStackSaveArea_prevFrame, StackSaveArea, prevFrame, 0)
+MTERP_OFFSET(offStackSaveArea_savedPc,  StackSaveArea, savedPc, 4)
+MTERP_OFFSET(offStackSaveArea_method,   StackSaveArea, method, 8)
+MTERP_OFFSET(offStackSaveArea_currentPc, StackSaveArea, xtra.currentPc, 12)
+MTERP_OFFSET(offStackSaveArea_localRefTop, StackSaveArea, xtra.localRefTop, 12)
+MTERP_SIZEOF(sizeofStackSaveArea,       StackSaveArea, 16)
+#endif
+
+/* InstField fields */
+#ifdef PROFILE_FIELD_ACCESS
+MTERP_OFFSET(offInstField_byteOffset,   InstField, byteOffset, 24)
+#else
+MTERP_OFFSET(offInstField_byteOffset,   InstField, byteOffset, 16)
+#endif
+
+/* StaticField fields */
+#ifdef PROFILE_FIELD_ACCESS
+MTERP_OFFSET(offStaticField_value,      StaticField, value, 24)
+#else
+MTERP_OFFSET(offStaticField_value,      StaticField, value, 16)
+#endif
+
+/* Method fields */
+MTERP_OFFSET(offMethod_clazz,           Method, clazz, 0)
+MTERP_OFFSET(offMethod_accessFlags,     Method, accessFlags, 4)
+MTERP_OFFSET(offMethod_methodIndex,     Method, methodIndex, 8)
+MTERP_OFFSET(offMethod_registersSize,   Method, registersSize, 10)
+MTERP_OFFSET(offMethod_outsSize,        Method, outsSize, 12)
+MTERP_OFFSET(offMethod_name,            Method, name, 16)
+MTERP_OFFSET(offMethod_insns,           Method, insns, 32)
+MTERP_OFFSET(offMethod_nativeFunc,      Method, nativeFunc, 40)
+
+/* InlineOperation fields -- code assumes "func" offset is zero, do not alter */
+MTERP_OFFSET(offInlineOperation_func,   InlineOperation, func, 0)
+
+/* Thread fields */
+MTERP_OFFSET(offThread_stackOverflowed, Thread, stackOverflowed, 40)
+MTERP_OFFSET(offThread_curFrame,        Thread, curFrame, 44)
+MTERP_OFFSET(offThread_exception,       Thread, exception, 48)
+MTERP_OFFSET(offThread_jniLocal_nextEntry, \
+                                        Thread, jniLocalRefTable.nextEntry, 80)
+
+/* Object fields */
+MTERP_OFFSET(offObject_clazz,           Object, clazz, 0)
+
+/* ArrayObject fields */
+MTERP_OFFSET(offArrayObject_length,     ArrayObject, length, 8)
+#ifdef MTERP_NO_UNALIGN_64
+MTERP_OFFSET(offArrayObject_contents,   ArrayObject, contents, 16)
+#else
+MTERP_OFFSET(offArrayObject_contents,   ArrayObject, contents, 12)
+#endif
+
+/* ClassObject fields */
+MTERP_OFFSET(offClassObject_descriptor, ClassObject, descriptor, 24)
+MTERP_OFFSET(offClassObject_accessFlags, ClassObject, accessFlags, 32)
+MTERP_OFFSET(offClassObject_pDvmDex,    ClassObject, pDvmDex, 36)
+MTERP_OFFSET(offClassObject_status,     ClassObject, status, 40)
+MTERP_OFFSET(offClassObject_super,      ClassObject, super, 72)
+MTERP_OFFSET(offClassObject_vtableCount, ClassObject, vtableCount, 112)
+MTERP_OFFSET(offClassObject_vtable,     ClassObject, vtable, 116)
+
+/* InterpEntry enumeration */
+MTERP_SIZEOF(sizeofInterpEntry,         InterpEntry, MTERP_SMALL_ENUM)
+MTERP_CONSTANT(kInterpEntryInstr,   0)
+MTERP_CONSTANT(kInterpEntryReturn,  1)
+MTERP_CONSTANT(kInterpEntryThrow,   2)
+
+/* ClassStatus enumeration */
+MTERP_SIZEOF(sizeofClassStatus,         ClassStatus, MTERP_SMALL_ENUM)
+MTERP_CONSTANT(CLASS_INITIALIZED,   7)
+
+/* MethodType enumeration */
+MTERP_SIZEOF(sizeofMethodType,          MethodType, MTERP_SMALL_ENUM)
+MTERP_CONSTANT(METHOD_DIRECT,       1)
+MTERP_CONSTANT(METHOD_STATIC,       2)
+MTERP_CONSTANT(METHOD_VIRTUAL,      3)
+MTERP_CONSTANT(METHOD_INTERFACE,    4)
+
+/* ClassObject constants */
+MTERP_CONSTANT(ACC_PRIVATE,         0x0002)
+MTERP_CONSTANT(ACC_STATIC,          0x0008)
+MTERP_CONSTANT(ACC_NATIVE,          0x0100)
+MTERP_CONSTANT(ACC_INTERFACE,       0x0200)
+MTERP_CONSTANT(ACC_ABSTRACT,        0x0400)
+
+/* flags for dvmMalloc */
+MTERP_CONSTANT(ALLOC_DONT_TRACK,    0x02)
+
+/* opcode number */
+MTERP_CONSTANT(OP_MOVE_EXCEPTION,   0x0d)
+
diff --git a/vm/mterp/config-armv5 b/vm/mterp/config-armv5
new file mode 100644
index 0000000..4bf4562
--- /dev/null
+++ b/vm/mterp/config-armv5
@@ -0,0 +1,45 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for ARMv5 architecture targets.
+#
+
+handler-size 64
+
+# source for the instruction table stub
+asm-stub armv5/stub.S
+
+# file header and basic definitions
+import c/header.c
+import armv5/header.S
+
+# common defs for the C helpers; include this before the instruction handlers
+import c/opcommon.c
+
+# arch-specific entry point to interpreter
+import armv5/entry.S
+
+# opcode list; argument to op-start is default directory
+op-start armv5
+	#op OP_FILL_ARRAY_DATA c
+op-end
+
+# "helper" code for C; include this after the instruction handlers
+import c/footer.c
+
+# common subroutines for asm
+import armv5/footer.S
+import armv5/debug.c
+
diff --git a/vm/mterp/config-desktop b/vm/mterp/config-desktop
new file mode 100644
index 0000000..06a977a
--- /dev/null
+++ b/vm/mterp/config-desktop
@@ -0,0 +1,37 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for "desktop" targets.
+#
+
+handler-size 64
+
+# C file header and basic definitions
+import c/header.c
+
+# common defs for the C opcodes
+import c/opcommon.c
+
+# opcode list; argument to op-start is default directory
+op-start c
+    # use 100% C implementations
+op-end
+
+# arch-specific entry point to interpreter
+import desktop/entry.c
+
+# "helper" code
+import c/footer.c
+
diff --git a/vm/mterp/desktop/entry.c b/vm/mterp/desktop/entry.c
new file mode 100644
index 0000000..ba1eb09
--- /dev/null
+++ b/vm/mterp/desktop/entry.c
@@ -0,0 +1,79 @@
+/*
+ * Handler function table, one entry per opcode.
+ *
+ * DEFINE_GOTO_TABLE expands H(OP_xxx) for each of the 256 opcodes (see
+ * gen-mterp.py, which requires exactly 256 entries).  By redefining "H"
+ * we instantiate the same list twice: once token-pasted into function
+ * names (pointers), once stringized (handler names for logging).
+ */
+#undef H
+#define H(_op) dvmMterp_##_op
+DEFINE_GOTO_TABLE(gDvmMterpHandlers)
+
+#undef H
+#define H(_op) #_op
+DEFINE_GOTO_TABLE(gDvmMterpHandlerNames)
+
+#include <setjmp.h>
+
+/*
+ * C mterp entry point.  This just calls the various C fallbacks, making
+ * this a slow but portable interpreter.
+ *
+ * Returns the "changeInterp" boolean handed to dvmMterpStdBail(); the
+ * return travels through longjmp, never by falling off the end of the
+ * dispatch loop below.
+ */
+bool dvmMterpStdRun(MterpGlue* glue)
+{
+    jmp_buf jmpBuf;
+    int changeInterp;
+
+    /* dvmMterpStdBail() longjmps back to the setjmp below */
+    glue->bailPtr = &jmpBuf;
+
+    /*
+     * We want to return "changeInterp" as a boolean, but we can't return
+     * zero through longjmp, so we return (boolean+1).
+     */
+    changeInterp = setjmp(jmpBuf) -1;
+    if (changeInterp >= 0) {
+        /* arrived via longjmp: someone called dvmMterpStdBail() */
+        Thread* threadSelf = dvmThreadSelf();
+        LOGVV("mterp threadid=%d returning %d\n",
+            threadSelf->threadId, changeInterp);
+        return changeInterp;
+    }
+    /* direct setjmp return (0-1 == -1): fall through, start interpreting */
+
+    /*
+     * We may not be starting at a point where we're executing instructions.
+     * We need to pick up where the other interpreter left off.
+     *
+     * In some cases we need to call into a throw/return handler which
+     * will do some processing and then either return to us (updating "glue")
+     * or longjmp back out.
+     */
+    switch (glue->entryPoint) {
+    case kInterpEntryInstr:
+        /* just start at the start */
+        break;
+    case kInterpEntryReturn:
+        dvmMterp_returnFromMethod(glue);
+        break;
+    case kInterpEntryThrow:
+        dvmMterp_exceptionThrown(glue);
+        break;
+    default:
+        dvmAbort();
+    }
+
+    /* run until somebody longjmp()s out */
+    while (true) {
+        typedef void (*Handler)(MterpGlue* glue);
+
+        /* NOTE(review): bare "pc" is presumably a macro (from c/header.c)
+         * expanding to the glue-struct program counter -- confirm */
+        u2 inst = /*glue->*/pc[0];
+        /* low 8 bits of the code unit select the opcode handler */
+        Handler handler = (Handler) gDvmMterpHandlers[inst & 0xff];
+        LOGVV("handler %p %s\n",
+            handler, (const char*) gDvmMterpHandlerNames[inst & 0xff]);
+        (*handler)(glue);
+    }
+}
+
+/*
+ * C mterp exit point.  Call here to bail out of the interpreter.
+ *
+ * Does not return; longjmps to the setjmp in dvmMterpStdRun().  The +1
+ * lets "false" (0) survive the trip, since longjmp cannot deliver zero
+ * to setjmp; dvmMterpStdRun subtracts it back out.
+ */
+void dvmMterpStdBail(MterpGlue* glue, bool changeInterp)
+{
+    jmp_buf* pJmpBuf = glue->bailPtr;
+    longjmp(*pJmpBuf, ((int)changeInterp)+1);
+}
+
diff --git a/vm/mterp/gen-mterp.py b/vm/mterp/gen-mterp.py
new file mode 100755
index 0000000..e342266
--- /dev/null
+++ b/vm/mterp/gen-mterp.py
@@ -0,0 +1,474 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2007 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Using instructions from an architecture-specific config file, generate C
+# and assembly source files for the Dalvik interpreter.
+#
+
+import sys, string, re, time
+from string import Template
+
+interp_defs_file = "../../libdex/OpCode.h" # need opcode list
+
+handler_size_bits = -1000   # log2 of handler stride; -1000 = "not set yet"
+handler_size_bytes = -1000  # handler stride in bytes; -1000 = "not set yet"
+in_op_start = 0             # 0=not started, 1=started, 2=ended
+default_op_dir = None       # directory named by the "op-start" directive
+opcode_locations = {}       # per-opcode source-dir overrides from "op" lines
+asm_stub_text = []          # stub template lines loaded by "asm-stub"
+label_prefix = ".L"         # use ".L" to hide labels from gdb
+
+
+# Exception class.
+class DataParseError(SyntaxError):
+    "Failure when parsing data file"
+
+#
+# Set any omnipresent substitution values.
+#
+# Returns a fresh dict; callers update() it with per-opcode entries.
+#
+def getGlobalSubDict():
+    return { "handler_size_bits":handler_size_bits,
+             "handler_size_bytes":handler_size_bytes }
+
+#
+# Parse arch config file --
+# Set handler_size_bytes to the value of tokens[1], and handler_size_bits to
+# log2(handler_size_bytes).  Throws an exception if "bytes" is not a power
+# of two.
+#
+def setHandlerSize(tokens):
+    global handler_size_bits, handler_size_bytes
+    if len(tokens) != 2:
+        raise DataParseError("handler-size requires one argument")
+    if handler_size_bits != -1000:
+        raise DataParseError("handler-size may only be set once")
+
+    # compute log2(n), and make sure n is a power of 2
+    handler_size_bytes = bytes = int(tokens[1])
+    bits = -1
+    while bytes > 0:
+        bytes //= 2     # halve with truncating division
+        bits += 1
+
+    if handler_size_bytes == 0 or handler_size_bytes != (1 << bits):
+        raise DataParseError("handler-size (%d) must be power of 2 and > 0" \
+                % orig_bytes)
+    handler_size_bits = bits
+
+#
+# Parse arch config file --
+# Copy a file in to the C or asm output file.
+#
+# The suffix picks the destination: ".c" goes to the global c_fp,
+# ".S" to the global asm_fp.
+#
+def importFile(tokens):
+    if len(tokens) != 2:
+        raise DataParseError("import requires one argument")
+    source = tokens[1]
+    if source.endswith(".c"):
+        appendSourceFile(tokens[1], getGlobalSubDict(), c_fp, None)
+    elif source.endswith(".S"):
+        appendSourceFile(tokens[1], getGlobalSubDict(), asm_fp, None)
+    else:
+        raise DataParseError("don't know how to import %s (expecting .c/.S)"
+                % source)
+
+#
+# Parse arch config file --
+# Copy a file in to the C or asm output file.
+#
+def setAsmStub(tokens):
+    global asm_stub_text
+    if len(tokens) != 2:
+        raise DataParseError("import requires one argument")
+    try:
+        stub_fp = open(tokens[1])
+        asm_stub_text = stub_fp.readlines()
+    except IOError, err:
+        stub_fp.close()
+        raise DataParseError("unable to load asm-stub: %s" % str(err))
+    stub_fp.close()
+
+#
+# Parse arch config file --
+# Start of opcode list.
+#
+# Remembers the default source directory and moves the in_op_start state
+# machine from "not started" (0) to "started" (1).
+#
+def opStart(tokens):
+    global in_op_start
+    global default_op_dir
+    if len(tokens) != 2:
+        raise DataParseError("opStart takes a directory name argument")
+    if in_op_start != 0:
+        raise DataParseError("opStart can only be specified once")
+    default_op_dir = tokens[1]
+    in_op_start = 1
+
+#
+# Parse arch config file --
+# Set location of a single opcode's source file.
+#
+# tokens[1] is the OP_xxx name, tokens[2] the source directory override.
+#
+def opEntry(tokens):
+    #global opcode_locations
+    if len(tokens) != 3:
+        raise DataParseError("op requires exactly two arguments")
+    if in_op_start != 1:
+        raise DataParseError("op statements must be between opStart/opEnd")
+    try:
+        # validation only -- the index value itself is not used
+        index = opcodes.index(tokens[1])
+    except ValueError:
+        raise DataParseError("unknown opcode %s" % tokens[1])
+    opcode_locations[tokens[1]] = tokens[2]
+
+#
+# Parse arch config file --
+# End of opcode list; emit instruction blocks.
+#
+def opEnd(tokens):
+    global in_op_start
+    if len(tokens) != 1:
+        raise DataParseError("opEnd takes no arguments")
+    if in_op_start != 1:
+        raise DataParseError("opEnd must follow opStart, and only appear once")
+    in_op_start = 2
+
+    # all "op" overrides are known now; generate the 256 handlers
+    loadAndEmitOpcodes()
+
+
+#
+# Extract an ordered list of instructions from the VM sources.  We use the
+# "goto table" definition macro, which has exactly 256 entries.
+#
+def getOpcodeList():
+    opcodes = []
+    opcode_fp = open(interp_defs_file)
+    opcode_re = re.compile(r"^\s*H\(OP_(\w+)\),.*", re.DOTALL)
+    for line in opcode_fp:
+        match = opcode_re.match(line)
+        if not match:
+            continue
+        opcodes.append("OP_" + match.group(1))
+    opcode_fp.close()
+
+    if len(opcodes) != 256:
+        print "ERROR: found %d opcodes in Interp.h (expected 256)" \
+                % len(opcodes)
+        raise SyntaxError, "bad opcode count"
+    return opcodes
+
+
+#
+# Load and emit opcodes for all 256 instructions.
+#
+def loadAndEmitOpcodes():
+    sister_list = []
+    assert len(opcodes) == 256
+    need_dummy_start = False
+
+    # point dvmAsmInstructionStart at the first handler or stub
+    asm_fp.write("\n    .global dvmAsmInstructionStart\n")
+    asm_fp.write("    .type   dvmAsmInstructionStart, %function\n")
+    asm_fp.write("dvmAsmInstructionStart = " + label_prefix + "_OP_NOP\n")
+    asm_fp.write("    .text\n\n")
+
+    for i in xrange(256):
+        op = opcodes[i]
+
+        # a per-opcode "op" override wins over the op-start default dir
+        if opcode_locations.has_key(op):
+            location = opcode_locations[op]
+        else:
+            location = default_op_dir
+
+        if location == "c":
+            loadAndEmitC(location, i)
+            if len(asm_stub_text) == 0:
+                need_dummy_start = True
+        else:
+            loadAndEmitAsm(location, i, sister_list)
+
+    # For a 100% C implementation, there are no asm handlers or stubs.  We
+    # need to have the dvmAsmInstructionStart label point at OP_NOP, and it's
+    # too annoying to try to slide it in after the alignment psuedo-op, so
+    # we take the low road and just emit a dummy OP_NOP here.
+    if need_dummy_start:
+        asm_fp.write("    .balign %d\n" % handler_size_bytes)
+        asm_fp.write(label_prefix + "_OP_NOP:   /* dummy */\n");
+
+    asm_fp.write("\n    .balign %d\n" % handler_size_bytes)
+    asm_fp.write("    .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart\n")
+    asm_fp.write("    .global dvmAsmInstructionEnd\n")
+    asm_fp.write("dvmAsmInstructionEnd:\n")
+
+    # "sisters" are handler continuations diverted by %break directives
+    # (collected by appendSourceFile) and emitted out of line here
+    emitSectionComment("Sister implementations", asm_fp)
+    asm_fp.write("    .global dvmAsmSisterStart\n")
+    asm_fp.write("    .type   dvmAsmSisterStart, %function\n")
+    asm_fp.write("    .text\n")
+    asm_fp.write("    .balign 4\n")
+    asm_fp.write("dvmAsmSisterStart:\n")
+    asm_fp.writelines(sister_list)
+
+    asm_fp.write("\n    .size   dvmAsmSisterStart, .-dvmAsmSisterStart\n")
+    asm_fp.write("    .global dvmAsmSisterEnd\n")
+    asm_fp.write("dvmAsmSisterEnd:\n\n")
+
+#
+# Load a C fragment and emit it, then output an assembly stub.
+#
+# "location" is the source directory, "opindex" the 0..255 opcode number.
+#
+def loadAndEmitC(location, opindex):
+    op = opcodes[opindex]
+    source = "%s/%s.c" % (location, op)
+    print " emit %s --> C" % source
+    dict = getGlobalSubDict()
+    dict.update({ "opcode":op, "opnum":opindex })
+
+    appendSourceFile(source, dict, c_fp, None)
+
+    # pure-C builds have no stub text, and then emit no asm at all
+    if len(asm_stub_text) != 0:
+        emitAsmStub(asm_fp, dict)
+
+#
+# Load an assembly fragment and emit it.
+#
+# Lines after a "%break" in the fragment are collected into sister_list
+# instead of being written inline (see appendSourceFile).
+#
+def loadAndEmitAsm(location, opindex, sister_list):
+    op = opcodes[opindex]
+    source = "%s/%s.S" % (location, op)
+    dict = getGlobalSubDict()
+    dict.update({ "opcode":op, "opnum":opindex })
+    print " emit %s --> asm" % source
+
+    emitAsmHeader(asm_fp, dict)
+    appendSourceFile(source, dict, asm_fp, sister_list)
+
+#
+# Output the alignment directive and label for an assembly piece.
+#
+# "dict" must provide the "opcode" and "opnum" substitution values.
+#
+def emitAsmHeader(outfp, dict):
+    outfp.write("/* ------------------------------ */\n")
+    # The alignment directive ensures that the handler occupies
+    # at least the correct amount of space.  We don't try to deal
+    # with overflow here.
+    outfp.write("    .balign %d\n" % handler_size_bytes)
+    # Emit a label so that gdb will say the right thing.  We prepend an
+    # underscore so the symbol name doesn't clash with the OpCode enum.
+    outfp.write(label_prefix + "_%(opcode)s: /* 0x%(opnum)02x */\n" % dict)
+
+#
+# Output a generic instruction stub that updates the "glue" struct and
+# calls the C implementation.
+#
+# The stub text (loaded by the asm-stub directive) is template-substituted
+# with the per-opcode values in "dict".
+#
+def emitAsmStub(outfp, dict):
+    emitAsmHeader(outfp, dict)
+    for line in asm_stub_text:
+        templ = Template(line)
+        outfp.write(templ.substitute(dict))
+
+#
+# Append the file specified by "source" to the open "outfp".  Each line will
+# be template-replaced using the substitution dictionary "dict".
+#
+# If the first line of the file starts with "%" it is taken as a directive.
+# A "%include" line contains a filename and, optionally, a Python-style
+# dictionary declaration with substitution strings.  (This is implemented
+# with recursion.)
+#
+# If "sister_list" is provided, and we find a line that contains only "&",
+# all subsequent lines from the file will be appended to sister_list instead
+# of copied to the output.
+#
+# This may modify "dict".
+#
+def appendSourceFile(source, dict, outfp, sister_list):
+    outfp.write("/* File: %s */\n" % source)
+    infp = open(source, "r")
+    in_sister = False
+    for line in infp:
+        if line.startswith("%include"):
+            # Parse the "include" line
+            tokens = line.strip().split(' ', 2)
+            if len(tokens) < 2:
+                raise DataParseError("malformed %%include in %s" % source)
+
+            alt_source = tokens[1].strip("\"")
+            if alt_source == source:
+                raise DataParseError("self-referential %%include in %s"
+                        % source)
+
+            # included file sees our substitutions plus its own overrides
+            new_dict = dict.copy()
+            if len(tokens) == 3:
+                # eval of text from the build's own source tree -- assumed
+                # trusted input
+                new_dict.update(eval(tokens[2]))
+            #print " including src=%s dict=%s" % (alt_source, new_dict)
+            appendSourceFile(alt_source, new_dict, outfp, sister_list)
+            continue
+
+        elif line.startswith("%default"):
+            # copy keywords into dictionary
+            tokens = line.strip().split(' ', 1)
+            if len(tokens) < 2:
+                raise DataParseError("malformed %%default in %s" % source)
+            defaultValues = eval(tokens[1])
+            for entry in defaultValues:
+                dict.setdefault(entry, defaultValues[entry])
+            continue
+
+        elif line.startswith("%verify"):
+            # more to come, someday
+            continue
+
+        elif line.startswith("%break") and sister_list != None:
+            # allow more than one %break, ignoring all following the first
+            if not in_sister:
+                in_sister = True
+                sister_list.append("\n/* continuation for %(opcode)s */\n"%dict)
+            continue
+
+        # perform keyword substitution if a dictionary was provided
+        if dict != None:
+            templ = Template(line)
+            try:
+                subline = templ.substitute(dict)
+            except KeyError, err:
+                raise DataParseError("keyword substitution failed in %s: %s"
+                        % (source, str(err)))
+            except:
+                print "ERROR: substitution failed: " + line
+                raise
+        else:
+            subline = line
+
+        # write output to appropriate file
+        if in_sister:
+            sister_list.append(subline)
+        else:
+            outfp.write(subline)
+    outfp.write("\n")
+    infp.close()
+
+#
+# Emit a C-style section header comment.
+#
+# "str" is the title text, "fp" the output file.
+#
+def emitSectionComment(str, fp):
+    equals = "========================================" \
+             "==================================="
+
+    fp.write("\n/*\n * %s\n *  %s\n * %s\n */\n" %
+        (equals, str, equals))
+
+
+#
+# ===========================================================================
+# "main" code
+#
+
+#
+# Check args.
+#
+if len(sys.argv) != 3:
+    print "Usage: %s target-arch output-dir" % sys.argv[0]
+    sys.exit(2)
+
+target_arch = sys.argv[1]
+output_dir = sys.argv[2]
+
+#
+# Extract opcode list.
+#
+opcodes = getOpcodeList()
+#for op in opcodes:
+#    print "  %s" % op
+
+#
+# Open config file.
+#
+try:
+    config_fp = open("config-%s" % target_arch)
+except:
+    print "Unable to open config file 'config-%s'" % target_arch
+    sys.exit(1)
+
+#
+# Open and prepare output files.
+#
+try:
+    c_fp = open("%s/InterpC-%s.c" % (output_dir, target_arch), "w")
+    asm_fp = open("%s/InterpAsm-%s.S" % (output_dir, target_arch), "w")
+except:
+    print "Unable to open output files"
+    print "Make sure directory '%s' exists and existing files are writable" \
+            % output_dir
+    # Ideally we'd remove the files to avoid confusing "make", but if they
+    # failed to open we probably won't be able to remove them either.
+    sys.exit(1)
+
+print "Generating %s, %s" % (c_fp.name, asm_fp.name)
+
+file_header = """/*
+ * This file was generated automatically by gen-mterp.py for '%s'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+""" % (target_arch)
+
+c_fp.write(file_header)
+asm_fp.write(file_header)
+
+#
+# Process the config file.
+#
+failed = False
+try:
+    for line in config_fp:
+        line = line.strip()         # remove CRLF, leading spaces
+        # NOTE(review): split(' ') yields empty tokens for consecutive
+        # spaces, so config lines must separate fields with single spaces
+        tokens = line.split(' ')    # tokenize
+        #print "%d: %s" % (len(tokens), tokens)
+        if len(tokens[0]) == 0:
+            #print "  blank"
+            pass
+        elif tokens[0][0] == '#':
+            #print "  comment"
+            pass
+        else:
+            if tokens[0] == "handler-size":
+                setHandlerSize(tokens)
+            elif tokens[0] == "import":
+                importFile(tokens)
+            elif tokens[0] == "asm-stub":
+                setAsmStub(tokens)
+            elif tokens[0] == "op-start":
+                opStart(tokens)
+            elif tokens[0] == "op-end":
+                opEnd(tokens)
+            elif tokens[0] == "op":
+                opEntry(tokens)
+            else:
+                raise DataParseError, "unrecognized command '%s'" % tokens[0]
+except DataParseError, err:
+    print "Failed: " + str(err)
+    # TODO: remove output files so "make" doesn't get confused
+    failed = True
+    c_fp.close()
+    asm_fp.close()
+    c_fp = asm_fp = None
+
+config_fp.close()
+
+#
+# Done!
+#
+if c_fp:
+    c_fp.close()
+if asm_fp:
+    asm_fp.close()
+
+# exit status: 0 on success, 1 (True) if a DataParseError occurred
+sys.exit(failed)
diff --git a/vm/mterp/out/InterpAsm-armv5.S b/vm/mterp/out/InterpAsm-armv5.S
new file mode 100644
index 0000000..1e776ca
--- /dev/null
+++ b/vm/mterp/out/InterpAsm-armv5.S
@@ -0,0 +1,9925 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'armv5'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: armv5/header.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ARMv5 definitions and declarations.
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them.
+
+Stack is "full descending".  Only the arguments that don't fit in the first 4
+registers are placed on the stack.  "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+Mterp and ARM notes:
+
+The following registers have fixed assignments:
+
+  reg nick      purpose
+  r4  rPC       interpreted program counter, used for fetching instructions
+  r5  rFP       interpreted frame pointer, used for accessing locals and args
+  r6  rGLUE     MterpGlue pointer
+  r7  rIBASE    interpreted instruction base pointer, used for computed goto
+  r8  rINST     first 16-bit code unit of current instruction
+
+Macros are provided for common operations.  Each macro MUST emit only
+one instruction to make instruction-counting easier.  They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC     r4
+#define rFP     r5
+#define rGLUE   r6
+#define rIBASE  r7
+#define rINST   r8
+
+/* save/restore the PC and/or FP from the glue struct */
+#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
+#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
+#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
+#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
+#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
+#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}
+
+/*
+ * "export" the PC to the stack frame, f/b/o future exception objects.  Must
+ * be done *before* something calls dvmThrowException.
+ *
+ * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
+ * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
+ *
+ * It's okay to do this more than once.
+ */
+#define EXPORT_PC() \
+    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]
+
+/*
+ * Given a frame pointer, find the stack save area.
+ *
+ * In C this is "((StackSaveArea*)(_fp) -1)".
+ */
+#define SAVEAREA_FROM_FP(_reg, _fpreg) \
+    sub     _reg, _fpreg, #sizeofStackSaveArea
+
+/*
+ * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
+ */
+#define FETCH_INST()            ldrh    rINST, [rPC]
+
+/*
+ * Fetch the next instruction from the specified offset.  Advances rPC
+ * to point to the next instruction.  "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss.  (This also implies that it must come after
+ * EXPORT_PC().)
+ */
+#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #(_count*2)]!
+
+/*
+ * Fetch the next instruction from an offset specified by _reg.  Updates
+ * rPC to point to the next instruction.  "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ *
+ * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the
+ * bits that hold the shift distance are used for the half/byte/sign flags.
+ * In some cases we can pre-double _reg for free, so we require a byte offset
+ * here.
+ */
+#define FETCH_ADVANCE_INST_RB(_reg) ldrh    rINST, [rPC, _reg]!
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC.  The
+ * "_count" value is in 16-bit code units.  Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
+#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]
+
+/*
+ * Fetch one byte from an offset past the current PC.  Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+#define GET_INST_OPCODE(_reg)   and     _reg, rINST, #255
+
+/*
+ * Begin executing the opcode in _reg.
+ */
+#define GOTO_OPCODE(_reg)       add     pc, rIBASE, _reg, lsl #6
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(_reg, _vreg)   ldr     _reg, [rFP, _vreg, lsl #2]
+#define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../common/asm-constants.h"
+
+
+/* File: armv5/entry.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+/*
+ * We don't have formal stack frames, so gdb scans upward in the code
+ * to find the start of the function (a label with the %function type),
+ * and then looks at the next few instructions to figure out what
+ * got pushed onto the stack.  From this it figures out how to restore
+ * the registers, including PC, for the previous stack frame.  If gdb
+ * sees a non-function label, it stops scanning, so either we need to
+ * have nothing but assembler-local labels between the entry point and
+ * the break, or we need to fake it out.
+ *
+ * When this is defined, we add some stuff to make gdb less confused.
+ */
+#define ASSIST_DEBUGGER 1
+
+    .text
+    .align  2
+    .global dvmMterpStdRun
+    .type   dvmMterpStdRun, %function
+
+/*
+ * On entry:
+ *  r0  MterpGlue* glue
+ *
+ * This function returns a boolean "changeInterp" value.  The return comes
+ * via a call to dvmMterpStdBail().
+ */
+dvmMterpStdRun:
+#define MTERP_ENTRY1 \
+    .save {r4-r10,fp,lr}; \
+    stmfd   sp!, {r4-r10,fp,lr}         @ save 9 regs
+#define MTERP_ENTRY2 \
+    .pad    #4; \
+    sub     sp, sp, #4                  @ align 64
+
+    .fnstart
+    MTERP_ENTRY1
+    MTERP_ENTRY2
+
+    /* save stack pointer, add magic word for debuggerd */
+    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return
+
+    /* set up "named" registers, figure out entry point */
+    mov     rGLUE, r0                   @ set rGLUE
+    ldrb    r1, [r0, #offGlue_entryPoint]   @ InterpEntry enum is char
+    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
+    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
+    cmp     r1, #kInterpEntryInstr      @ usual case?
+    bne     .Lnot_instr                 @ no, handle it
+
+    /* start executing the instruction at rPC */
+    FETCH_INST()                        @ load rINST from rPC
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+.Lnot_instr:
+    cmp     r1, #kInterpEntryReturn     @ were we returning from a method?
+    beq     common_returnFromMethod
+
+.Lnot_return:
+    cmp     r1, #kInterpEntryThrow      @ were we throwing an exception?
+    beq     common_exceptionThrown
+
+.Lbad_arg:
+    ldr     r0, strBadEntryPoint
+    @ r1 holds value of entryPoint
+    bl      printf
+    bl      dvmAbort
+    .fnend
+
+
+    .global dvmMterpStdBail
+    .type   dvmMterpStdBail, %function
+
+/*
+ * Restore the stack pointer and PC from the save point established on entry.
+ * This is essentially the same as a longjmp, but should be cheaper.  The
+ * last instruction causes us to return to whoever called dvmMterpStdRun.
+ *
+ * We pushed some registers on the stack in dvmMterpStdRun, then saved
+ * SP and LR.  Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ *  r0  MterpGlue* glue
+ *  r1  bool changeInterp
+ */
+dvmMterpStdBail:
+    ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
+    mov     r0, r1                          @ return the changeInterp value
+    add     sp, sp, #4                      @ un-align 64
+    ldmfd   sp!, {r4-r10,fp,pc}             @ restore 9 regs
+
+
+/*
+ * String references.
+ */
+strBadEntryPoint:
+    .word   .LstrBadEntryPoint
+
+
+
+    .global dvmAsmInstructionStart
+    .type   dvmAsmInstructionStart, %function
+dvmAsmInstructionStart = .L_OP_NOP
+    .text
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NOP: /* 0x00 */
+/* File: armv5/OP_NOP.S */
+    FETCH_ADVANCE_INST(1)               @ advance to next instr, load rINST
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    GOTO_OPCODE(ip)                     @ execute it
+
+#ifdef ASSIST_DEBUGGER
+    /* insert fake function header to help gdb find the stack frame */
+    .type   dalvik_inst, %function
+dalvik_inst:
+    .fnstart
+    MTERP_ENTRY1
+    MTERP_ENTRY2
+    .fnend
+#endif
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE: /* 0x01 */
+/* File: armv5/OP_MOVE.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
+    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    GET_VREG(r2, r1)                    @ r2<- fp[B]
+    and     r0, r0, #15
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    SET_VREG(r2, r0)                    @ fp[A]<- r2
+    GOTO_OPCODE(ip)                     @ execute next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE_FROM16: /* 0x02 */
+/* File: armv5/OP_MOVE_FROM16.S */
+    /* Copy one 32-bit vreg: vAA<- vBBBB (8-bit dest, 16-bit src index). */
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    FETCH(r1, 1)                        @ r1<- BBBB
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r0)                    @ fp[AA]<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE_16: /* 0x03 */
+/* File: armv5/OP_MOVE_16.S */
+    /* Copy one 32-bit vreg: vAAAA<- vBBBB (both indices in trailing code units). */
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    FETCH(r1, 2)                        @ r1<- BBBB
+    FETCH(r0, 1)                        @ r0<- AAAA
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE_WIDE: /* 0x04 */
+/* File: armv5/OP_MOVE_WIDE.S */
+    /* Copy a 64-bit vreg pair via ldmia/stmia of two words. */
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    mov     r2, rINST, lsr #8           @ r2<- A(+)
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r2, r2, #15                 @ mask to the low 4 bits of A
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: armv5/OP_MOVE_WIDE_FROM16.S */
+    /* Copy a 64-bit vreg pair: vAA<- vBBBB (8-bit dest, 16-bit src index). */
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    FETCH(r3, 1)                        @ r3<- BBBB
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: armv5/OP_MOVE_WIDE_16.S */
+    /* Copy a 64-bit vreg pair: vAAAA<- vBBBB (both indices in trailing code units). */
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    FETCH(r3, 2)                        @ r3<- BBBB
+    FETCH(r2, 1)                        @ r2<- AAAA
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AAAA]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r2, {r0-r1}                 @ fp[AAAA]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE_OBJECT: /* 0x07 */
+/* File: armv5/OP_MOVE_OBJECT.S */
+/* File: armv5/OP_MOVE.S */
+    /* Identical body to OP_MOVE (object refs are one 32-bit word here). */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
+    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    GET_VREG(r2, r1)                    @ r2<- fp[B]
+    and     r0, r0, #15                 @ mask to the low 4 bits of A
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    SET_VREG(r2, r0)                    @ fp[A]<- r2
+    GOTO_OPCODE(ip)                     @ execute next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: armv5/OP_MOVE_OBJECT_FROM16.S */
+/* File: armv5/OP_MOVE_FROM16.S */
+    /* Identical body to OP_MOVE_FROM16 (object refs are one 32-bit word here). */
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    FETCH(r1, 1)                        @ r1<- BBBB
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r0)                    @ fp[AA]<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: armv5/OP_MOVE_OBJECT_16.S */
+/* File: armv5/OP_MOVE_16.S */
+    /* Identical body to OP_MOVE_16 (object refs are one 32-bit word here). */
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    FETCH(r1, 2)                        @ r1<- BBBB
+    FETCH(r0, 1)                        @ r0<- AAAA
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE_RESULT: /* 0x0a */
+/* File: armv5/OP_MOVE_RESULT.S */
+    /* Copy the 32-bit return value from glue->retval into vAA. */
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[AA]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: armv5/OP_MOVE_RESULT_WIDE.S */
+    /* Copy the 64-bit return value from glue->retval into vAA/vAA+1. */
+    /* move-result-wide vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: armv5/OP_MOVE_RESULT_OBJECT.S */
+/* File: armv5/OP_MOVE_RESULT.S */
+    /* Identical body to OP_MOVE_RESULT (object result is one 32-bit word here). */
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[AA]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: armv5/OP_MOVE_EXCEPTION.S */
+    /* Store the pending exception object into vAA and clear it from the
+     * thread, reading/writing thread->exception directly instead of going
+     * through dvmGetException/dvmClearException. */
+    /* move-exception vAA */
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
+    mov     r1, #0                      @ r1<- 0
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_RETURN_VOID: /* 0x0e */
+/* File: armv5/OP_RETURN_VOID.S */
+    /* No return value to store; go straight to the common return handler. */
+    b       common_returnFromMethod
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_RETURN: /* 0x0f */
+/* File: armv5/OP_RETURN.S */
+    /*
+     * Return a 32-bit value.  Copies the return value into the "glue"
+     * structure, then jumps to the return handler.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    GET_VREG(r0, r2)                    @ r0<- vAA
+    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+    b       common_returnFromMethod     @ shared unwind/dispatch path
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_RETURN_WIDE: /* 0x10 */
+/* File: armv5/OP_RETURN_WIDE.S */
+    /*
+     * Return a 64-bit value.  Copies the return value into the "glue"
+     * structure, then jumps to the return handler.
+     */
+    /* return-wide vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
+    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
+    ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
+    stmia   r3, {r0-r1}                 @ retval<- r0/r1
+    b       common_returnFromMethod     @ shared unwind/dispatch path
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_RETURN_OBJECT: /* 0x11 */
+/* File: armv5/OP_RETURN_OBJECT.S */
+/* File: armv5/OP_RETURN.S */
+    /* Identical body to OP_RETURN (object result is one 32-bit word here). */
+    /*
+     * Return a 32-bit value.  Copies the return value into the "glue"
+     * structure, then jumps to the return handler.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    GET_VREG(r0, r2)                    @ r0<- vAA
+    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+    b       common_returnFromMethod
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_4: /* 0x12 */
+/* File: armv5/OP_CONST_4.S */
+    /* Sign-extend the 4-bit literal B (bits 15:12) into vA via lsl/asr. */
+    /* const/4 vA, #+B */
+    mov     r1, rINST, lsl #16          @ r1<- Bxxx0000
+    mov     r0, rINST, lsr #8           @ r0<- A+
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    mov     r1, r1, asr #28             @ r1<- sssssssB (sign-extended)
+    and     r0, r0, #15                 @ mask to the low 4 bits of A
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    SET_VREG(r1, r0)                    @ fp[A]<- r1
+    GOTO_OPCODE(ip)                     @ execute next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_16: /* 0x13 */
+/* File: armv5/OP_CONST_16.S */
+    /* Store a sign-extended 16-bit literal into vAA. */
+    /* const/16 vAA, #+BBBB */
+    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r0, r3)                    @ vAA<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST: /* 0x14 */
+/* File: armv5/OP_CONST.S */
+    /* Assemble a full 32-bit literal from two 16-bit code units into vAA. */
+    /* const vAA, #+BBBBbbbb */
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH(r0, 1)                        @ r0<- bbbb (low)
+    FETCH(r1, 2)                        @ r1<- BBBB (high)
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r3)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_HIGH16: /* 0x15 */
+/* File: armv5/OP_CONST_HIGH16.S */
+    /* Store BBBB shifted into the high 16 bits (low bits zero) into vAA. */
+    /* const/high16 vAA, #+BBBB0000 */
+    FETCH(r0, 1)                        @ r0<- 0000BBBB (zero-extended)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    mov     r0, r0, lsl #16             @ r0<- BBBB0000
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r0, r3)                    @ vAA<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_WIDE_16: /* 0x16 */
+/* File: armv5/OP_CONST_WIDE_16.S */
+    /* Sign-extend a 16-bit literal to 64 bits (high word = sign) into vAA pair. */
+    /* const-wide/16 vAA, #+BBBB */
+    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    mov     r1, r0, asr #31             @ r1<- ssssssss
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_WIDE_32: /* 0x17 */
+/* File: armv5/OP_CONST_WIDE_32.S */
+    /* Sign-extend a 32-bit literal to 64 bits (high word = sign) into vAA pair. */
+    /* const-wide/32 vAA, #+BBBBbbbb */
+    FETCH(r0, 1)                        @ r0<- 0000bbbb (low)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_S(r2, 2)                      @ r2<- ssssBBBB (high)
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
+    mov     r1, r0, asr #31             @ r1<- ssssssss
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_WIDE: /* 0x18 */
+/* File: armv5/OP_CONST_WIDE.S */
+    /* Assemble a full 64-bit literal from four 16-bit code units into vAA pair. */
+    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+    FETCH(r0, 1)                        @ r0<- bbbb (low)
+    FETCH(r1, 2)                        @ r1<- BBBB (low middle)
+    FETCH(r2, 3)                        @ r2<- hhhh (high middle)
+    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
+    FETCH(r3, 4)                        @ r3<- HHHH (high)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
+    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: armv5/OP_CONST_WIDE_HIGH16.S */
+    /* Store BBBB in bits 63:48 of the vAA pair; all lower bits zero. */
+    /* const-wide/high16 vAA, #+BBBB000000000000 */
+    FETCH(r1, 1)                        @ r1<- 0000BBBB (zero-extended)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    mov     r0, #0                      @ r0<- 00000000
+    mov     r1, r1, lsl #16             @ r1<- BBBB0000
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_STRING: /* 0x1a */
+/* File: armv5/OP_CONST_STRING.S */
+    /* Load resolved String ref BBBB into vAA; fall out to the resolve
+     * stub (defined later in the file) on a NULL cache entry. */
+    /* const/string vAA, String@BBBB */
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
+    cmp     r0, #0                      @ not yet resolved?
+    beq     .LOP_CONST_STRING_resolve
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: armv5/OP_CONST_STRING_JUMBO.S */
+    /* Same as OP_CONST_STRING but with a 32-bit string index built from
+     * two code units; resolve stub is defined later in the file. */
+    /* const/string vAA, String@BBBBBBBB */
+    FETCH(r0, 1)                        @ r0<- bbbb (low)
+    FETCH(r1, 2)                        @ r1<- BBBB (high)
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    ldr     r2, [r2, #offDvmDex_pResStrings]   @ r2<- dvmDex->pResStrings
+    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
+    cmp     r0, #0                      @ not yet resolved?
+    beq     .LOP_CONST_STRING_JUMBO_resolve
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CONST_CLASS: /* 0x1c */
+/* File: armv5/OP_CONST_CLASS.S */
+    /* Load resolved Class ref BBBB into vAA; fall out to the resolve
+     * stub (defined later in the file) on a NULL cache entry. */
+    /* const/class vAA, Class@BBBB */
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    ldr     r2, [r2, #offDvmDex_pResClasses]   @ r2<- dvmDex->pResClasses
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
+    cmp     r0, #0                      @ not yet resolved?
+    beq     .LOP_CONST_CLASS_resolve
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MONITOR_ENTER: /* 0x1d */
+/* File: armv5/OP_MONITOR_ENTER.S */
+    /*
+     * Synchronize on an object.  Null object throws NPE; otherwise call
+     * dvmLockObject(self, obj).  With deadlock prediction enabled, the
+     * lock call can raise an exception, so re-check thread->exception.
+     */
+    /* monitor-enter vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    GET_VREG(r1, r2)                    @ r1<- vAA (object)
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    cmp     r1, #0                      @ null object?
+#ifdef WITH_MONITOR_TRACKING
+    EXPORT_PC()                         @ export PC so we can grab stack trace
+#endif
+    beq     common_errNullObject        @ null object, throw an exception
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    bl      dvmLockObject               @ call(self, obj)
+#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    ldr     r1, [r0, #offThread_exception] @ check for exception
+    cmp     r1, #0
+    bne     common_exceptionThrown      @ exception raised, bail out
+#endif
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MONITOR_EXIT: /* 0x1e */
+/* File: armv5/OP_MONITOR_EXIT.S */
+    /*
+     * Unlock an object.
+     *
+     * Exceptions that occur when unlocking a monitor need to appear as
+     * if they happened at the following instruction.  See the Dalvik
+     * instruction spec.
+     */
+    /* monitor-exit vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    EXPORT_PC()                         @ before fetch: export the PC
+    GET_VREG(r1, r2)                    @ r1<- vAA (object)
+    cmp     r1, #0                      @ null object?
+    beq     common_errNullObject        @ yes
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, exception is pending
+    @ rPC advances only after the unlock outcome is known (see spec note above)
+    FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CHECK_CAST: /* 0x1f */
+/* File: armv5/OP_CHECK_CAST.S */
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     * Fast paths handled inline: null object (always succeeds) and exact
+     * class match; resolve/fullcheck stubs are defined later in the file.
+     */
+    /* check-cast vAA, class@BBBB */
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH(r2, 1)                        @ r2<- BBBB
+    GET_VREG(r9, r3)                    @ r9<- object
+    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
+    cmp     r9, #0                      @ is object null?
+    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
+    beq     .LOP_CHECK_CAST_okay            @ null obj, cast always succeeds
+    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .LOP_CHECK_CAST_resolve         @ not resolved, do it now
+.LOP_CHECK_CAST_resolved:
+    cmp     r0, r1                      @ same class (trivial success)?
+    bne     .LOP_CHECK_CAST_fullcheck       @ no, do full check
+.LOP_CHECK_CAST_okay:
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INSTANCE_OF: /* 0x20 */
+/* File: armv5/OP_INSTANCE_OF.S */
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.  Null object stores 0 (r0) directly;
+     * store/resolve/trivial/fullcheck stubs are defined later in the file.
+     */
+    /* instance-of vA, vB, class@CCCC */
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r3)                    @ r0<- vB (object)
+    and     r9, r9, #15                 @ r9<- A
+    cmp     r0, #0                      @ is object null?
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
+    beq     .LOP_INSTANCE_OF_store           @ null obj, not an instance, store r0
+    FETCH(r3, 1)                        @ r3<- CCCC
+    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
+    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    cmp     r1, #0                      @ have we resolved this before?
+    beq     .LOP_INSTANCE_OF_resolve         @ not resolved, do it now
+.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
+    cmp     r0, r1                      @ same class (trivial success)?
+    beq     .LOP_INSTANCE_OF_trivial         @ yes, trivial finish
+    b       .LOP_INSTANCE_OF_fullcheck       @ no, do full check
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: armv5/OP_ARRAY_LENGTH.S */
+    /*
+     * Return the length of an array: vA<- vB->length, NPE on null ref.
+     */
+    mov     r1, rINST, lsr #12          @ r1<- B
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    GET_VREG(r0, r1)                    @ r0<- vB (object ref)
+    and     r2, r2, #15                 @ r2<- A
+    cmp     r0, #0                      @ is object null?
+    beq     common_errNullObject        @ yup, fail
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- array length
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r3, r2)                    @ vB<- length
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEW_INSTANCE: /* 0x22 */
+/* File: armv5/OP_NEW_INSTANCE.S */
+    /*
+     * Create a new instance of a class.  Inline path requires a resolved,
+     * initialized, concrete class; resolve/needinit/finish/abstract stubs
+     * are defined later in the file.
+     */
+    /* new-instance vAA, class@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    EXPORT_PC()                         @ req'd for init, resolve, alloc
+    cmp     r0, #0                      @ already resolved?
+    beq     .LOP_NEW_INSTANCE_resolve         @ no, resolve it now
+.LOP_NEW_INSTANCE_resolved:   @ r0=class
+    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
+    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
+    bne     .LOP_NEW_INSTANCE_needinit        @ no, init class now
+.LOP_NEW_INSTANCE_initialized: @ r0=class
+    ldr     r3, [r0, #offClassObject_accessFlags]   @ r3<- clazz->accessFlags
+    tst     r3, #(ACC_INTERFACE|ACC_ABSTRACT)   @ abstract or interface?
+    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
+    beq     .LOP_NEW_INSTANCE_finish          @ concrete class, continue
+    b       .LOP_NEW_INSTANCE_abstract        @ fail
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEW_ARRAY: /* 0x23 */
+/* File: armv5/OP_NEW_ARRAY.S */
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.  finish/resolve stubs are defined later in
+     * the file.
+     */
+    /* new-array vA, vB, class@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    FETCH(r2, 1)                        @ r2<- CCCC
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    GET_VREG(r1, r0)                    @ r1<- vB (array length)
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    cmp     r1, #0                      @ check length
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
+    bmi     common_errNegativeArraySize @ negative length, bail
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ req'd for resolve, alloc
+    bne     .LOP_NEW_ARRAY_finish          @ resolved, continue
+    b       .LOP_NEW_ARRAY_resolve         @ do resolve now
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: armv5/OP_FILLED_NEW_ARRAY.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     * If the class isn't cached, resolve it here; the continue stub
+     * (defined later in the file) does the allocation and copying.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    EXPORT_PC()                         @ need for resolve and alloc
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    mov     r10, rINST, lsr #8          @ r10<- AA or BA
+    cmp     r0, #0                      @ already resolved?
+    bne     .LOP_FILLED_NEW_ARRAY_continue        @ yes, continue on
+8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       .LOP_FILLED_NEW_ARRAY_continue
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: armv5/OP_FILLED_NEW_ARRAY_RANGE.S */
+/* File: armv5/OP_FILLED_NEW_ARRAY.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     * Same body as OP_FILLED_NEW_ARRAY; only the continue-stub label
+     * differs.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
+    EXPORT_PC()                         @ need for resolve and alloc
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
+    mov     r10, rINST, lsr #8          @ r10<- AA or BA
+    cmp     r0, #0                      @ already resolved?
+    bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue        @ yes, continue on
+8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       .LOP_FILLED_NEW_ARRAY_RANGE_continue
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: armv5/OP_FILL_ARRAY_DATA.S */
+    /* Fill vAA's array from the data table at rPC + signed offset*2;
+     * dvmInterpHandleFillArrayData returns 0 if it threw. */
+    /* fill-array-data vAA, +BBBBBBBB */
+    FETCH(r0, 1)                        @ r0<- bbbb (lo)
+    FETCH(r1, 2)                        @ r1<- BBBB (hi)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
+    GET_VREG(r0, r3)                    @ r0<- vAA (array object)
+    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
+    EXPORT_PC();
+    bl      dvmInterpHandleFillArrayData@ fill the array with predefined data
+    cmp     r0, #0                      @ 0 means an exception is thrown
+    beq     common_exceptionThrown      @ has exception
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_THROW: /* 0x27 */
+/* File: armv5/OP_THROW.S */
+    /*
+     * Throw an exception object in the current thread.  Throwing null
+     * becomes an NPE; otherwise the object is stored directly into
+     * thread->exception and we jump to the common throw path.
+     */
+    /* throw vAA */
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    cmp     r1, #0                      @ null object?
+    beq     common_errNullObject        @ yes, throw an NPE instead
+    @ bypass dvmSetException, just store it
+    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
+    b       common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_GOTO: /* 0x28 */
+/* File: armv5/OP_GOTO.S */
+    /*
+     * Unconditional branch, 8-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.  Backward branches detour through
+     * common_backwardBranch for periodic (suspend/etc.) checks.
+     */
+    /* goto +AA */
+    mov     r0, rINST, lsl #16          @ r0<- AAxx0000
+    movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended)
+    mov     r9, r9, lsl #1              @ r9<- byte offset
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_GOTO_16: /* 0x29 */
+/* File: armv5/OP_GOTO_16.S */
+    /*
+     * Unconditional branch, 16-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.  Backward branches detour through
+     * common_backwardBranch for periodic checks.
+     */
+    /* goto/16 +AAAA */
+    FETCH_S(r0, 1)                      @ r0<- ssssAAAA (sign-extended)
+    movs    r9, r0, asl #1              @ r9<- byte offset, check sign
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_GOTO_32: /* 0x2a */
+/* File: armv5/OP_GOTO_32.S */
+    /*
+     * Unconditional branch, 32-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     *
+     * Unlike most opcodes, this one is allowed to branch to itself, so
+     * our "backward branch" test must be "<=0" instead of "<0".  The ORRS
+     * instruction doesn't affect the V flag, so we need to clear it
+     * explicitly.
+     */
+    /* goto/32 +AAAAAAAA */
+    FETCH(r0, 1)                        @ r0<- aaaa (lo)
+    FETCH(r1, 2)                        @ r1<- AAAA (hi)
+    cmp     ip, ip                      @ (clear V flag during stall)
+    orrs    r0, r0, r1, lsl #16         @ r0<- AAAAaaaa, check sign
+    mov     r9, r0, asl #1              @ r9<- byte offset
+    ble     common_backwardBranch       @ backward branch, do periodic checks
+    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_PACKED_SWITCH: /* 0x2b */
+/* File: armv5/OP_PACKED_SWITCH.S */
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBB */
+    FETCH(r0, 1)                        @ r0<- bbbb (lo)
+    FETCH(r1, 2)                        @ r1<- BBBB (hi)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
+    GET_VREG(r1, r3)                    @ r1<- vAA
+    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
+    bl      dvmInterpHandlePackedSwitch                       @ r0<- code-unit branch offset
+    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
+    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: armv5/OP_SPARSE_SWITCH.S */
+/* File: armv5/OP_PACKED_SWITCH.S */
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBB */
+    FETCH(r0, 1)                        @ r0<- bbbb (lo)
+    FETCH(r1, 2)                        @ r1<- BBBB (hi)
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
+    GET_VREG(r1, r3)                    @ r1<- vAA
+    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
+    bl      dvmInterpHandleSparseSwitch                       @ r0<- code-unit branch offset
+    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
+    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CMPL_FLOAT: /* 0x2d */
+/* File: armv5/OP_CMPL_FLOAT.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1,1};  // one or both operands was NaN
+     *
+     * The straightforward implementation requires 3 calls to functions
+     * that return a result in r0.  We can do it with two calls if our
+     * EABI library supports __aeabi_cfcmple (only one if we want to check
+     * for NaN directly):
+     *   check x <= y
+     *     if <, return -1
+     *     if ==, return 0
+     *   check y <= x
+     *     if <, return 1
+     *   return {-1,1}
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG(r9, r2)                    @ r9<- vBB
+    GET_VREG(r10, r3)                   @ r10<- vCC
+    mov     r0, r9                      @ copy to arg registers
+    mov     r1, r10
+    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
+    bhi     .LOP_CMPL_FLOAT_gt_or_nan       @ C set and Z clear, disambiguate
+    mvncc   r1, #0                      @ (less than) r1<- -1
+    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
+.LOP_CMPL_FLOAT_finish:
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r3)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CMPG_FLOAT: /* 0x2e */
+/* File: armv5/OP_CMPG_FLOAT.S */
+/* File: armv5/OP_CMPL_FLOAT.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1,1};  // one or both operands was NaN
+     *
+     * The straightforward implementation requires 3 calls to functions
+     * that return a result in r0.  We can do it with two calls if our
+     * EABI library supports __aeabi_cfcmple (only one if we want to check
+     * for NaN directly):
+     *   check x <= y
+     *     if <, return -1
+     *     if ==, return 0
+     *   check y <= x
+     *     if <, return 1
+     *   return {-1,1}
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG(r9, r2)                    @ r9<- vBB
+    GET_VREG(r10, r3)                   @ r10<- vCC
+    mov     r0, r9                      @ copy to arg registers
+    mov     r1, r10
+    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
+    bhi     .LOP_CMPG_FLOAT_gt_or_nan       @ C set and Z clear, disambiguate
+    mvncc   r1, #0                      @ (less than) r1<- -1
+    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
+.LOP_CMPG_FLOAT_finish:
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r3)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: armv5/OP_CMPL_DOUBLE.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * See OP_CMPL_FLOAT for an explanation.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    and     r9, r0, #255                @ r9<- BB
+    mov     r10, r0, lsr #8             @ r10<- CC
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
+    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
+    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
+    bhi     .LOP_CMPL_DOUBLE_gt_or_nan       @ C set and Z clear, disambiguate
+    mvncc   r1, #0                      @ (less than) r1<- -1
+    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
+.LOP_CMPL_DOUBLE_finish:
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r3)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: armv5/OP_CMPG_DOUBLE.S */
+/* File: armv5/OP_CMPL_DOUBLE.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * See OP_CMPL_FLOAT for an explanation.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    and     r9, r0, #255                @ r9<- BB
+    mov     r10, r0, lsr #8             @ r10<- CC
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
+    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
+    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
+    bhi     .LOP_CMPG_DOUBLE_gt_or_nan       @ C set and Z clear, disambiguate
+    mvncc   r1, #0                      @ (less than) r1<- -1
+    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
+.LOP_CMPG_DOUBLE_finish:
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r3)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_CMP_LONG: /* 0x31 */
+/* File: armv5/OP_CMP_LONG.S */
+    /*
+     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
+     * register based on the results of the comparison.
+     *
+     * We load the full values with LDM, but in practice many values could
+     * be resolved by only looking at the high word.  This could be made
+     * faster or slower by splitting the LDM into a pair of LDRs.
+     *
+     * If we just wanted to set condition flags, we could do this:
+     *  subs    ip, r0, r2
+     *  sbcs    ip, r1, r3
+     *  subeqs  ip, r0, r2
+     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
+     * integer value, which we can do with 2 conditional mov/mvn instructions
+     * (set 1, set -1; if they're equal we already have 0 in ip), giving
+     * us a constant 5-cycle path plus a branch at the end to the
+     * instruction epilogue code.  The multi-compare approach below needs
+     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+     * in the worst case (the 64-bit values are equal).
+     */
+    /* cmp-long vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
+    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
+    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
+    blt     .LOP_CMP_LONG_less            @ signed compare on high part
+    bgt     .LOP_CMP_LONG_greater
+    subs    r1, r0, r2                  @ r1<- r0 - r2
+    bhi     .LOP_CMP_LONG_greater         @ unsigned compare on low part
+    bne     .LOP_CMP_LONG_less
+    b       .LOP_CMP_LONG_finish          @ equal; r1 already holds 0
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IF_EQ: /* 0x32 */
+/* File: armv5/OP_IF_EQ.S */
+/* File: armv5/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    mov     r0, rINST, lsr #8           @ r0<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r0, r0, #15
+    GET_VREG(r3, r1)                    @ r3<- vB
+    GET_VREG(r2, r0)                    @ r2<- vA
+    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
+    cmp     r2, r3                      @ compare (vA, vB)
+    bne  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ yes, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IF_NE: /* 0x33 */
+/* File: armv5/OP_IF_NE.S */
+/* File: armv5/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    mov     r0, rINST, lsr #8           @ r0<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r0, r0, #15
+    GET_VREG(r3, r1)                    @ r3<- vB
+    GET_VREG(r2, r0)                    @ r2<- vA
+    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
+    cmp     r2, r3                      @ compare (vA, vB)
+    beq  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ yes, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IF_LT: /* 0x34 */
+/* File: armv5/OP_IF_LT.S */
+/* File: armv5/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    mov     r0, rINST, lsr #8           @ r0<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r0, r0, #15
+    GET_VREG(r3, r1)                    @ r3<- vB
+    GET_VREG(r2, r0)                    @ r2<- vA
+    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
+    cmp     r2, r3                      @ compare (vA, vB)
+    bge  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ yes, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IF_GE: /* 0x35 */
+/* File: armv5/OP_IF_GE.S */
+/* File: armv5/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    mov     r0, rINST, lsr #8           @ r0<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r0, r0, #15
+    GET_VREG(r3, r1)                    @ r3<- vB
+    GET_VREG(r2, r0)                    @ r2<- vA
+    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
+    cmp     r2, r3                      @ compare (vA, vB)
+    blt  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ yes, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IF_GT: /* 0x36 */
+/* File: armv5/OP_IF_GT.S */
+/* File: armv5/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    mov     r0, rINST, lsr #8           @ r0<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r0, r0, #15
+    GET_VREG(r3, r1)                    @ r3<- vB
+    GET_VREG(r2, r0)                    @ r2<- vA
+    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
+    cmp     r2, r3                      @ compare (vA, vB)
+    ble  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ yes, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IF_LE: /* 0x37 */
+/* File: armv5/OP_IF_LE.S */
+/* File: armv5/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    mov     r0, rINST, lsr #8           @ r0<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r0, r0, #15
+    GET_VREG(r3, r1)                    @ r3<- vB
+    GET_VREG(r2, r0)                    @ r2<- vA
+    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
+    cmp     r2, r3                      @ compare (vA, vB)
+    bgt  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ yes, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IF_EQZ: /* 0x38 */
+/* File: armv5/OP_IF_EQZ.S */
+/* File: armv5/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    GET_VREG(r2, r0)                    @ r2<- vAA
+    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
+    cmp     r2, #0                      @ compare (vA, 0)
+    bne  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IF_NEZ: /* 0x39 */
+/* File: armv5/OP_IF_NEZ.S */
+/* File: armv5/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    GET_VREG(r2, r0)                    @ r2<- vAA
+    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
+    cmp     r2, #0                      @ compare (vA, 0)
+    beq  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IF_LTZ: /* 0x3a */
+/* File: armv5/OP_IF_LTZ.S */
+/* File: armv5/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    GET_VREG(r2, r0)                    @ r2<- vAA
+    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
+    cmp     r2, #0                      @ compare (vA, 0)
+    bge  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IF_GEZ: /* 0x3b */
+/* File: armv5/OP_IF_GEZ.S */
+/* File: armv5/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    GET_VREG(r2, r0)                    @ r2<- vAA
+    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
+    cmp     r2, #0                      @ compare (vA, 0)
+    blt  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IF_GTZ: /* 0x3c */
+/* File: armv5/OP_IF_GTZ.S */
+/* File: armv5/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    GET_VREG(r2, r0)                    @ r2<- vAA
+    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
+    cmp     r2, #0                      @ compare (vA, 0)
+    ble  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IF_LEZ: /* 0x3d */
+/* File: armv5/OP_IF_LEZ.S */
+/* File: armv5/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    mov     r0, rINST, lsr #8           @ r0<- AA
+    GET_VREG(r2, r0)                    @ r2<- vAA
+    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
+    cmp     r2, #0                      @ compare (vA, 0)
+    bgt  1f                      @ branch to 1 if comparison failed
+    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
+    movs    r9, r9, asl #1              @ convert to bytes, check sign
+    bmi     common_backwardBranch       @ backward branch, do periodic checks
+1:  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3E: /* 0x3e */
+/* File: armv5/OP_UNUSED_3E.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_3F: /* 0x3f */
+/* File: armv5/OP_UNUSED_3F.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_40: /* 0x40 */
+/* File: armv5/OP_UNUSED_40.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_41: /* 0x41 */
+/* File: armv5/OP_UNUSED_41.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_42: /* 0x42 */
+/* File: armv5/OP_UNUSED_42.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_43: /* 0x43 */
+/* File: armv5/OP_UNUSED_43.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_AGET: /* 0x44 */
+/* File: armv5/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #2     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldr   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r9)                    @ vAA<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_AGET_WIDE: /* 0x45 */
+/* File: armv5/OP_AGET_WIDE.S */
+    /*
+     * Array get, 64 bits.  vAA <- vBB[vCC].
+     *
+     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
+     */
+    /* aget-wide vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcc     .LOP_AGET_WIDE_finish          @ okay, continue below
+    b       common_errArrayIndex        @ index >= length, bail
+    @ May want to swap the order of these two branches depending on how the
+    @ branch prediction (if any) handles conditional forward branches vs.
+    @ unconditional forward branches.
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_AGET_OBJECT: /* 0x46 */
+/* File: armv5/OP_AGET_OBJECT.S */
+/* File: armv5/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Object references are plain 32-bit words here, so this is the
+     * generic OP_AGET code included unchanged.
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #2     @ r0<- arrayObj + index*width (4-byte refs)
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldr   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r9)                    @ vAA<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: armv5/OP_AGET_BOOLEAN.S */
+/* File: armv5/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    @ lsl #0: boolean arrays use one-byte elements
+    add     r0, r0, r1, lsl #0     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldrb   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (zero-extended)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r9)                    @ vAA<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_AGET_BYTE: /* 0x48 */
+/* File: armv5/OP_AGET_BYTE.S */
+/* File: armv5/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    @ lsl #0: byte arrays use one-byte elements
+    add     r0, r0, r1, lsl #0     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldrsb   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (sign-extended)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r9)                    @ vAA<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_AGET_CHAR: /* 0x49 */
+/* File: armv5/OP_AGET_CHAR.S */
+/* File: armv5/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    @ lsl #1: char arrays use two-byte elements
+    add     r0, r0, r1, lsl #1     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldrh   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (zero-extended)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r9)                    @ vAA<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_AGET_SHORT: /* 0x4a */
+/* File: armv5/OP_AGET_SHORT.S */
+/* File: armv5/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    @ lsl #1: short arrays use two-byte elements
+    add     r0, r0, r1, lsl #1     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldrsh   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (sign-extended)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r2, r9)                    @ vAA<- r2
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_APUT: /* 0x4b */
+/* File: armv5/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #2     @ r0<- arrayObj + index*width (4 bytes)
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r2, r9)                    @ r2<- vAA (value to store)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str  r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2 (32-bit store)
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_APUT_WIDE: /* 0x4c */
+/* File: armv5/OP_APUT_WIDE.S */
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     *
+     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
+     */
+    /* aput-wide vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA] (source reg pair addr)
+    @ The 64-bit register-pair load and the STRD into the array happen in
+    @ the out-of-line .LOP_APUT_WIDE_finish, after the bounds check passes.
+    bcc     .LOP_APUT_WIDE_finish          @ okay, continue below
+    b       common_errArrayIndex        @ index >= length, bail
+    @ May want to swap the order of these two branches depending on how the
+    @ branch prediction (if any) handles conditional forward branches vs.
+    @ unconditional forward branches.
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_APUT_OBJECT: /* 0x4d */
+/* File: armv5/OP_APUT_OBJECT.S */
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     *
+     * Register use differs from the scalar aput handlers: r1 keeps the
+     * array object, r9 the value, and r10 the element address, because
+     * the out-of-line .LOP_APUT_OBJECT_finish still needs all three
+     * (the element-type check and store happen there, not here).
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG(r1, r2)                    @ r1<- vBB (array object)
+    GET_VREG(r0, r3)                    @ r0<- vCC (requested index)
+    cmp     r1, #0                      @ null array object?
+    GET_VREG(r9, r9)                    @ r9<- vAA
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r1, #offArrayObject_length]    @ r3<- arrayObj->length
+    add     r10, r1, r0, lsl #2         @ r10<- arrayObj + index*width
+    cmp     r0, r3                      @ compare unsigned index, length
+    bcc     .LOP_APUT_OBJECT_finish          @ we're okay, continue on
+    b       common_errArrayIndex        @ index >= length, bail
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: armv5/OP_APUT_BOOLEAN.S */
+/* File: armv5/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    @ lsl #0: boolean arrays use one-byte elements
+    add     r0, r0, r1, lsl #0     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r2, r9)                    @ r2<- vAA (value to store)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    strb  r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2 (low byte)
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_APUT_BYTE: /* 0x4f */
+/* File: armv5/OP_APUT_BYTE.S */
+/* File: armv5/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    @ lsl #0: byte arrays use one-byte elements
+    add     r0, r0, r1, lsl #0     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r2, r9)                    @ r2<- vAA (value to store)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    strb  r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2 (low byte)
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_APUT_CHAR: /* 0x50 */
+/* File: armv5/OP_APUT_CHAR.S */
+/* File: armv5/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    @ lsl #1: char arrays use two-byte elements
+    add     r0, r0, r1, lsl #1     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r2, r9)                    @ r2<- vAA (value to store)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    strh  r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2 (low halfword)
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_APUT_SHORT: /* 0x51 */
+/* File: armv5/OP_APUT_SHORT.S */
+/* File: armv5/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(r2, 1, 0)                   @ r2<- BB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    FETCH_B(r3, 1, 1)                   @ r3<- CC
+    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
+    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
+    cmp     r0, #0                      @ null array object?
+    beq     common_errNullObject        @ yes, bail
+    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
+    @ lsl #1: short arrays use two-byte elements
+    add     r0, r0, r1, lsl #1     @ r0<- arrayObj + index*width
+    cmp     r1, r3                      @ compare unsigned index, length
+    bcs     common_errArrayIndex        @ index >= length, bail
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r2, r9)                    @ r2<- vAA (value to store)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    strh  r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2 (low halfword)
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET: /* 0x52 */
+/* File: armv5/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * Fast path: the field was already resolved, so branch straight to
+     * the out-of-line .LOP_IGET_finish (which does the actual load).
+     * Slow path (local label "8"): call dvmResolveInstField, then either
+     * continue at the finish stub or throw.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr (r1 still holds CCCC)
+    cmp     r0, #0
+    bne     .LOP_IGET_finish
+    b       common_exceptionThrown
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_WIDE: /* 0x53 */
+/* File: armv5/OP_IGET_WIDE.S */
+    /*
+     * 64-bit instance field get (long/double).  vA <- obj.field.
+     * Same resolve-or-throw structure as OP_IGET; the 64-bit load into
+     * the vA register pair happens in the out-of-line .LOP_IGET_WIDE_finish.
+     */
+    /* iget-wide vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_WIDE_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0
+    bne     .LOP_IGET_WIDE_finish
+    b       common_exceptionThrown
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_OBJECT: /* 0x54 */
+/* File: armv5/OP_IGET_OBJECT.S */
+/* File: armv5/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     * Object references are 32-bit words here, so this reuses the
+     * generic OP_IGET code; only the finish-stub label differs.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_OBJECT_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0
+    bne     .LOP_IGET_OBJECT_finish
+    b       common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: armv5/OP_IGET_BOOLEAN.S */
+@include "armv5/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
+/* File: armv5/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     * Instantiated from OP_IGET with an ldrb load (zero-extends the
+     * one-byte field); the load itself is in .LOP_IGET_BOOLEAN_finish.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_BOOLEAN_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0
+    bne     .LOP_IGET_BOOLEAN_finish
+    b       common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_BYTE: /* 0x56 */
+/* File: armv5/OP_IGET_BYTE.S */
+@include "armv5/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
+/* File: armv5/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     * Instantiated from OP_IGET with an ldrsb load (sign-extends the
+     * one-byte field); the load itself is in .LOP_IGET_BYTE_finish.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_BYTE_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0
+    bne     .LOP_IGET_BYTE_finish
+    b       common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_CHAR: /* 0x57 */
+/* File: armv5/OP_IGET_CHAR.S */
+@include "armv5/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
+/* File: armv5/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     * Instantiated from OP_IGET with an ldrh load (zero-extends the
+     * two-byte field); the load itself is in .LOP_IGET_CHAR_finish.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_CHAR_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0
+    bne     .LOP_IGET_CHAR_finish
+    b       common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_SHORT: /* 0x58 */
+/* File: armv5/OP_IGET_SHORT.S */
+@include "armv5/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
+/* File: armv5/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     * Instantiated from OP_IGET with an ldrsh load (sign-extends the
+     * two-byte field); the load itself is in .LOP_IGET_SHORT_finish.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IGET_SHORT_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0
+    bne     .LOP_IGET_SHORT_finish
+    b       common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT: /* 0x59 */
+/* File: armv5/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * Mirrors OP_IGET: fast path branches to the out-of-line
+     * .LOP_IPUT_finish (which does the actual store); slow path
+     * (local label "8") resolves the field or throws.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_IPUT_finish          @ yes, finish up
+    b       common_exceptionThrown
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_WIDE: /* 0x5a */
+/* File: armv5/OP_IPUT_WIDE.S */
+    /*
+     * 64-bit instance field put (long/double).  obj.field <- vA.
+     * Same resolve-or-throw structure as OP_IPUT; the 64-bit store
+     * happens in the out-of-line .LOP_IPUT_WIDE_finish.
+     */
+    /* iput-wide vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_WIDE_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_IPUT_WIDE_finish          @ yes, finish up
+    b       common_exceptionThrown
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_OBJECT: /* 0x5b */
+/* File: armv5/OP_IPUT_OBJECT.S */
+/* File: armv5/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     * Object references are 32-bit words here, so this reuses the
+     * generic OP_IPUT code; only the finish-stub label differs.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_OBJECT_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_IPUT_OBJECT_finish          @ yes, finish up
+    b       common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: armv5/OP_IPUT_BOOLEAN.S */
+@include "armv5/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
+/* File: armv5/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     * Instantiated from OP_IPUT with a strb store (one-byte field);
+     * the store itself is in .LOP_IPUT_BOOLEAN_finish.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_BOOLEAN_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_IPUT_BOOLEAN_finish          @ yes, finish up
+    b       common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_BYTE: /* 0x5d */
+/* File: armv5/OP_IPUT_BYTE.S */
+@include "armv5/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
+/* File: armv5/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     * Instantiated from OP_IPUT with a strb store (one-byte field);
+     * the store itself is in .LOP_IPUT_BYTE_finish.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_BYTE_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_IPUT_BYTE_finish          @ yes, finish up
+    b       common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_CHAR: /* 0x5e */
+/* File: armv5/OP_IPUT_CHAR.S */
+@include "armv5/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
+/* File: armv5/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_CHAR_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_IPUT_CHAR_finish          @ yes, finish up
+    b       common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_SHORT: /* 0x5f */
+/* File: armv5/OP_IPUT_SHORT.S */
+@include "armv5/OP_IPUT.S" { "store":"strh", "sqnum":"4" }
+/* File: armv5/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field@CCCC */
+    mov     r0, rINST, lsr #12          @ r0<- B
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref CCCC
+    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    bne     .LOP_IPUT_SHORT_finish          @ no, already resolved
+8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_IPUT_SHORT_finish          @ yes, finish up
+    b       common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET: /* 0x60 */
+/* File: armv5/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_resolve         @ yes, do resolve
+.LOP_SGET_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[AA]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_WIDE: /* 0x61 */
+/* File: armv5/OP_SGET_WIDE.S */
+    /*
+     * 64-bit SGET handler.
+     */
+    /* sget-wide vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_WIDE_resolve         @ yes, do resolve
+.LOP_SGET_WIDE_finish:
+    mov     r1, rINST, lsr #8           @ r1<- AA
+    ldrd    r2, [r0, #offStaticField_value] @ r2/r3<- field value (aligned)
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[AA]
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    stmia   r1, {r2-r3}                 @ vAA/vAA+1<- r2/r3
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_OBJECT: /* 0x62 */
+/* File: armv5/OP_SGET_OBJECT.S */
+/* File: armv5/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_OBJECT_resolve         @ yes, do resolve
+.LOP_SGET_OBJECT_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[AA]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: armv5/OP_SGET_BOOLEAN.S */
+/* File: armv5/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_BOOLEAN_resolve         @ yes, do resolve
+.LOP_SGET_BOOLEAN_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[AA]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_BYTE: /* 0x64 */
+/* File: armv5/OP_SGET_BYTE.S */
+/* File: armv5/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_BYTE_resolve         @ yes, do resolve
+.LOP_SGET_BYTE_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[AA]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_CHAR: /* 0x65 */
+/* File: armv5/OP_SGET_CHAR.S */
+/* File: armv5/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_CHAR_resolve         @ yes, do resolve
+.LOP_SGET_CHAR_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[AA]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SGET_SHORT: /* 0x66 */
+/* File: armv5/OP_SGET_SHORT.S */
+/* File: armv5/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SGET_SHORT_resolve         @ yes, do resolve
+.LOP_SGET_SHORT_finish: @ field ptr in r0
+    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r2)                    @ fp[AA]<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT: /* 0x67 */
+/* File: armv5/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_resolve         @ yes, do resolve
+.LOP_SPUT_finish:   @ field ptr in r0
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r1, [r0, #offStaticField_value] @ field<- vAA
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_WIDE: /* 0x68 */
+/* File: armv5/OP_SPUT_WIDE.S */
+    /*
+     * 64-bit SPUT handler.
+     */
+    /* sput-wide vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_WIDE_resolve         @ yes, do resolve
+.LOP_SPUT_WIDE_finish: @ field ptr in r0, AA in r9
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    strd    r2, [r0, #offStaticField_value] @ field<- vAA/vAA+1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_OBJECT: /* 0x69 */
+/* File: armv5/OP_SPUT_OBJECT.S */
+/* File: armv5/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_OBJECT_resolve         @ yes, do resolve
+.LOP_SPUT_OBJECT_finish:   @ field ptr in r0
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r1, [r0, #offStaticField_value] @ field<- vAA
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: armv5/OP_SPUT_BOOLEAN.S */
+/* File: armv5/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_BOOLEAN_resolve         @ yes, do resolve
+.LOP_SPUT_BOOLEAN_finish:   @ field ptr in r0
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r1, [r0, #offStaticField_value] @ field<- vAA
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_BYTE: /* 0x6b */
+/* File: armv5/OP_SPUT_BYTE.S */
+/* File: armv5/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_BYTE_resolve         @ yes, do resolve
+.LOP_SPUT_BYTE_finish:   @ field ptr in r0
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r1, [r0, #offStaticField_value] @ field<- vAA
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_CHAR: /* 0x6c */
+/* File: armv5/OP_SPUT_CHAR.S */
+/* File: armv5/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_CHAR_resolve         @ yes, do resolve
+.LOP_SPUT_CHAR_finish:   @ field ptr in r0
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r1, [r0, #offStaticField_value] @ field<- vAA
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SPUT_SHORT: /* 0x6d */
+/* File: armv5/OP_SPUT_SHORT.S */
+/* File: armv5/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field@BBBB */
+    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
+    FETCH(r1, 1)                        @ r1<- field ref BBBB
+    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ is resolved entry null?
+    beq     .LOP_SPUT_SHORT_resolve         @ yes, do resolve
+.LOP_SPUT_SHORT_finish:   @ field ptr in r0
+    mov     r2, rINST, lsr #8           @ r2<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_VREG(r1, r2)                    @ r1<- fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r1, [r0, #offStaticField_value] @ field<- vAA
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: armv5/OP_INVOKE_VIRTUAL.S */
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    FETCH(r10, 2)                       @ r10<- GFED or CCCC
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    .if     (!0)
+    and     r10, r10, #15               @ r10<- D (or stays CCCC)
+    .endif
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     .LOP_INVOKE_VIRTUAL_continue        @ yes, continue on
+    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_INVOKE_VIRTUAL_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_SUPER: /* 0x6f */
+/* File: armv5/OP_INVOKE_SUPER.S */
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(r10, 2)                       @ r10<- GFED or CCCC
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    .if     (!0)
+    and     r10, r10, #15               @ r10<- D (or stays CCCC)
+    .endif
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r2, #0                      @ null "this"?
+    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
+    beq     common_errNullObject        @ null "this", throw exception
+    cmp     r0, #0                      @ already resolved?
+    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
+    EXPORT_PC()                         @ must export for invoke
+    bne     .LOP_INVOKE_SUPER_continue        @ resolved, continue on
+    b       .LOP_INVOKE_SUPER_resolve         @ do resolve now
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: armv5/OP_INVOKE_DIRECT.S */
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoiding loading the first arg twice.)
+     *
+     * for: invoke-direct, invoke-direct/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    FETCH(r10, 2)                       @ r10<- GFED or CCCC
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    .if     (!0)
+    and     r10, r10, #15               @ r10<- D (or stays CCCC)
+    .endif
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    beq     .LOP_INVOKE_DIRECT_resolve         @ not resolved, do it now
+.LOP_INVOKE_DIRECT_finish:
+    cmp     r2, #0                      @ null "this" ref?
+    bne     common_invokeMethodNoRange   @ no, continue on
+    b       common_errNullObject        @ yes, throw exception
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_STATIC: /* 0x71 */
+/* File: armv5/OP_INVOKE_STATIC.S */
+    /*
+     * Handle a static method call.
+     *
+     * for: invoke-static, invoke-static/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     common_invokeMethodNoRange @ yes, continue on
+0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_STATIC          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     common_invokeMethodNoRange @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: armv5/OP_INVOKE_INTERFACE.S */
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
+    FETCH(r1, 1)                        @ r1<- BBBB
+    .if     (!0)
+    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
+    .endif
+    EXPORT_PC()                         @ must export for invoke
+    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
+    cmp     r0, #0                      @ null obj?
+    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
+    beq     common_errNullObject        @ yes, fail
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
+    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       common_invokeMethodNoRange @ jump to common handler 
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_73: /* 0x73 */
+/* File: armv5/OP_UNUSED_73.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: armv5/OP_INVOKE_VIRTUAL_RANGE.S */
+/* File: armv5/OP_INVOKE_VIRTUAL.S */
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    FETCH(r10, 2)                       @ r10<- GFED or CCCC
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    .if     (!1)
+    and     r10, r10, #15               @ r10<- D (or stays CCCC)
+    .endif
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     .LOP_INVOKE_VIRTUAL_RANGE_continue        @ yes, continue on
+    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_INVOKE_VIRTUAL_RANGE_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: armv5/OP_INVOKE_SUPER_RANGE.S */
+/* File: armv5/OP_INVOKE_SUPER.S */
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(r10, 2)                       @ r10<- GFED or CCCC
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    .if     (!1)
+    and     r10, r10, #15               @ r10<- D (or stays CCCC)
+    .endif
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
+    cmp     r2, #0                      @ null "this"?
+    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
+    beq     common_errNullObject        @ null "this", throw exception
+    cmp     r0, #0                      @ already resolved?
+    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
+    EXPORT_PC()                         @ must export for invoke
+    bne     .LOP_INVOKE_SUPER_RANGE_continue        @ resolved, continue on
+    b       .LOP_INVOKE_SUPER_RANGE_resolve         @ do resolve now
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: armv5/OP_INVOKE_DIRECT_RANGE.S */
+/* File: armv5/OP_INVOKE_DIRECT.S */
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoiding loading the first arg twice.)
+     *
+     * for: invoke-direct, invoke-direct/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    FETCH(r10, 2)                       @ r10<- GFED or CCCC
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    .if     (!1)
+    and     r10, r10, #15               @ r10<- D (or stays CCCC)
+    .endif
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr
+    beq     .LOP_INVOKE_DIRECT_RANGE_resolve         @ not resolved, do it now
+.LOP_INVOKE_DIRECT_RANGE_finish:
+    cmp     r2, #0                      @ null "this" ref?
+    bne     common_invokeMethodRange   @ no, continue on
+    b       common_errNullObject        @ yes, throw exception
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: armv5/OP_INVOKE_STATIC_RANGE.S */
+/* File: armv5/OP_INVOKE_STATIC.S */
+    /*
+     * Handle a static method call.
+     *
+     * for: invoke-static, invoke-static/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
+    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
+    cmp     r0, #0                      @ already resolved?
+    EXPORT_PC()                         @ must export for invoke
+    bne     common_invokeMethodRange @ yes, continue on
+0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_STATIC          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     common_invokeMethodRange @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: armv5/OP_INVOKE_INTERFACE_RANGE.S */
+/* File: armv5/OP_INVOKE_INTERFACE.S */
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    @ Range form: the ".if (!1)" below is compiled out, so r2 keeps the
+    @ full 16-bit CCCC register index of the first argument ("this").
+    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
+    FETCH(r1, 1)                        @ r1<- BBBB
+    .if     (!1)
+    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
+    .endif
+    EXPORT_PC()                         @ must export for invoke
+    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
+    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
+    cmp     r0, #0                      @ null obj?
+    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
+    beq     common_errNullObject        @ yes, fail
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
+    @ args: r0=this->clazz, r1=BBBB, r2=current method, r3=pDvmDex
+    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle exception
+    b       common_invokeMethodRange @ jump to common handler 
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_79: /* 0x79 */
+/* File: armv5/OP_UNUSED_79.S */
+/* File: armv5/unused.S */
+    @ Unassigned opcode slot; dispatching here is an internal error — abort.
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_7A: /* 0x7a */
+/* File: armv5/OP_UNUSED_7A.S */
+/* File: armv5/unused.S */
+    @ Unassigned opcode slot; dispatching here is an internal error — abort.
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEG_INT: /* 0x7b */
+/* File: armv5/OP_NEG_INT.S */
+/* File: armv5/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0".
+     * This could be an ARM instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    @ vA<- -vB (two's-complement negate, via rsb r0, r0, #0)
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r3)                    @ r0<- vB
+    and     r9, r9, #15
+                               @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    rsb     r0, r0, #0                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NOT_INT: /* 0x7c */
+/* File: armv5/OP_NOT_INT.S */
+/* File: armv5/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0".
+     * This could be an ARM instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    @ vA<- ~vB (bitwise complement, via mvn)
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r3)                    @ r0<- vB
+    and     r9, r9, #15
+                               @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    mvn     r0, r0                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEG_LONG: /* 0x7d */
+/* File: armv5/OP_NEG_LONG.S */
+/* File: armv5/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0/r1".
+     * This could be an ARM instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    @ vA/vA+1<- -(vB/vB+1): 64-bit negate as 0 - value
+    @ (rsbs on the low word sets carry for the rsc on the high word)
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    rsbs    r0, r0, #0                           @ optional op; may set condition codes
+    rsc     r1, r1, #0                              @ r0/r1<- op, r2-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NOT_LONG: /* 0x7e */
+/* File: armv5/OP_NOT_LONG.S */
+/* File: armv5/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0/r1".
+     * This could be an ARM instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    @ vA/vA+1<- ~(vB/vB+1): bitwise complement of both 32-bit halves
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    mvn     r0, r0                           @ optional op; may set condition codes
+    mvn     r1, r1                              @ r0/r1<- op, r2-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEG_FLOAT: /* 0x7f */
+/* File: armv5/OP_NEG_FLOAT.S */
+/* File: armv5/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0".
+     * This could be an ARM instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    @ vA<- vB with bit 31 toggled: adding 0x80000000 mod 2^32 flips
+    @ only the IEEE-754 sign bit, i.e. a float negate without FP help.
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r3)                    @ r0<- vB
+    and     r9, r9, #15
+                               @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    add     r0, r0, #0x80000000                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_NEG_DOUBLE: /* 0x80 */
+/* File: armv5/OP_NEG_DOUBLE.S */
+/* File: armv5/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0/r1".
+     * This could be an ARM instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    @ vA/vA+1<- vB/vB+1 with bit 31 of the high word toggled
+    @ (double negate: only the sign bit of the IEEE-754 high word changes)
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    add     r1, r1, #0x80000000                              @ r0/r1<- op, r2-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INT_TO_LONG: /* 0x81 */
+/* File: armv5/OP_INT_TO_LONG.S */
+/* File: armv5/unopWider.S */
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op r0", where
+     * "result" is a 64-bit quantity in r0/r1.
+     *
+     * For: int-to-long, int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    @ vA/vA+1<- sign-extended vB (high word = vB arithmetically shifted by 31)
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r3)                    @ r0<- vB
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+                               @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    mov     r1, r0, asr #31                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-11 instructions */
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: armv5/OP_INT_TO_FLOAT.S */
+/* File: armv5/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0".
+     * This could be an ARM instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    @ vA<- (float)vB via the EABI soft-float helper __aeabi_i2f
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r3)                    @ r0<- vB
+    and     r9, r9, #15
+                               @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    bl      __aeabi_i2f                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: armv5/OP_INT_TO_DOUBLE.S */
+/* File: armv5/unopWider.S */
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op r0", where
+     * "result" is a 64-bit quantity in r0/r1.
+     *
+     * For: int-to-long, int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    @ vA/vA+1<- (double)vB via __aeabi_i2d (64-bit result in r0/r1)
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r3)                    @ r0<- vB
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+                               @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    bl      __aeabi_i2d                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-11 instructions */
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_LONG_TO_INT: /* 0x84 */
+/* File: armv5/OP_LONG_TO_INT.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: armv5/OP_MOVE.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    @ vA<- low 32 bits of vB (truncating long-to-int == plain register move)
+    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
+    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    GET_VREG(r2, r1)                    @ r2<- fp[B]
+    and     r0, r0, #15
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    SET_VREG(r2, r0)                    @ fp[A]<- r2
+    GOTO_OPCODE(ip)                     @ execute next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: armv5/OP_LONG_TO_FLOAT.S */
+/* File: armv5/unopNarrower.S */
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op r0/r1", where
+     * "result" is a 32-bit quantity in r0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     *
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     */
+    /* unop vA, vB */
+    @ vA<- (float)(vB/vB+1) via __aeabi_l2f
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
+    and     r9, r9, #15
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    bl      __aeabi_l2f                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-11 instructions */
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: armv5/OP_LONG_TO_DOUBLE.S */
+/* File: armv5/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0/r1".
+     * This could be an ARM instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    @ vA/vA+1<- (double)(vB/vB+1) via __aeabi_l2d
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    bl      __aeabi_l2d                              @ r0/r1<- op, r2-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: armv5/OP_FLOAT_TO_INT.S */
+/* EABI appears to have Java-style conversions of +inf/-inf/NaN */
+/* File: armv5/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0".
+     * This could be an ARM instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    @ vA<- (int)vB via __aeabi_f2iz (truncate toward zero); relies on the
+    @ EABI helper giving Java-style NaN/inf results per the note above.
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r3)                    @ r0<- vB
+    and     r9, r9, #15
+                               @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    bl      __aeabi_f2iz                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 9-10 instructions */
+
+
+/* Disabled alternative with explicit min/max/NaN clamping, kept for reference: */
+#if 0
+@include "armv5/unop.S" {"instr":"bl      f2i_doconv"}
+@break
+/*
+ * Convert the float in r0 to an int in r0.
+ *
+ * We have to clip values to int min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer.  The EABI convert function isn't doing this for us.
+ */
+f2i_doconv:
+    stmfd   sp!, {r4, lr}
+    mov     r1, #0x4f000000             @ (float)maxint
+    mov     r4, r0
+    bl      __aeabi_fcmpge              @ is arg >= maxint?
+    cmp     r0, #0                      @ nonzero == yes
+    mvnne   r0, #0x80000000             @ return maxint (7fffffff)
+    ldmnefd sp!, {r4, pc}
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, #0xcf000000             @ (float)minint
+    bl      __aeabi_fcmple              @ is arg <= minint?
+    cmp     r0, #0                      @ nonzero == yes
+    movne   r0, #0x80000000             @ return minint (80000000)
+    ldmnefd sp!, {r4, pc}
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r4
+    bl      __aeabi_fcmpeq              @ is arg == self?
+    cmp     r0, #0                      @ zero == no
+    ldmeqfd sp!, {r4, pc}               @ return zero for NaN
+
+    mov     r0, r4                      @ recover arg
+    bl      __aeabi_f2iz                @ convert float to int
+    ldmfd   sp!, {r4, pc}
+#endif
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: armv5/OP_FLOAT_TO_LONG.S */
+@include "armv5/unopWider.S" {"instr":"bl      __aeabi_f2lz"}
+/* File: armv5/unopWider.S */
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op r0", where
+     * "result" is a 64-bit quantity in r0/r1.
+     *
+     * For: int-to-long, int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    @ vA/vA+1<- (long)vB via f2l_doconv, an out-of-line helper defined
+    @ elsewhere in this file (used instead of the plain __aeabi_f2lz shown
+    @ in the @include above — presumably to clamp NaN/±inf per Java
+    @ semantics, cf. d2i_doconv below; confirm at its definition).
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r3)                    @ r0<- vB
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+                               @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    bl      f2l_doconv                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-11 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: armv5/OP_FLOAT_TO_DOUBLE.S */
+/* File: armv5/unopWider.S */
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op r0", where
+     * "result" is a 64-bit quantity in r0/r1.
+     *
+     * For: int-to-long, int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    @ vA/vA+1<- (double)vB via __aeabi_f2d (widening, always exact)
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r3)                    @ r0<- vB
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+                               @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    bl      __aeabi_f2d                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-11 instructions */
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: armv5/OP_DOUBLE_TO_INT.S */
+/* EABI appears to have Java-style conversions of +inf/-inf/NaN */
+/* File: armv5/unopNarrower.S */
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op r0/r1", where
+     * "result" is a 32-bit quantity in r0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     *
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     */
+    /* unop vA, vB */
+    @ vA<- (int)(vB/vB+1) via __aeabi_d2iz (truncate toward zero); relies
+    @ on the EABI helper giving Java-style NaN/inf results per the note above.
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
+    and     r9, r9, #15
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    bl      __aeabi_d2iz                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-11 instructions */
+
+
+/* Disabled alternative with explicit min/max/NaN clamping, kept for reference: */
+#if 0
+@include "armv5/unopNarrower.S" {"instr":"bl      d2i_doconv"}
+@break
+/*
+ * Convert the double in r0/r1 to an int in r0.
+ *
+ * We have to clip values to int min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer.  The EABI convert function isn't doing this for us.
+ */
+d2i_doconv:
+    stmfd   sp!, {r4, r5, lr}           @ save regs
+    ldr     r2, .LOP_DOUBLE_TO_INT_maxlo       @ (double)maxint, lo
+    ldr     r3, .LOP_DOUBLE_TO_INT_maxhi       @ (double)maxint, hi
+    sub     sp, sp, #4                  @ align for EABI
+    mov     r4, r0                      @ save r0
+    mov     r5, r1                      @  and r1
+    bl      __aeabi_dcmpge              @ is arg >= maxint?
+    cmp     r0, #0                      @ nonzero == yes
+    mvnne   r0, #0x80000000             @ return maxint (7fffffff)
+    bne     1f
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r5
+    ldr     r3, .LOP_DOUBLE_TO_INT_min         @ (double)minint, hi
+    mov     r2, #0                      @ (double)minint, lo
+    bl      __aeabi_dcmple              @ is arg <= minint?
+    cmp     r0, #0                      @ nonzero == yes
+    movne   r0, #0x80000000             @ return minint (80000000)
+    bne     1f
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r5
+    mov     r2, r4                      @ compare against self
+    mov     r3, r5
+    bl      __aeabi_dcmpeq              @ is arg == self?
+    cmp     r0, #0                      @ zero == no
+    beq     1f                          @ return zero for NaN
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r5
+    bl      __aeabi_d2iz                @ convert double to int
+
+1:
+    add     sp, sp, #4
+    ldmfd   sp!, {r4, r5, pc}
+
+.LOP_DOUBLE_TO_INT_maxlo:
+    .word   0xffc00000                  @ maxint, as a double (low word)
+.LOP_DOUBLE_TO_INT_maxhi:
+    .word   0x41dfffff                  @ maxint, as a double (high word)
+.LOP_DOUBLE_TO_INT_min:
+    .word   0xc1e00000                  @ minint, as a double (high word)
+#endif
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: armv5/OP_DOUBLE_TO_LONG.S */
+@include "armv5/unopWide.S" {"instr":"bl      __aeabi_d2lz"}
+/* File: armv5/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0/r1".
+     * This could be an ARM instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    @ vA/vA+1<- (long)(vB/vB+1) via d2l_doconv, an out-of-line helper
+    @ defined elsewhere in this file (used instead of the plain __aeabi_d2lz
+    @ shown in the @include above — presumably for Java NaN/inf clamping,
+    @ cf. d2i_doconv above; confirm at its definition).
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    bl      d2l_doconv                              @ r0/r1<- op, r2-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-13 instructions */
+
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: armv5/OP_DOUBLE_TO_FLOAT.S */
+/* File: armv5/unopNarrower.S */
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op r0/r1", where
+     * "result" is a 32-bit quantity in r0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     *
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     */
+    /* unop vA, vB */
+    @ vA<- (float)(vB/vB+1) via __aeabi_d2f (narrowing double-to-float)
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
+    and     r9, r9, #15
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    bl      __aeabi_d2f                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-11 instructions */
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INT_TO_BYTE: /* 0x8d */
+/* File: armv5/OP_INT_TO_BYTE.S */
+/* File: armv5/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0".
+     * This could be an ARM instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    @ vA<- sign-extended low 8 bits of vB (shift left 24, arithmetic right 24)
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r3)                    @ r0<- vB
+    and     r9, r9, #15
+    mov     r0, r0, asl #24                           @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    mov     r0, r0, asr #24                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INT_TO_CHAR: /* 0x8e */
+/* File: armv5/OP_INT_TO_CHAR.S */
+/* File: armv5/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0".
+     * This could be an ARM instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    @ vA<- zero-extended low 16 bits of vB (lsr: char is unsigned)
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r3)                    @ r0<- vB
+    and     r9, r9, #15
+    mov     r0, r0, asl #16                           @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    mov     r0, r0, lsr #16                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 9-10 instructions */
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INT_TO_SHORT: /* 0x8f */
+/* File: armv5/OP_INT_TO_SHORT.S */
+/* File: armv5/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op r0".
+     * This could be an ARM instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    @ vA<- sign-extended low 16 bits of vB (asr: short is signed)
+    mov     r3, rINST, lsr #12          @ r3<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r3)                    @ r0<- vB
+    and     r9, r9, #15
+    mov     r0, r0, asl #16                           @ optional op; may set condition codes
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    mov     r0, r0, asr #16                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 9-10 instructions */
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_ADD_INT: /* 0x90 */
+/* File: armv5/OP_ADD_INT.S */
+/* File: armv5/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+     *      mul-float, div-float, rem-float
+     */
+    /* binop vAA, vBB, vCC */
+    @ vAA<- vBB + vCC (32-bit wrap-around add; chkzero=0, so the
+    @ divide-by-zero check below is compiled out)
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    GET_VREG(r1, r3)                    @ r1<- vCC
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    add     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SUB_INT: /* 0x91 */
+/* File: armv5/OP_SUB_INT.S */
+/* File: armv5/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+     *      mul-float, div-float, rem-float
+     */
+    /* binop vAA, vBB, vCC */
+    @ vAA<- vBB - vCC (32-bit wrap-around subtract; chkzero=0, so the
+    @ divide-by-zero check below is compiled out)
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    GET_VREG(r1, r3)                    @ r1<- vCC
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    sub     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT: /* 0x92 */
/* mul-int: vAA <- vBB * vCC, low 32 bits of the product */
/* File: armv5/OP_MUL_INT.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    mul     r0, r1, r0                              @ r0<- op, r0-r3 changed (Rd != Rm constraint: operands swapped)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)               @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT: /* 0x93 */
/* div-int: vAA <- vBB / vCC via the EABI runtime helper __aeabi_idiv; */
/* the chkzero block below is enabled (.if 1), branching to the common */
/* divide-by-zero handler before the call. */
/* File: armv5/OP_DIV_INT.S */
/* File: armv5/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    bl     __aeabi_idiv                              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)               @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_REM_INT: /* 0x94 */
/* rem-int: vAA <- vBB % vCC.  __aeabi_idivmod leaves the remainder in */
/* r1 (quotient in r0), hence SET_VREG stores r1 below.  Zero divisor */
/* check enabled (.if 1). */
/* File: armv5/OP_REM_INT.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    bl      __aeabi_idivmod                              @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)               @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_AND_INT: /* 0x95 */
/* and-int: vAA <- vBB & vCC, bitwise AND */
/* File: armv5/OP_AND_INT.S */
/* File: armv5/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    and     r0, r0, r1                              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)               @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_OR_INT: /* 0x96 */
/* or-int: vAA <- vBB | vCC, bitwise OR */
/* File: armv5/OP_OR_INT.S */
/* File: armv5/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    orr     r0, r0, r1                              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)               @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT: /* 0x97 */
/* xor-int: vAA <- vBB ^ vCC, bitwise exclusive-OR */
/* File: armv5/OP_XOR_INT.S */
/* File: armv5/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    eor     r0, r0, r1                              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)               @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT: /* 0x98 */
/* shl-int: vAA <- vBB << (vCC & 0x1f); the "and r1, r1, #31" below */
/* masks the shift distance to the low 5 bits as Dalvik requires. */
/* File: armv5/OP_SHL_INT.S */
/* File: armv5/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                           @ optional op; may set condition codes
    mov     r0, r0, asl r1                              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)               @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT: /* 0x99 */
/* shr-int: vAA <- vBB >> (vCC & 0x1f), arithmetic (sign-propagating) */
/* right shift via "asr"; shift distance masked to the low 5 bits. */
/* File: armv5/OP_SHR_INT.S */
/* File: armv5/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                           @ optional op; may set condition codes
    mov     r0, r0, asr r1                              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)               @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT: /* 0x9a */
/* ushr-int: vAA <- vBB >>> (vCC & 0x1f), logical (zero-filling) right */
/* shift via "lsr"; shift distance masked to the low 5 bits. */
/* File: armv5/OP_USHR_INT.S */
/* File: armv5/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                           @ optional op; may set condition codes
    mov     r0, r0, lsr r1                              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)               @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_ADD_LONG: /* 0x9b */
/* add-long: vAA/vAA+1 <- vBB/vBB+1 + vCC/vCC+1, 64-bit add built from */
/* "adds" (sets carry) followed by "adc" (consumes it) on the high word. */
/* File: armv5/OP_ADD_LONG.S */
/* File: armv5/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    adds    r0, r0, r2                           @ optional op; may set condition codes
    adc     r1, r1, r3                              @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_SUB_LONG: /* 0x9c */
/* sub-long: vAA/vAA+1 <- vBB/vBB+1 - vCC/vCC+1, 64-bit subtract built */
/* from "subs" (sets borrow via carry) followed by "sbc" on the high word. */
/* File: armv5/OP_SUB_LONG.S */
/* File: armv5/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    subs    r0, r0, r2                           @ optional op; may set condition codes
    sbc     r1, r1, r3                              @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_MUL_LONG: /* 0x9d */
/* mul-long: vAA/vAA+1 <- vBB/vBB+1 * vCC/vCC+1 (low 64 bits).  Partial */
/* products are accumulated in r9/r10 here; the handler does not fit in */
/* the 64-byte slot, so it branches to .LOP_MUL_LONG_finish (defined */
/* elsewhere in this file) which presumably stores the result -- verify */
/* against the out-of-line section. */
/* File: armv5/OP_MUL_LONG.S */
    /*
     * Signed 64-bit integer multiply.
     *
     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
     *        WX
     *      x YZ
     *  --------
     *     ZW ZX
     *  YW YX
     *
     * The low word of the result holds ZX, the high word holds
     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
     * it doesn't fit in the low 64 bits.
     *
     * Unlike most ARM math operations, multiply instructions have
     * restrictions on using the same register more than once (Rd and Rm
     * cannot be the same).
     */
    /* mul-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    mul     ip, r2, r1                  @  ip<- ZxW
    umull   r9, r10, r2, r0             @  r9/r10 <- ZxX
    mla     r2, r0, r3, ip              @  r2<- YxX + (ZxW)
    mov     r0, rINST, lsr #8           @ r0<- AA
    add     r10, r2, r10                @  r10<- r10 + low(ZxW + (YxX))
    add     r0, rFP, r0, lsl #2         @ r0<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_MUL_LONG_finish
+
+/* ------------------------------ */
    .balign 64
.L_OP_DIV_LONG: /* 0x9e */
/* div-long: vAA/vAA+1 <- vBB/vBB+1 / vCC/vCC+1 via __aeabi_ldivmod, */
/* which returns the quotient in r0/r1.  The enabled (.if 1) zero check */
/* ORs both halves of the divisor so a 64-bit zero is caught. */
/* File: armv5/OP_DIV_LONG.S */
/* File: armv5/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_ldivmod                              @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_REM_LONG: /* 0x9f */
/* rem-long: vAA/vAA+1 <- vBB/vBB+1 % vCC/vCC+1.  __aeabi_ldivmod */
/* returns the remainder in r2/r3, hence the stmia below stores r2/r3 */
/* (unlike div-long, which stores the r0/r1 quotient).  Zero divisor */
/* check enabled (.if 1). */
/* File: armv5/OP_REM_LONG.S */
/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
/* File: armv5/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_ldivmod                              @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2,r3}     @ vAA/vAA+1<- r2/r3
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_AND_LONG: /* 0xa0 */
/* and-long: vAA/vAA+1 <- vBB/vBB+1 & vCC/vCC+1, bitwise AND applied */
/* independently to the low and high words. */
/* File: armv5/OP_AND_LONG.S */
/* File: armv5/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r2                           @ optional op; may set condition codes
    and     r1, r1, r3                              @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_OR_LONG: /* 0xa1 */
/* or-long: vAA/vAA+1 <- vBB/vBB+1 | vCC/vCC+1, bitwise OR applied */
/* independently to the low and high words. */
/* File: armv5/OP_OR_LONG.S */
/* File: armv5/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r2                           @ optional op; may set condition codes
    orr     r1, r1, r3                              @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_XOR_LONG: /* 0xa2 */
/* xor-long: vAA/vAA+1 <- vBB/vBB+1 ^ vCC/vCC+1, bitwise exclusive-OR */
/* applied independently to the low and high words. */
/* File: armv5/OP_XOR_LONG.S */
/* File: armv5/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r2                           @ optional op; may set condition codes
    eor     r1, r1, r3                              @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG: /* 0xa3 */
/* shl-long: vAA/vAA+1 <- vBB/vBB+1 << (vCC & 0x3f).  The high word is */
/* assembled from the shifted high half plus the bits that spill over */
/* from the low half; for shifts >= 32 the "movpl" replaces it with the */
/* low word shifted by (count-32).  Continues at .LOP_SHL_LONG_finish */
/* (defined elsewhere in this file -- presumably computes the low word */
/* and stores the pair; verify against the out-of-line section). */
/* File: armv5/OP_SHL_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shl-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r1, r1, asl r2              @  r1<- r1 << r2
    rsb     r3, r2, #32                 @  r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @  r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @  ip<- r2 - 32
    movpl   r1, r0, asl ip              @  if r2 >= 32, r1<- r0 << (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHL_LONG_finish
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SHR_LONG: /* 0xa4 */
+/* File: armv5/OP_SHR_LONG.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shr-long vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r3, r0, #255                @ r3<- BB
+    mov     r0, r0, lsr #8              @ r0<- CC
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
+    GET_VREG(r2, r0)                    @ r2<- vCC
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+
+    mov     r0, r0, lsr r2              @  r0<- r0 >> r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @  r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    movpl   r0, r1, asr ip              @  if r2 >= 32, r0<-r1 >> (r2-32)
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    b       .LOP_SHR_LONG_finish
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_USHR_LONG: /* 0xa5 */
+/* File: armv5/OP_USHR_LONG.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* ushr-long vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r3, r0, #255                @ r3<- BB
+    mov     r0, r0, lsr #8              @ r0<- CC
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
+    GET_VREG(r2, r0)                    @ r2<- vCC
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+
+    mov     r0, r0, lsr r2              @  r0<- r0 >> r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @  r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    movpl   r0, r1, lsr ip              @  if r2 >= 32, r0<-r1 >>> (r2-32)
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    b       .LOP_USHR_LONG_finish
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_ADD_FLOAT: /* 0xa6 */
+/* File: armv5/OP_ADD_FLOAT.S */
+/* File: armv5/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+     *      mul-float, div-float, rem-float
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    GET_VREG(r1, r3)                    @ r1<- vCC
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    bl      __aeabi_fadd                              @ r0<- vBB + vCC (soft-float add)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SUB_FLOAT: /* 0xa7 */
+/* File: armv5/OP_SUB_FLOAT.S */
+/* File: armv5/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+     *      mul-float, div-float, rem-float
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    GET_VREG(r1, r3)                    @ r1<- vCC
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    bl      __aeabi_fsub                              @ r0<- vBB - vCC (soft-float sub)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MUL_FLOAT: /* 0xa8 */
+/* File: armv5/OP_MUL_FLOAT.S */
+/* File: armv5/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+     *      mul-float, div-float, rem-float
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    GET_VREG(r1, r3)                    @ r1<- vCC
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    bl      __aeabi_fmul                              @ r0<- vBB * vCC (soft-float mul)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_DIV_FLOAT: /* 0xa9 */
+/* File: armv5/OP_DIV_FLOAT.S */
+/* File: armv5/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+     *      mul-float, div-float, rem-float
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    GET_VREG(r1, r3)                    @ r1<- vCC
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    bl      __aeabi_fdiv                              @ r0<- vBB / vCC (soft-float div; no zero check for float)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_REM_FLOAT: /* 0xaa */
+/* File: armv5/OP_REM_FLOAT.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: armv5/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+     *      mul-float, div-float, rem-float
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     r3, r0, lsr #8              @ r3<- CC
+    and     r2, r0, #255                @ r2<- BB
+    GET_VREG(r1, r3)                    @ r1<- vCC
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+                               @ optional op; may set condition codes
+    bl      fmodf                              @ r0<- fmodf(vBB, vCC) from libm
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_ADD_DOUBLE: /* 0xab */
+/* File: armv5/OP_ADD_DOUBLE.S */
+/* File: armv5/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
+    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_dadd                              @ r0/r1<- vBB + vCC (soft-double add)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SUB_DOUBLE: /* 0xac */
+/* File: armv5/OP_SUB_DOUBLE.S */
+/* File: armv5/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
+    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_dsub                              @ r0/r1<- vBB - vCC (soft-double sub)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MUL_DOUBLE: /* 0xad */
+/* File: armv5/OP_MUL_DOUBLE.S */
+/* File: armv5/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
+    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_dmul                              @ r0/r1<- vBB * vCC (soft-double mul)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_DIV_DOUBLE: /* 0xae */
+/* File: armv5/OP_DIV_DOUBLE.S */
+/* File: armv5/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
+    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_ddiv                              @ r0/r1<- vBB / vCC (soft-double div; no zero check for double)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_REM_DOUBLE: /* 0xaf */
+/* File: armv5/OP_REM_DOUBLE.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: armv5/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
+    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
+    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
+    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      fmod                              @ r0/r1<- fmod(vBB, vCC) from libm
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: armv5/OP_ADD_INT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15                 @ r9<- A
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    add     r0, r0, r1                              @ r0<- vA + vB
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: armv5/OP_SUB_INT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15                 @ r9<- A
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    sub     r0, r0, r1                              @ r0<- vA - vB
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: armv5/OP_MUL_INT_2ADDR.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15                 @ r9<- A
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    mul     r0, r1, r0                              @ r0<- vB * vA (Rd != Rm on ARMv5)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: armv5/OP_DIV_INT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15                 @ r9<- A
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 1
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl     __aeabi_idiv                              @ r0<- vA / vB (EABI signed div)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: armv5/OP_REM_INT_2ADDR.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15                 @ r9<- A
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 1
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_idivmod                              @ r1<- vA % vB (remainder)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r1, r9)               @ vA<- r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: armv5/OP_AND_INT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15                 @ r9<- A
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    and     r0, r0, r1                              @ r0<- vA & vB
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: armv5/OP_OR_INT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    orr     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: armv5/OP_XOR_INT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    eor     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: armv5/OP_SHL_INT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+    and     r1, r1, #31                           @ optional op; may set condition codes
+    mov     r0, r0, asl r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: armv5/OP_SHR_INT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+    and     r1, r1, #31                           @ optional op; may set condition codes
+    mov     r0, r0, asr r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: armv5/OP_USHR_INT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+    and     r1, r1, #31                           @ optional op; may set condition codes
+    mov     r0, r0, lsr r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: armv5/OP_ADD_LONG_2ADDR.S */
+/* File: armv5/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+    adds    r0, r0, r2                           @ optional op; may set condition codes
+    adc     r1, r1, r3                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: armv5/OP_SUB_LONG_2ADDR.S */
+/* File: armv5/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+    subs    r0, r0, r2                           @ optional op; may set condition codes
+    sbc     r1, r1, r3                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: armv5/OP_MUL_LONG_2ADDR.S */
+    /*
+     * Signed 64-bit integer multiply, "/2addr" version.
+     *
+     * See OP_MUL_LONG for an explanation.
+     *
+     * We get a little tight on registers, so to avoid looking up &fp[A]
+     * again we stuff it into rINST.
+     */
+    /* mul-long/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     rINST, rFP, r9, lsl #2      @ rINST<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   rINST, {r0-r1}              @ r0/r1<- vAA/vAA+1
+    mul     ip, r2, r1                  @  ip<- ZxW
+    umull   r9, r10, r2, r0             @  r9/r10 <- ZxX
+    mla     r2, r0, r3, ip              @  r2<- YxX + (ZxW)
+    mov     r0, rINST                   @ r0<- &fp[A] (free up rINST)
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    add     r10, r2, r10                @  r10<- r10 + low(ZxW + (YxX))
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r0, {r9-r10}                @ vAA/vAA+1<- r9/r10
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: armv5/OP_DIV_LONG_2ADDR.S */
+/* File: armv5/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+    .if 1
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_ldivmod                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: armv5/OP_REM_LONG_2ADDR.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+/* File: armv5/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+    .if 1
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_ldivmod                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r2,r3}     @ vAA/vAA+1<- r2/r3
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: armv5/OP_AND_LONG_2ADDR.S */
+/* File: armv5/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+    and     r0, r0, r2                           @ optional op; may set condition codes
+    and     r1, r1, r3                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: armv5/OP_OR_LONG_2ADDR.S */
+/* File: armv5/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+    orr     r0, r0, r2                           @ optional op; may set condition codes
+    orr     r1, r1, r3                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: armv5/OP_XOR_LONG_2ADDR.S */
+/* File: armv5/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+    eor     r0, r0, r2                           @ optional op; may set condition codes
+    eor     r1, r1, r3                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: armv5/OP_SHL_LONG_2ADDR.S */
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shl-long/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r2, r3)                    @ r2<- vB
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+
+    mov     r1, r1, asl r2              @  r1<- r1 << r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @  r1<- r1 | (r0 << (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    movpl   r1, r0, asl ip              @  if r2 >= 32, r1<- r0 << (r2-32)
+    mov     r0, r0, asl r2              @  r0<- r0 << r2
+    b       .LOP_SHL_LONG_2ADDR_finish
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: armv5/OP_SHR_LONG_2ADDR.S */
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shr-long/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r2, r3)                    @ r2<- vB
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+
+    mov     r0, r0, lsr r2              @  r0<- r2 >> r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @  r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    movpl   r0, r1, asr ip              @  if r2 >= 32, r0<-r1 >> (r2-32)
+    mov     r1, r1, asr r2              @  r1<- r1 >> r2
+    b       .LOP_SHR_LONG_2ADDR_finish
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: armv5/OP_USHR_LONG_2ADDR.S */
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* ushr-long/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r2, r3)                    @ r2<- vB
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+
+    mov     r0, r0, lsr r2              @  r0<- r2 >> r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @  r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+    movpl   r0, r1, lsr ip              @  if r2 >= 32, r0<-r1 >>> (r2-32)
+    mov     r1, r1, lsr r2              @  r1<- r1 >>> r2
+    b       .LOP_USHR_LONG_2ADDR_finish
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: armv5/OP_ADD_FLOAT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_fadd                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: armv5/OP_SUB_FLOAT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_fsub                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: armv5/OP_MUL_FLOAT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_fmul                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* div-float/2addr: vA <- vA / vB via the EABI soft-float helper
+ * __aeabi_fdiv.  No zero check: float division by zero is defined
+ * (produces +/-Inf or NaN), so chkzero stays 0. */
+/* File: armv5/OP_DIV_FLOAT_2ADDR.S */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    /* chkzero=0: check assembled out */
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_fdiv                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* rem-float/2addr: vA <- fmodf(vA, vB).  Calls libm's fmodf because,
+ * as noted below, the ARM EABI runtime has no single-precision
+ * remainder helper. */
+/* File: armv5/OP_REM_FLOAT_2ADDR.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: armv5/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r3, rINST, lsr #12          @ r3<- B
+    and     r9, r9, #15
+    GET_VREG(r0, r9)                    @ r0<- vA
+    GET_VREG(r1, r3)                    @ r1<- vB
+    /* chkzero=0: check assembled out (float remainder of 0 is defined) */
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      fmodf                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* add-double/2addr: vA/vA+1 <- vA/vA+1 + vB/vB+1.  64-bit operands are
+ * passed in register pairs (r0-r1 and r2-r3) to the EABI soft-float
+ * helper __aeabi_dadd. */
+/* File: armv5/OP_ADD_DOUBLE_2ADDR.S */
+/* File: armv5/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    @ r9 becomes a pointer to vA's register pair; it doubles as the
+    @ store address after the call (only r0-r3 are clobbered)
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+    /* chkzero=0: check assembled out */
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_dadd                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* sub-double/2addr: vA/vA+1 <- vA/vA+1 - vB/vB+1 via the EABI
+ * soft-float helper __aeabi_dsub (operands in r0-r1 and r2-r3). */
+/* File: armv5/OP_SUB_DOUBLE_2ADDR.S */
+/* File: armv5/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+    /* chkzero=0: check assembled out */
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_dsub                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    @ r9 (&fp[A]) survived the call; write back the 64-bit result
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* mul-double/2addr: vA/vA+1 <- vA/vA+1 * vB/vB+1 via the EABI
+ * soft-float helper __aeabi_dmul (operands in r0-r1 and r2-r3). */
+/* File: armv5/OP_MUL_DOUBLE_2ADDR.S */
+/* File: armv5/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+    /* chkzero=0: check assembled out */
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_dmul                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* div-double/2addr: vA/vA+1 <- vA/vA+1 / vB/vB+1 via the EABI
+ * soft-float helper __aeabi_ddiv.  No zero check: double division by
+ * zero is defined (Inf/NaN), so chkzero stays 0. */
+/* File: armv5/OP_DIV_DOUBLE_2ADDR.S */
+/* File: armv5/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+    /* chkzero=0: check assembled out */
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_ddiv                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* rem-double/2addr: vA/vA+1 <- fmod(vA/vA+1, vB/vB+1).  Calls libm's
+ * fmod because, as noted below, the ARM EABI runtime defines no
+ * double-precision remainder helper. */
+/* File: armv5/OP_REM_DOUBLE_2ADDR.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: armv5/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r9, r9, #15
+    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
+    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
+    /* chkzero=0: check assembled out (float remainder of 0 is defined) */
+    .if 0
+    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      fmod                              @ result<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0,r1}     @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_ADD_INT_LIT16: /* 0xd0 */
+/* add-int/lit16: vA <- vB + #+CCCC (16-bit literal, sign-extended),
+ * done with a single inline add. */
+/* File: armv5/OP_ADD_INT_LIT16.S */
+/* File: armv5/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
+    mov     r2, rINST, lsr #12          @ r2<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r2)                    @ r0<- vB
+    and     r9, r9, #15
+    /* chkzero=0: check assembled out */
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    add     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_RSUB_INT: /* 0xd1 */
+/* rsub-int: vA <- #+CCCC - vB (reverse subtract), implemented with
+ * ARM's rsb instruction so the literal can be the minuend. */
+/* File: armv5/OP_RSUB_INT.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+/* File: armv5/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
+    mov     r2, rINST, lsr #12          @ r2<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r2)                    @ r0<- vB
+    and     r9, r9, #15
+    /* chkzero=0: check assembled out */
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    @ rsb computes r1 - r0, i.e. literal minus vB
+    rsb     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MUL_INT_LIT16: /* 0xd2 */
+/* mul-int/lit16: vA <- vB * #+CCCC.  Operand order "mul r0, r1, r0"
+ * avoids the armv5 restriction noted below (Rd must differ from Rm). */
+/* File: armv5/OP_MUL_INT_LIT16.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
+    mov     r2, rINST, lsr #12          @ r2<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r2)                    @ r0<- vB
+    and     r9, r9, #15
+    /* chkzero=0: check assembled out */
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    mul     r0, r1, r0                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_DIV_INT_LIT16: /* 0xd3 */
+/* div-int/lit16: vA <- vB / #+CCCC via the EABI helper __aeabi_idiv.
+ * chkzero=1: a zero literal divisor throws before the call. */
+/* File: armv5/OP_DIV_INT_LIT16.S */
+/* File: armv5/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
+    mov     r2, rINST, lsr #12          @ r2<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r2)                    @ r0<- vB
+    and     r9, r9, #15
+    /* chkzero=1: divide-by-zero check is live for this opcode */
+    .if 1
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    bl     __aeabi_idiv                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_REM_INT_LIT16: /* 0xd4 */
+/* rem-int/lit16: vA <- vB % #+CCCC via __aeabi_idivmod.  The result
+ * stored is r1 (the remainder), per the helper convention noted below.
+ * chkzero=1: a zero literal divisor throws before the call. */
+/* File: armv5/OP_REM_INT_LIT16.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
+    mov     r2, rINST, lsr #12          @ r2<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r2)                    @ r0<- vB
+    and     r9, r9, #15
+    /* chkzero=1: divide-by-zero check is live for this opcode */
+    .if 1
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    bl      __aeabi_idivmod                              @ r1<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r1, r9)               @ vAA<- r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_AND_INT_LIT16: /* 0xd5 */
+/* and-int/lit16: vA <- vB & #+CCCC (literal sign-extended), one
+ * inline and instruction. */
+/* File: armv5/OP_AND_INT_LIT16.S */
+/* File: armv5/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
+    mov     r2, rINST, lsr #12          @ r2<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r2)                    @ r0<- vB
+    and     r9, r9, #15
+    /* chkzero=0: check assembled out */
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    and     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_OR_INT_LIT16: /* 0xd6 */
+/* or-int/lit16: vA <- vB | #+CCCC (literal sign-extended), one
+ * inline orr instruction. */
+/* File: armv5/OP_OR_INT_LIT16.S */
+/* File: armv5/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
+    mov     r2, rINST, lsr #12          @ r2<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r2)                    @ r0<- vB
+    and     r9, r9, #15
+    /* chkzero=0: check assembled out */
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    orr     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_XOR_INT_LIT16: /* 0xd7 */
+/* xor-int/lit16: vA <- vB ^ #+CCCC (literal sign-extended), one
+ * inline eor instruction. */
+/* File: armv5/OP_XOR_INT_LIT16.S */
+/* File: armv5/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
+    mov     r2, rINST, lsr #12          @ r2<- B
+    mov     r9, rINST, lsr #8           @ r9<- A+
+    GET_VREG(r0, r2)                    @ r0<- vB
+    and     r9, r9, #15
+    /* chkzero=0: check assembled out */
+    .if 0
+    cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    eor     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_ADD_INT_LIT8: /* 0xd8 */
+/* add-int/lit8: vAA <- vBB + #+CC (8-bit literal, sign-extended). */
+/* File: armv5/OP_ADD_INT_LIT8.S */
+/* File: armv5/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    @ movs sets the condition flags, standing in for the cmp that the
+    @ template keeps commented out inside the (here disabled) zero check
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    /* chkzero=0: check assembled out */
+    .if 0
+    @cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    add     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* rsub-int/lit8: vAA <- #+CC - vBB (reverse subtract with 8-bit
+ * sign-extended literal), implemented with ARM's rsb. */
+/* File: armv5/OP_RSUB_INT_LIT8.S */
+/* File: armv5/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    /* chkzero=0: check assembled out */
+    .if 0
+    @cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    @ rsb computes r1 - r0, i.e. literal minus vBB
+    rsb     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_MUL_INT_LIT8: /* 0xda */
+/* mul-int/lit8: vAA <- vBB * #+CC.  Operand order "mul r0, r1, r0"
+ * avoids the armv5 restriction noted below (Rd must differ from Rm). */
+/* File: armv5/OP_MUL_INT_LIT8.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    /* chkzero=0: check assembled out */
+    .if 0
+    @cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    mul     r0, r1, r0                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_DIV_INT_LIT8: /* 0xdb */
+/* div-int/lit8: vAA <- vBB / #+CC via __aeabi_idiv.  chkzero=1, and
+ * note the cmp inside the check is commented out: the preceding
+ * flag-setting "movs r1, r3, asr #8" already sets Z when the literal
+ * CC is zero, so the beq fires on that without a separate compare. */
+/* File: armv5/OP_DIV_INT_LIT8.S */
+/* File: armv5/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    @ movs sets Z when the sign-extended literal is zero -- this is the
+    @ divide-by-zero test the beq below relies on
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    /* chkzero=1: divide-by-zero check is live for this opcode */
+    .if 1
+    @cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl     __aeabi_idiv                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_REM_INT_LIT8: /* 0xdc */
+/* File: armv5/OP_REM_INT_LIT8.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    @ chkzero==1: "movs" above set Z from the literal CC, so the explicit
+    @ cmp is elided; beq fires exactly when the divisor is zero.
+    .if 1
+    @cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    bl      __aeabi_idivmod                              @ r1<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r1, r9)               @ vAA<- r1 (remainder, per note above)
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_AND_INT_LIT8: /* 0xdd */
+/* File: armv5/OP_AND_INT_LIT8.S */
+/* File: armv5/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    @ chkzero==0: the divide-by-zero check below is assembled out.
+    .if 0
+    @cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    and     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_OR_INT_LIT8: /* 0xde */
+/* File: armv5/OP_OR_INT_LIT8.S */
+/* File: armv5/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    @ chkzero==0: the divide-by-zero check below is assembled out.
+    .if 0
+    @cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    orr     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: armv5/OP_XOR_INT_LIT8.S */
+/* File: armv5/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    @ chkzero==0: the divide-by-zero check below is assembled out.
+    .if 0
+    @cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+                               @ optional op; may set condition codes
+    eor     r0, r0, r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: armv5/OP_SHL_INT_LIT8.S */
+/* File: armv5/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    @ chkzero==0: the divide-by-zero check below is assembled out.
+    .if 0
+    @cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    @ mask the shift count to 0-31 before shifting
+    and     r1, r1, #31                           @ optional op; may set condition codes
+    mov     r0, r0, asl r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: armv5/OP_SHR_INT_LIT8.S */
+/* File: armv5/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    @ chkzero==0: the divide-by-zero check below is assembled out.
+    .if 0
+    @cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    @ mask the shift count to 0-31; "asr" gives the arithmetic (signed) shift
+    and     r1, r1, #31                           @ optional op; may set condition codes
+    mov     r0, r0, asr r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: armv5/OP_USHR_INT_LIT8.S */
+/* File: armv5/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = r0 op r1".
+     * This could be an ARM instruction or a function call.  (If the result
+     * comes back in a register other than r0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (r1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
+    mov     r9, rINST, lsr #8           @ r9<- AA
+    and     r2, r3, #255                @ r2<- BB
+    GET_VREG(r0, r2)                    @ r0<- vBB
+    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
+    @ chkzero==0: the divide-by-zero check below is assembled out.
+    .if 0
+    @cmp     r1, #0                      @ is second operand zero?
+    beq     common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+
+    @ mask the shift count to 0-31; "lsr" gives the logical (unsigned) shift
+    and     r1, r1, #31                           @ optional op; may set condition codes
+    mov     r0, r0, lsr r1                              @ r0<- op, r0-r3 changed
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)               @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+    /* 10-12 instructions */
+
+
+
+/*
+ * Handlers for the unassigned opcodes 0xe3-0xed.  Each one simply
+ * branches to common_abort; there is no recovery path.
+ */
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E3: /* 0xe3 */
+/* File: armv5/OP_UNUSED_E3.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E4: /* 0xe4 */
+/* File: armv5/OP_UNUSED_E4.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E5: /* 0xe5 */
+/* File: armv5/OP_UNUSED_E5.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E6: /* 0xe6 */
+/* File: armv5/OP_UNUSED_E6.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E7: /* 0xe7 */
+/* File: armv5/OP_UNUSED_E7.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E8: /* 0xe8 */
+/* File: armv5/OP_UNUSED_E8.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_E9: /* 0xe9 */
+/* File: armv5/OP_UNUSED_E9.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EA: /* 0xea */
+/* File: armv5/OP_UNUSED_EA.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EB: /* 0xeb */
+/* File: armv5/OP_UNUSED_EB.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EC: /* 0xec */
+/* File: armv5/OP_UNUSED_EC.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_ED: /* 0xed */
+/* File: armv5/OP_UNUSED_ED.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_EXECUTE_INLINE: /* 0xee */
+/* File: armv5/OP_EXECUTE_INLINE.S */
+    /*
+     * Execute a "native inline" instruction.
+     *
+     * We need to call:
+     *  dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref)
+     *
+     * The first four args are in r0-r3, but the last two must be pushed
+     * onto the stack.
+     */
+    /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
+    FETCH(r10, 1)                       @ r10<- BBBB
+    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
+    EXPORT_PC()                         @ can throw
+    sub     sp, sp, #8                  @ make room for arg(s)
+    @ NOTE(review): 8 bytes reserved but only &retval is stored here;
+    @ presumably "ref" (r10) is pushed inside .LOP_EXECUTE_INLINE_continue
+    @ and/or the extra word keeps sp 8-byte aligned -- confirm there.
+    mov     r0, rINST, lsr #12          @ r0<- B
+    str     r1, [sp]                    @ push &glue->retval
+    bl      .LOP_EXECUTE_INLINE_continue        @ make call; will return after
+    add     sp, sp, #8                  @ pop stack
+    cmp     r0, #0                      @ test boolean result of inline
+    beq     common_exceptionThrown      @ returned false, handle exception
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_EF: /* 0xef */
+/* File: armv5/OP_UNUSED_EF.S */
+/* File: armv5/unused.S */
+    bl      common_abort                @ unassigned opcode; no recovery
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
+/* File: armv5/OP_INVOKE_DIRECT_EMPTY.S */
+    /*
+     * invoke-direct-empty is a no-op in a "standard" interpreter:
+     * skip the 3-unit instruction and dispatch the next opcode.
+     */
+    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    GOTO_OPCODE(ip)                     @ execute it
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_F1: /* 0xf1 */
+/* File: armv5/OP_UNUSED_F1.S */
+/* File: armv5/unused.S */
+    bl      common_abort                @ unassigned opcode; no recovery
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_QUICK: /* 0xf2 */
+/* File: armv5/OP_IGET_QUICK.S */
+    /* For: iget-quick, iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    /* Load a 32-bit instance field via a pre-resolved byte offset. */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    GET_VREG(r3, r2)                    @ r3<- object we're operating on
+    FETCH(r1, 1)                        @ r1<- field byte offset
+    cmp     r3, #0                      @ check object for null
+    mov     r2, rINST, lsr #8           @ r2<- A(+) (high nibble masked below)
+    beq     common_errNullObject        @ object was null
+    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    and     r2, r2, #15                 @ r2<- A (keep low nibble only)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[A]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: armv5/OP_IGET_WIDE_QUICK.S */
+    /* iget-wide-quick vA, vB, offset@CCCC */
+    /* Load a 64-bit instance field via a pre-resolved byte offset. */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    GET_VREG(r3, r2)                    @ r3<- object we're operating on
+    FETCH(r1, 1)                        @ r1<- field byte offset
+    cmp     r3, #0                      @ check object for null
+    mov     r2, rINST, lsr #8           @ r2<- A(+) (high nibble masked below)
+    beq     common_errNullObject        @ object was null
+    ldrd    r0, [r3, r1]                @ r0<- obj.field (64 bits, aligned)
+    and     r2, r2, #15                 @ r2<- A (keep low nibble only)
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1 (register pair)
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: armv5/OP_IGET_OBJECT_QUICK.S */
+/* File: armv5/OP_IGET_QUICK.S */
+    /* For: iget-quick, iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    /* Object references are 32 bits here, so this is identical to iget-quick. */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    GET_VREG(r3, r2)                    @ r3<- object we're operating on
+    FETCH(r1, 1)                        @ r1<- field byte offset
+    cmp     r3, #0                      @ check object for null
+    mov     r2, rINST, lsr #8           @ r2<- A(+) (high nibble masked below)
+    beq     common_errNullObject        @ object was null
+    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    and     r2, r2, #15                 @ r2<- A (keep low nibble only)
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[A]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_QUICK: /* 0xf5 */
+/* File: armv5/OP_IPUT_QUICK.S */
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    /* Store a 32-bit instance field via a pre-resolved byte offset. */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    GET_VREG(r3, r2)                    @ r3<- fp[B], the object pointer
+    FETCH(r1, 1)                        @ r1<- field byte offset
+    cmp     r3, #0                      @ check object for null
+    mov     r2, rINST, lsr #8           @ r2<- A(+) (high nibble masked below)
+    beq     common_errNullObject        @ object was null
+    and     r2, r2, #15                 @ r2<- A (keep low nibble only)
+    GET_VREG(r0, r2)                    @ r0<- fp[A]
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: armv5/OP_IPUT_WIDE_QUICK.S */
+    /* iput-wide-quick vA, vB, offset@CCCC */
+    /* Store a 64-bit instance field via a pre-resolved byte offset. */
+    mov     r0, rINST, lsr #8           @ r0<- A(+) (high nibble masked below)
+    mov     r1, rINST, lsr #12          @ r1<- B
+    and     r0, r0, #15                 @ r0<- A (keep low nibble only)
+    GET_VREG(r2, r1)                    @ r2<- fp[B], the object pointer
+    add     r3, rFP, r0, lsl #2         @ r3<- &fp[A]
+    cmp     r2, #0                      @ check object for null
+    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[A] (64-bit value pair)
+    beq     common_errNullObject        @ object was null
+    FETCH(r3, 1)                        @ r3<- field byte offset
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    strd    r0, [r2, r3]                @ obj.field (64 bits, aligned)<- r0/r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: armv5/OP_IPUT_OBJECT_QUICK.S */
+/* File: armv5/OP_IPUT_QUICK.S */
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    /* Object references are 32 bits here, so this is identical to iput-quick. */
+    mov     r2, rINST, lsr #12          @ r2<- B
+    GET_VREG(r3, r2)                    @ r3<- fp[B], the object pointer
+    FETCH(r1, 1)                        @ r1<- field byte offset
+    cmp     r3, #0                      @ check object for null
+    mov     r2, rINST, lsr #8           @ r2<- A(+) (high nibble masked below)
+    beq     common_errNullObject        @ object was null
+    and     r2, r2, #15                 @ r2<- A (keep low nibble only)
+    GET_VREG(r0, r2)                    @ r0<- fp[A]
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: armv5/OP_INVOKE_VIRTUAL_QUICK.S */
+    /*
+     * Handle an optimized virtual method call: BBBB is a pre-resolved
+     * vtable index, so we go straight to thisPtr->clazz->vtable[BBBB].
+     *
+     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
+    FETCH(r1, 1)                        @ r1<- BBBB
+    .if     (!0)
+    @ non-range form: only the low nibble (C) names "this"
+    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
+    .endif
+    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
+    cmp     r2, #0                      @ is "this" null?
+    beq     common_errNullObject        @ null "this", throw exception
+    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
+    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- thisPtr->clazz->vtable
+    EXPORT_PC()                         @ invoke must export
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB]
+    bl      common_invokeMethodNoRange @ continue on
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: armv5/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
+/* File: armv5/OP_INVOKE_VIRTUAL_QUICK.S */
+    /*
+     * Handle an optimized virtual method call (range form): BBBB is a
+     * pre-resolved vtable index and CCCC names "this" directly.
+     *
+     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
+    FETCH(r1, 1)                        @ r1<- BBBB
+    .if     (!1)
+    @ range form: the nibble mask below is assembled out, r3 stays CCCC
+    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
+    .endif
+    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
+    cmp     r2, #0                      @ is "this" null?
+    beq     common_errNullObject        @ null "this", throw exception
+    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
+    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- thisPtr->clazz->vtable
+    EXPORT_PC()                         @ invoke must export
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB]
+    bl      common_invokeMethodRange @ continue on
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: armv5/OP_INVOKE_SUPER_QUICK.S */
+    /*
+     * Handle an optimized "super" method call: BBBB indexes the vtable of
+     * the current method's class's superclass.
+     *
+     * for: [opt] invoke-super-quick, invoke-super-quick/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(r10, 2)                       @ r10<- GFED or CCCC
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    .if     (!0)
+    @ non-range form: only the low nibble names "this"
+    and     r10, r10, #15               @ r10<- D (or stays CCCC)
+    .endif
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r2, [r2, #offMethod_clazz]  @ r2<- method->clazz
+    EXPORT_PC()                         @ must export for invoke
+    ldr     r2, [r2, #offClassObject_super]     @ r2<- method->clazz->super
+    GET_VREG(r3, r10)                   @ r3<- "this"
+    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- ...clazz->super->vtable
+    cmp     r3, #0                      @ null "this" ref?
+    @ vtable load scheduled before the branch; it does not depend on r3
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- super->vtable[BBBB]
+    beq     common_errNullObject        @ "this" is null, throw exception
+    bl      common_invokeMethodNoRange @ continue on
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: armv5/OP_INVOKE_SUPER_QUICK_RANGE.S */
+/* File: armv5/OP_INVOKE_SUPER_QUICK.S */
+    /*
+     * Handle an optimized "super" method call (range form): BBBB indexes
+     * the superclass vtable and CCCC names "this" directly.
+     *
+     * for: [opt] invoke-super-quick, invoke-super-quick/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(r10, 2)                       @ r10<- GFED or CCCC
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    .if     (!1)
+    @ range form: the nibble mask below is assembled out, r10 stays CCCC
+    and     r10, r10, #15               @ r10<- D (or stays CCCC)
+    .endif
+    FETCH(r1, 1)                        @ r1<- BBBB
+    ldr     r2, [r2, #offMethod_clazz]  @ r2<- method->clazz
+    EXPORT_PC()                         @ must export for invoke
+    ldr     r2, [r2, #offClassObject_super]     @ r2<- method->clazz->super
+    GET_VREG(r3, r10)                   @ r3<- "this"
+    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- ...clazz->super->vtable
+    cmp     r3, #0                      @ null "this" ref?
+    @ vtable load scheduled before the branch; it does not depend on r3
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- super->vtable[BBBB]
+    beq     common_errNullObject        @ "this" is null, throw exception
+    bl      common_invokeMethodRange @ continue on
+
+
+
+/*
+ * Handlers for the unassigned opcodes 0xfc-0xff.  Each one simply
+ * branches to common_abort; there is no recovery path.
+ */
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_FC: /* 0xfc */
+/* File: armv5/OP_UNUSED_FC.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_FD: /* 0xfd */
+/* File: armv5/OP_UNUSED_FD.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_FE: /* 0xfe */
+/* File: armv5/OP_UNUSED_FE.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+/* ------------------------------ */
+    .balign 64
+.L_OP_UNUSED_FF: /* 0xff */
+/* File: armv5/OP_UNUSED_FF.S */
+/* File: armv5/unused.S */
+    bl      common_abort
+
+
+
+
+    .balign 64
+    .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
+    .global dvmAsmInstructionEnd
+dvmAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ *  Sister implementations
+ * ===========================================================================
+ */
+    .global dvmAsmSisterStart
+    .type   dvmAsmSisterStart, %function
+    .text
+    .balign 4
+dvmAsmSisterStart:
+
+/* continuation for OP_CONST_STRING */
+
+    /*
+     * Continuation if the String has not yet been resolved.
+     *  r1: BBBB (String ref)
+     *  r9: target register
+     * Calls dvmResolveString(method->clazz, BBBB); a NULL result means an
+     * exception is pending.
+     */
+.LOP_CONST_STRING_resolve:
+    EXPORT_PC()
+    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveString            @ r0<- String reference
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yup, handle the exception
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_CONST_STRING_JUMBO */
+
+    /*
+     * Continuation if the String has not yet been resolved.
+     *  r1: BBBBBBBB (String ref)
+     *  r9: target register
+     * Same as the non-jumbo path, but the instruction is 3 units long.
+     */
+.LOP_CONST_STRING_JUMBO_resolve:
+    EXPORT_PC()
+    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveString            @ r0<- String reference
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yup, handle the exception
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_CONST_CLASS */
+
+    /*
+     * Continuation if the Class has not yet been resolved.
+     *  r1: BBBB (Class ref)
+     *  r9: target register
+     */
+.LOP_CONST_CLASS_resolve:
+    EXPORT_PC()
+    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+    mov     r2, #1                      @ r2<- true (3rd dvmResolveClass arg;
+                                        @ presumably "fromUnverifiedConstant")
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- Class reference
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yup, handle the exception
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r9)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_CHECK_CAST */
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from BBBB
+     *  r9 holds object
+     */
+.LOP_CHECK_CAST_fullcheck:
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    cmp     r0, #0                      @ failed?
+    bne     .LOP_CHECK_CAST_okay            @ no, success
+
+    @ A cast has failed.  We need to throw a ClassCastException with the
+    @ class of the object that failed to be cast.
+    EXPORT_PC()                         @ about to throw
+    ldr     r3, [r9, #offObject_clazz]  @ r3<- obj->clazz
+    ldr     r0, .LstrClassCastExceptionPtr
+    ldr     r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor
+    bl      dvmThrowExceptionWithClassMessage
+    b       common_exceptionThrown
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r2 holds BBBB
+     *  r9 holds object
+     */
+.LOP_CHECK_CAST_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    mov     r1, r2                      @ r1<- BBBB
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    mov     r1, r0                      @ r1<- class resolved from BBB
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    b       .LOP_CHECK_CAST_resolved        @ pick up where we left off
+
+    /* literal-pool word: address of the exception class name string */
+.LstrClassCastExceptionPtr:
+    .word   .LstrClassCastException
+
+
+/* continuation for OP_INSTANCE_OF */
+/* (.LOP_INSTANCE_OF_resolved is defined with the main handler, outside
+ *  this section) */
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  r0 holds obj->clazz
+     *  r1 holds class resolved from BBBB
+     *  r9 holds A
+     */
+.LOP_INSTANCE_OF_fullcheck:
+    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
+    @ fall through to OP_INSTANCE_OF_store
+
+    /*
+     * r0 holds boolean result
+     * r9 holds A
+     */
+.LOP_INSTANCE_OF_store:
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vA<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Trivial test succeeded, save and bail.
+     *  r9 holds A
+     */
+.LOP_INSTANCE_OF_trivial:
+    mov     r0, #1                      @ indicate success
+    @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r0, r9)                    @ vA<- r0
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r3 holds BBBB
+     *  r9 holds A
+     */
+.LOP_INSTANCE_OF_resolve:
+    EXPORT_PC()                         @ resolve() could throw
+    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
+    mov     r1, r3                      @ r1<- BBBB
+    mov     r2, #1                      @ r2<- true
+    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    beq     common_exceptionThrown      @ yes, handle exception
+    mov     r1, r0                      @ r1<- class resolved from BBBB
+    mov     r3, rINST, lsr #12          @ r3<- B
+    GET_VREG(r0, r3)                    @ r0<- vB (object)
+    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
+    b       .LOP_INSTANCE_OF_resolved        @ pick up where we left off
+
+
+/* continuation for OP_NEW_INSTANCE */
+/* (.LOP_NEW_INSTANCE_initialized and .LOP_NEW_INSTANCE_resolved are defined
+ *  with the main handler, outside this section) */
+
+    .balign 32                          @ minimize cache lines
+.LOP_NEW_INSTANCE_finish: @ r0=class
+    bl      dvmAllocObject              @ r0<- new object
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    cmp     r0, #0                      @ failed?
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r3)                    @ vAA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Class initialization required.
+     *
+     *  r0 holds class object
+     */
+.LOP_NEW_INSTANCE_needinit:
+    mov     r9, r0                      @ save r0 (dvmInitClass clobbers it)
+    bl      dvmInitClass                @ initialize class
+    cmp     r0, #0                      @ check boolean result
+    mov     r0, r9                      @ restore r0
+    bne     .LOP_NEW_INSTANCE_initialized     @ success, continue
+    b       common_exceptionThrown      @ failed, deal with init exception
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  r1 holds BBBB
+     */
+.LOP_NEW_INSTANCE_resolve:
+    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_NEW_INSTANCE_resolved        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+    /*
+     * We can't instantiate an abstract class or interface, so throw an
+     * InstantiationError with the class descriptor as the message.
+     *
+     *  r0 holds class object
+     */
+.LOP_NEW_INSTANCE_abstract:
+    ldr     r1, [r0, #offClassObject_descriptor]
+    ldr     r0, .LstrInstantiationErrorPtr
+    bl      dvmThrowExceptionWithClassMessage
+    b       common_exceptionThrown
+
+.LstrInstantiationErrorPtr:
+    .word   .LstrInstantiationError
+
+
+/* continuation for OP_NEW_ARRAY */
+
+
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *
+     *  r1 holds array length
+     *  r2 holds class ref CCCC
+     * Length is preserved in r9 across the dvmResolveClass call.
+     */
+.LOP_NEW_ARRAY_resolve:
+    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    mov     r9, r1                      @ r9<- length (save)
+    mov     r1, r2                      @ r1<- CCCC
+    mov     r2, #0                      @ r2<- false
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveClass             @ r0<- call(clazz, ref)
+    cmp     r0, #0                      @ got null?
+    mov     r1, r9                      @ r1<- length (restore)
+    beq     common_exceptionThrown      @ yes, handle exception
+    @ fall through to OP_NEW_ARRAY_finish
+
+    /*
+     * Finish allocation.
+     *
+     *  r0 holds class
+     *  r1 holds array length
+     */
+.LOP_NEW_ARRAY_finish:
+    mov     r2, #ALLOC_DONT_TRACK       @ don't track in local refs table
+    bl      dvmAllocArrayByClass        @ r0<- call(clazz, length, flags)
+    cmp     r0, #0                      @ failed?
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    beq     common_exceptionThrown      @ yes, handle the exception
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    and     r2, r2, #15                 @ r2<- A
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ vA<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_FILLED_NEW_ARRAY */
+/* Generated with the ".if 0" (non-range) form selected: up to five args are
+ * gathered from vA and the four nibbles of FEDC.  Only int[] ('I' element
+ * descriptor) is currently implemented; other types throw InternalError. */
+
+    /*
+     * On entry:
+     *  r0 holds array class
+     *  r10 holds AA or BA
+     */
+.LOP_FILLED_NEW_ARRAY_continue:
+    ldr     r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
+    ldrb    r0, [r3, #1]                @ r0<- descriptor[1]
+    .if     0
+    mov     r1, r10                     @ r1<- AA (length)
+    .else
+    mov     r1, r10, lsr #4             @ r1<- B (length)
+    .endif
+    cmp     r0, #'I'                    @ array of ints?
+    mov     r9, r1                      @ save length in r9
+    bne     .LOP_FILLED_NEW_ARRAY_notimpl         @ no, not handled yet
+    bl      dvmAllocPrimitiveArray      @ r0<- call(typeCh, length, flags)
+    cmp     r0, #0                      @ null return?
+    beq     common_exceptionThrown      @ alloc failed, handle exception
+
+    FETCH(r1, 2)                        @ r1<- FEDC or CCCC
+    str     r0, [rGLUE, #offGlue_retval]    @ retval.l <- new array
+    add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+    subs    r9, r9, #1                  @ length--, check for neg
+    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
+    bmi     2f                          @ was zero, bail
+
+    @ copy values from registers into the array
+    @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
+    .if     0
+    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
+1:  ldr     r3, [r2], #4                @ r3<- *r2++
+    subs    r9, r9, #1                  @ count--
+    str     r3, [r0], #4                @ *contents++ = vX
+    bpl     1b
+    @ continue at 2
+    .else
+    cmp     r9, #4                      @ length was initially 5?
+    and     r2, r10, #15                @ r2<- A
+    bne     1f                          @ <= 4 args, branch
+    GET_VREG(r3, r2)                    @ r3<- vA
+    sub     r9, r9, #1                  @ count--
+    str     r3, [r0, #16]               @ contents[4] = vA
+1:  and     r2, r1, #15                 @ r2<- F/E/D/C
+    GET_VREG(r3, r2)                    @ r3<- vF/vE/vD/vC
+    mov     r1, r1, lsr #4              @ r1<- next reg in low 4
+    subs    r9, r9, #1                  @ count--
+    str     r3, [r0], #4                @ *contents++ = vX
+    bpl     1b
+    @ continue at 2
+    .endif
+
+2:
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    GOTO_OPCODE(ip)                     @ execute it
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_notimpl:
+    ldr     r0, strInternalError
+    ldr     r1, strFilledNewArrayNotImpl
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+    .if     (!0)                 @ define in one or the other, not both
+strFilledNewArrayNotImpl:
+    .word   .LstrFilledNewArrayNotImpl
+strInternalError:
+    .word   .LstrInternalError
+    .endif
+
+
+/* continuation for OP_FILLED_NEW_ARRAY_RANGE */
+/* Same template as OP_FILLED_NEW_ARRAY, generated with the ".if 1" (range)
+ * form: AA is the length and args are copied from the contiguous registers
+ * starting at vCCCC.  The shared string pointers are defined only in the
+ * non-range copy above (see the ".if (!1)" guard). */
+
+    /*
+     * On entry:
+     *  r0 holds array class
+     *  r10 holds AA or BA
+     */
+.LOP_FILLED_NEW_ARRAY_RANGE_continue:
+    ldr     r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
+    ldrb    r0, [r3, #1]                @ r0<- descriptor[1]
+    .if     1
+    mov     r1, r10                     @ r1<- AA (length)
+    .else
+    mov     r1, r10, lsr #4             @ r1<- B (length)
+    .endif
+    cmp     r0, #'I'                    @ array of ints?
+    mov     r9, r1                      @ save length in r9
+    bne     .LOP_FILLED_NEW_ARRAY_RANGE_notimpl         @ no, not handled yet
+    bl      dvmAllocPrimitiveArray      @ r0<- call(typeCh, length, flags)
+    cmp     r0, #0                      @ null return?
+    beq     common_exceptionThrown      @ alloc failed, handle exception
+
+    FETCH(r1, 2)                        @ r1<- FEDC or CCCC
+    str     r0, [rGLUE, #offGlue_retval]    @ retval.l <- new array
+    add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+    subs    r9, r9, #1                  @ length--, check for neg
+    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
+    bmi     2f                          @ was zero, bail
+
+    @ copy values from registers into the array
+    @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
+    .if     1
+    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
+1:  ldr     r3, [r2], #4                @ r3<- *r2++
+    subs    r9, r9, #1                  @ count--
+    str     r3, [r0], #4                @ *contents++ = vX
+    bpl     1b
+    @ continue at 2
+    .else
+    cmp     r9, #4                      @ length was initially 5?
+    and     r2, r10, #15                @ r2<- A
+    bne     1f                          @ <= 4 args, branch
+    GET_VREG(r3, r2)                    @ r3<- vA
+    sub     r9, r9, #1                  @ count--
+    str     r3, [r0, #16]               @ contents[4] = vA
+1:  and     r2, r1, #15                 @ r2<- F/E/D/C
+    GET_VREG(r3, r2)                    @ r3<- vF/vE/vD/vC
+    mov     r1, r1, lsr #4              @ r1<- next reg in low 4
+    subs    r9, r9, #1                  @ count--
+    str     r3, [r0], #4                @ *contents++ = vX
+    bpl     1b
+    @ continue at 2
+    .endif
+
+2:
+    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
+    GOTO_OPCODE(ip)                     @ execute it
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
+    ldr     r0, strInternalError
+    ldr     r1, strFilledNewArrayNotImpl
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+    .if     (!1)                 @ define in one or the other, not both
+strFilledNewArrayNotImpl:
+    .word   .LstrFilledNewArrayNotImpl
+strInternalError:
+    .word   .LstrInternalError
+    .endif
+
+
+/* continuation for OP_CMPL_FLOAT */
+
+    @ Test for NaN with a second comparison.  EABI forbids testing bit
+    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+    @ make the library call.
+    @ cmpl-float biases toward -1: NaN produces -1 (vs +1 for cmpg-float).
+.LOP_CMPL_FLOAT_gt_or_nan:
+    mov     r1, r9                      @ reverse order
+    mov     r0, r10
+    bl      __aeabi_cfcmple             @ r0<- Z set if eq, C clear if <
+    @bleq    common_abort
+    movcc   r1, #1                      @ (greater than) r1<- 1
+    bcc     .LOP_CMPL_FLOAT_finish
+    mvn     r1, #0                            @ r1<- -1 for NaN (cmpl bias)
+    b       .LOP_CMPL_FLOAT_finish
+
+
+#if 0       /* "classic" form, disabled; kept for reference */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG(r9, r2)                    @ r9<- vBB
+    GET_VREG(r10, r3)                   @ r10<- vCC
+    mov     r0, r9                      @ r0<- vBB
+    mov     r1, r10                     @ r1<- vCC
+    bl      __aeabi_fcmpeq              @ r0<- (vBB == vCC)
+    cmp     r0, #0                      @ equal?
+    movne   r1, #0                      @ yes, result is 0
+    bne     OP_CMPL_FLOAT_finish
+    mov     r0, r9                      @ r0<- vBB
+    mov     r1, r10                     @ r1<- vCC
+    bl      __aeabi_fcmplt              @ r0<- (vBB < vCC)
+    cmp     r0, #0                      @ less than?
+    b       OP_CMPL_FLOAT_continue
+@%break
+
+OP_CMPL_FLOAT_continue:
+    mvnne   r1, #0                      @ yes, result is -1
+    bne     OP_CMPL_FLOAT_finish
+    mov     r0, r9                      @ r0<- vBB
+    mov     r1, r10                     @ r1<- vCC
+    bl      __aeabi_fcmpgt              @ r0<- (vBB > vCC)
+    cmp     r0, #0                      @ greater than?
+    beq     OP_CMPL_FLOAT_nan               @ no, must be NaN
+    mov     r1, #1                      @ yes, result is 1
+    @ fall through to _finish
+
+OP_CMPL_FLOAT_finish:
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r3)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * This is expected to be uncommon, so we double-branch (once to here,
+     * again back to _finish).
+     */
+OP_CMPL_FLOAT_nan:
+    mvn     r1, #0                            @ r1<- -1 for NaN (cmpl bias)
+    b       OP_CMPL_FLOAT_finish
+
+#endif
+
+
+/* continuation for OP_CMPG_FLOAT */
+
+    @ Test for NaN with a second comparison.  EABI forbids testing bit
+    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+    @ make the library call.
+    @ cmpg-float biases toward +1: NaN produces +1 (vs -1 for cmpl-float).
+.LOP_CMPG_FLOAT_gt_or_nan:
+    mov     r1, r9                      @ reverse order
+    mov     r0, r10
+    bl      __aeabi_cfcmple             @ r0<- Z set if eq, C clear if <
+    @bleq    common_abort
+    movcc   r1, #1                      @ (greater than) r1<- 1
+    bcc     .LOP_CMPG_FLOAT_finish
+    mov     r1, #1                            @ r1<- 1 for NaN (cmpg bias)
+    b       .LOP_CMPG_FLOAT_finish
+
+
+#if 0       /* "classic" form, disabled; kept for reference */
+    FETCH(r0, 1)                        @ r0<- CCBB
+    and     r2, r0, #255                @ r2<- BB
+    mov     r3, r0, lsr #8              @ r3<- CC
+    GET_VREG(r9, r2)                    @ r9<- vBB
+    GET_VREG(r10, r3)                   @ r10<- vCC
+    mov     r0, r9                      @ r0<- vBB
+    mov     r1, r10                     @ r1<- vCC
+    bl      __aeabi_fcmpeq              @ r0<- (vBB == vCC)
+    cmp     r0, #0                      @ equal?
+    movne   r1, #0                      @ yes, result is 0
+    bne     OP_CMPG_FLOAT_finish
+    mov     r0, r9                      @ r0<- vBB
+    mov     r1, r10                     @ r1<- vCC
+    bl      __aeabi_fcmplt              @ r0<- (vBB < vCC)
+    cmp     r0, #0                      @ less than?
+    b       OP_CMPG_FLOAT_continue
+@%break
+
+OP_CMPG_FLOAT_continue:
+    mvnne   r1, #0                      @ yes, result is -1
+    bne     OP_CMPG_FLOAT_finish
+    mov     r0, r9                      @ r0<- vBB
+    mov     r1, r10                     @ r1<- vCC
+    bl      __aeabi_fcmpgt              @ r0<- (vBB > vCC)
+    cmp     r0, #0                      @ greater than?
+    beq     OP_CMPG_FLOAT_nan               @ no, must be NaN
+    mov     r1, #1                      @ yes, result is 1
+    @ fall through to _finish
+
+OP_CMPG_FLOAT_finish:
+    mov     r3, rINST, lsr #8           @ r3<- AA
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r3)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * This is expected to be uncommon, so we double-branch (once to here,
+     * again back to _finish).
+     */
+OP_CMPG_FLOAT_nan:
+    mov     r1, #1                            @ r1<- 1 for NaN (cmpg bias)
+    b       OP_CMPG_FLOAT_finish
+
+#endif
+
+
+/* continuation for OP_CMPL_DOUBLE */
+
+    @ Test for NaN with a second comparison.  EABI forbids testing bit
+    @ patterns, and we can't represent the quiet-NaN pattern in immediate
+    @ form, so make the library call.
+    @ r9/r10 point at the 64-bit operands in the frame (loaded via ldmia).
+.LOP_CMPL_DOUBLE_gt_or_nan:
+    ldmia   r10, {r0-r1}                @ reverse order
+    ldmia   r9, {r2-r3}
+    bl      __aeabi_cdcmple             @ r0<- Z set if eq, C clear if <
+    @bleq    common_abort
+    movcc   r1, #1                      @ (greater than) r1<- 1
+    bcc     .LOP_CMPL_DOUBLE_finish
+    mvn     r1, #0                            @ r1<- -1 for NaN (cmpl bias)
+    b       .LOP_CMPL_DOUBLE_finish
+
+
+/* continuation for OP_CMPG_DOUBLE */
+
+    @ Test for NaN with a second comparison.  EABI forbids testing bit
+    @ patterns, and we can't represent the quiet-NaN pattern in immediate
+    @ form, so make the library call.
+    @ r9/r10 point at the 64-bit operands in the frame (loaded via ldmia).
+.LOP_CMPG_DOUBLE_gt_or_nan:
+    ldmia   r10, {r0-r1}                @ reverse order
+    ldmia   r9, {r2-r3}
+    bl      __aeabi_cdcmple             @ r0<- Z set if eq, C clear if <
+    @bleq    common_abort
+    movcc   r1, #1                      @ (greater than) r1<- 1
+    bcc     .LOP_CMPG_DOUBLE_finish
+    mov     r1, #1                            @ r1<- 1 for NaN (cmpg bias)
+    b       .LOP_CMPG_DOUBLE_finish
+
+
+/* continuation for OP_CMP_LONG */
+
+    /* Store -1/+1 comparison result into vAA (r9 holds AA). */
+.LOP_CMP_LONG_less:
+    mvn     r1, #0                      @ r1<- -1
+    @ Want to cond code the next mov so we can avoid branch, but don't see it;
+    @ instead, we just replicate the tail end.
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r9)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+.LOP_CMP_LONG_greater:
+    mov     r1, #1                      @ r1<- 1
+    @ fall through to _finish
+
+.LOP_CMP_LONG_finish:
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    SET_VREG(r1, r9)                    @ vAA<- r1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_AGET_WIDE */
+
+    /* r0 points at the element (base + index*8 done by the main handler --
+     * confirm there); r9 holds AA.  Loads 64 bits into vAA/vAA+1. */
+.LOP_AGET_WIDE_finish:
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldrd    r2, [r0, #offArrayObject_contents]  @ r2/r3<- vBB[vCC]
+    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r2-r3}                 @ vAA/vAA+1<- r2/r3
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_APUT_WIDE */
+
+    /* r0 points at the element slot; r9 points at fp[AA] (set up by the
+     * main handler -- confirm there).  Stores 64 bits from vAA/vAA+1. */
+.LOP_APUT_WIDE_finish:
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    strd    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2/r3
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_APUT_OBJECT */
+    /*
+     * On entry:
+     *  r1 = vBB (arrayObj)
+     *  r9 = vAA (obj)
+     *  r10 = offset into array (vBB + vCC * width)
+     * Null stores skip the type check; otherwise dvmCanPutArrayElement
+     * verifies obj's class is assignable to the array's element type.
+     */
+.LOP_APUT_OBJECT_finish:
+    cmp     r9, #0                      @ storing null reference?
+    beq     .LOP_APUT_OBJECT_skip_check      @ yes, skip type checks
+    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
+    ldr     r1, [r1, #offObject_clazz]  @ r1<- arrayObj->clazz
+    bl      dvmCanPutArrayElement       @ test object type vs. array type
+    cmp     r0, #0                      @ okay?
+    beq     common_errArrayStore        @ no
+.LOP_APUT_OBJECT_skip_check:
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IGET */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     * Null object throws NullPointerException; otherwise vA gets the field.
+     */
+.LOP_IGET_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    and     r2, r2, #15                 @ r2<- A
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[A]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IGET_WIDE */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     * Loads a 64-bit field into vA/vA+1; ldrd requires the field to be
+     * 64-bit aligned (per the comment below).
+     */
+.LOP_IGET_WIDE_finish:
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
+    and     r2, r2, #15                 @ r2<- A
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IGET_OBJECT */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     * Identical body to .LOP_IGET_finish (object refs are 32-bit words).
+     */
+.LOP_IGET_OBJECT_finish:
+    @bl      common_squeak0
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    and     r2, r2, #15                 @ r2<- A
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[A]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IGET_BOOLEAN */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     * NOTE(review): a full-word ldr is used even for this sub-32-bit type --
+     * presumably instance fields occupy 32-bit slots; confirm field layout.
+     */
+.LOP_IGET_BOOLEAN_finish:
+    @bl      common_squeak1
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    and     r2, r2, #15                 @ r2<- A
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[A]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IGET_BYTE */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     * NOTE(review): a full-word ldr is used even for this sub-32-bit type --
+     * presumably instance fields occupy 32-bit slots; confirm field layout.
+     */
+.LOP_IGET_BYTE_finish:
+    @bl      common_squeak2
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    and     r2, r2, #15                 @ r2<- A
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[A]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IGET_CHAR */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     * NOTE(review): a full-word ldr is used even for this sub-32-bit type --
+     * presumably instance fields occupy 32-bit slots; confirm field layout.
+     */
+.LOP_IGET_CHAR_finish:
+    @bl      common_squeak3
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    and     r2, r2, #15                 @ r2<- A
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[A]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IGET_SHORT */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     * NOTE(review): a full-word ldr is used even for this sub-32-bit type --
+     * presumably instance fields occupy 32-bit slots; confirm field layout.
+     */
+.LOP_IGET_SHORT_finish:
+    @bl      common_squeak4
+    cmp     r9, #0                      @ check object for null
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    beq     common_errNullObject        @ object was null
+    ldr   r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    and     r2, r2, #15                 @ r2<- A
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    SET_VREG(r0, r2)                    @ fp[A]<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IPUT */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     * Null object throws NullPointerException; otherwise obj.field gets vA.
+     */
+.LOP_IPUT_finish:
+    @bl      common_squeak0
+    mov     r1, rINST, lsr #8           @ r1<- A+
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    and     r1, r1, #15                 @ r1<- A
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[A]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IPUT_WIDE */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     * Stores vA/vA+1 into a 64-bit field; strd requires 64-bit alignment.
+     */
+.LOP_IPUT_WIDE_finish:
+    mov     r2, rINST, lsr #8           @ r2<- A+
+    cmp     r9, #0                      @ check object for null
+    and     r2, r2, #15                 @ r2<- A
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[A]
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IPUT_OBJECT */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     * Identical body to .LOP_IPUT_finish (object refs are 32-bit words).
+     */
+.LOP_IPUT_OBJECT_finish:
+    @bl      common_squeak0
+    mov     r1, rINST, lsr #8           @ r1<- A+
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    and     r1, r1, #15                 @ r1<- A
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[A]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IPUT_BOOLEAN */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     * NOTE(review): a full-word str is used even for this sub-32-bit type --
+     * presumably instance fields occupy 32-bit slots; confirm field layout.
+     */
+.LOP_IPUT_BOOLEAN_finish:
+    @bl      common_squeak1
+    mov     r1, rINST, lsr #8           @ r1<- A+
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    and     r1, r1, #15                 @ r1<- A
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[A]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IPUT_BYTE */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     * NOTE(review): a full-word str is used even for this sub-32-bit type --
+     * presumably instance fields occupy 32-bit slots; confirm field layout.
+     */
+.LOP_IPUT_BYTE_finish:
+    @bl      common_squeak2
+    mov     r1, rINST, lsr #8           @ r1<- A+
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    and     r1, r1, #15                 @ r1<- A
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[A]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IPUT_CHAR */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_CHAR_finish:
+    @bl      common_squeak3
+    mov     r1, rINST, lsr #8           @ r1<- A+
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    and     r1, r1, #15                 @ r1<- A
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[A]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_IPUT_SHORT */
+
+    /*
+     * Currently:
+     *  r0 holds resolved field
+     *  r9 holds object
+     */
+.LOP_IPUT_SHORT_finish:
+    @bl      common_squeak4
+    mov     r1, rINST, lsr #8           @ r1<- A+
+    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
+    and     r1, r1, #15                 @ r1<- A
+    cmp     r9, #0                      @ check object for null
+    GET_VREG(r0, r1)                    @ r0<- fp[A]
+    beq     common_errNullObject        @ object was null
+    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str  r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_SGET */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     * Calls dvmResolveStaticField(method->clazz, ref); r1 is assumed to
+     * survive EXPORT_PC() and the two loads so it arrives as the ref arg.
+     * A NULL return means resolution threw.  Same contract for the seven
+     * typed variants below.
+     */
+.LOP_SGET_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SGET_WIDE */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.LOP_SGET_WIDE_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_WIDE_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SGET_OBJECT */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.LOP_SGET_OBJECT_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_OBJECT_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SGET_BOOLEAN */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.LOP_SGET_BOOLEAN_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_BOOLEAN_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SGET_BYTE */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.LOP_SGET_BYTE_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_BYTE_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SGET_CHAR */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.LOP_SGET_CHAR_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_CHAR_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SGET_SHORT */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.LOP_SGET_SHORT_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SGET_SHORT_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SPUT */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     * Calls dvmResolveStaticField(method->clazz, ref); r1 is assumed to
+     * survive EXPORT_PC() and the two loads so it arrives as the ref arg.
+     * A NULL return means resolution threw.  Same contract for the six
+     * variants below.
+     */
+.LOP_SPUT_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SPUT_WIDE */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     *  r9: &fp[AA]
+     */
+.LOP_SPUT_WIDE_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_WIDE_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SPUT_OBJECT */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.LOP_SPUT_OBJECT_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_OBJECT_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SPUT_BOOLEAN */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.LOP_SPUT_BOOLEAN_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_BOOLEAN_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SPUT_BYTE */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.LOP_SPUT_BYTE_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_BYTE_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SPUT_CHAR */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.LOP_SPUT_CHAR_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_CHAR_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_SPUT_SHORT */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  r1: BBBB field ref
+     */
+.LOP_SPUT_SHORT_resolve:
+    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
+    EXPORT_PC()                         @ resolve() could throw, so export now
+    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
+    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
+    cmp     r0, #0                      @ success?
+    bne     .LOP_SPUT_SHORT_finish          @ yes, finish
+    b       common_exceptionThrown      @ no, handle exception
+
+
+/* continuation for OP_INVOKE_VIRTUAL */
+
+    /*
+     * Virtual dispatch: replace the resolved base method with the
+     * receiver's vtable entry, then invoke.
+     * At this point:
+     *  r0 = resolved base method
+     *  r10 = C or CCCC (index of first arg, which is the "this" ptr)
+     */
+.LOP_INVOKE_VIRTUAL_continue:
+    GET_VREG(r1, r10)                   @ r1<- "this" ptr
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    cmp     r1, #0                      @ is "this" null?
+    beq     common_errNullObject        @ null "this", throw exception
+    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
+    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodNoRange @ continue on
+
+
+/* continuation for OP_INVOKE_SUPER */
+
+    /*
+     * Look the resolved method up in the superclass vtable, bounds-checking
+     * against its vtable size, then invoke.
+     * At this point:
+     *  r0 = resolved base method
+     *  r9 = method->clazz
+     */
+.LOP_INVOKE_SUPER_continue:
+    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
+    EXPORT_PC()                         @ must export for invoke
+    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
+    bcs     .LOP_INVOKE_SUPER_nsm             @ method not present (bcs: unsigned >=)
+    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
+    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodNoRange @ continue on
+
+.LOP_INVOKE_SUPER_resolve:
+    mov     r0, r9                      @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_INVOKE_SUPER_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  r0 = resolved base method
+     */
+.LOP_INVOKE_SUPER_nsm:
+    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
+    b       common_errNoSuchMethod
+
+
+/* continuation for OP_INVOKE_DIRECT */
+
+    /*
+     * Resolve a direct method reference, then rejoin the main handler.
+     * On entry:
+     *  r1 = reference (BBBB or CCCC)
+     *  r10 = "this" register
+     * NOTE(review): GET_VREG sits between cmp and bne -- assumes that macro
+     * preserves the condition flags.
+     */
+.LOP_INVOKE_DIRECT_resolve:
+    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_DIRECT          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
+    bne     .LOP_INVOKE_DIRECT_finish          @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+
+/* continuation for OP_INVOKE_VIRTUAL_RANGE */
+
+    /*
+     * Virtual dispatch (range form): replace the resolved base method with
+     * the receiver's vtable entry, then invoke.
+     * At this point:
+     *  r0 = resolved base method
+     *  r10 = C or CCCC (index of first arg, which is the "this" ptr)
+     */
+.LOP_INVOKE_VIRTUAL_RANGE_continue:
+    GET_VREG(r1, r10)                   @ r1<- "this" ptr
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    cmp     r1, #0                      @ is "this" null?
+    beq     common_errNullObject        @ null "this", throw exception
+    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
+    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
+    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodRange @ continue on
+
+
+/* continuation for OP_INVOKE_SUPER_RANGE */
+
+    /*
+     * Look the resolved method up in the superclass vtable, bounds-checking
+     * against its vtable size, then invoke (range form).
+     * At this point:
+     *  r0 = resolved base method
+     *  r9 = method->clazz
+     */
+.LOP_INVOKE_SUPER_RANGE_continue:
+    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
+    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
+    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
+    EXPORT_PC()                         @ must export for invoke
+    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
+    bcs     .LOP_INVOKE_SUPER_RANGE_nsm             @ method not present (bcs: unsigned >=)
+    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
+    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
+    bl      common_invokeMethodRange @ continue on
+
+.LOP_INVOKE_SUPER_RANGE_resolve:
+    mov     r0, r9                      @ r0<- method->clazz
+    mov     r2, #METHOD_VIRTUAL         @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    bne     .LOP_INVOKE_SUPER_RANGE_continue        @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  r0 = resolved base method
+     */
+.LOP_INVOKE_SUPER_RANGE_nsm:
+    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
+    b       common_errNoSuchMethod
+
+
+/* continuation for OP_INVOKE_DIRECT_RANGE */
+
+    /*
+     * Resolve a direct method reference, then rejoin the main handler.
+     * On entry:
+     *  r1 = reference (BBBB or CCCC)
+     *  r10 = "this" register
+     * NOTE(review): GET_VREG sits between cmp and bne -- assumes that macro
+     * preserves the condition flags.
+     */
+.LOP_INVOKE_DIRECT_RANGE_resolve:
+    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
+    mov     r2, #METHOD_DIRECT          @ resolver method type
+    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
+    cmp     r0, #0                      @ got null?
+    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
+    bne     .LOP_INVOKE_DIRECT_RANGE_finish          @ no, continue
+    b       common_exceptionThrown      @ yes, handle exception
+
+
+/* continuation for OP_FLOAT_TO_LONG */
+/*
+ * Convert the float in r0 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer.  The EABI convert function isn't doing this for us.
+ *
+ * Soft-float EABI: float operands travel in core registers, so the
+ * comparisons below are plain calls that return 0/1 in r0.  The long
+ * result pair is r0=low word, r1=high word.
+ */
+f2l_doconv:
+    stmfd   sp!, {r4, lr}
+    mov     r1, #0x5f000000             @ (float)maxlong, i.e. 2^63
+    mov     r4, r0                      @ preserve arg across helper calls
+    bl      __aeabi_fcmpge              @ is arg >= maxlong?
+    cmp     r0, #0                      @ nonzero == yes
+    mvnne   r0, #0                      @ return maxlong: r0<- 0xffffffff (low)
+    mvnne   r1, #0x80000000             @                 r1<- 0x7fffffff (high)
+    ldmnefd sp!, {r4, pc}
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, #0xdf000000             @ (float)minlong, i.e. -(2^63)
+    bl      __aeabi_fcmple              @ is arg <= minlong?
+    cmp     r0, #0                      @ nonzero == yes
+    movne   r0, #0                      @ return minlong: r0<- 0x00000000 (low)
+    movne   r1, #0x80000000             @                 r1<- 0x80000000 (high)
+    ldmnefd sp!, {r4, pc}
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r4
+    bl      __aeabi_fcmpeq              @ is arg == self?  (false only for NaN)
+    cmp     r0, #0                      @ zero == no
+    moveq   r1, #0                      @ return zero for NaN (r0 already 0 here)
+    ldmeqfd sp!, {r4, pc}
+
+    mov     r0, r4                      @ recover arg
+    bl      __aeabi_f2lz                @ convert float to long (truncate to zero)
+    ldmfd   sp!, {r4, pc}
+
+
+/* continuation for OP_DOUBLE_TO_LONG */
+/*
+ * Convert the double in r0/r1 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer.  The EABI convert function isn't doing this for us.
+ *
+ * Soft-float EABI: the double arg arrives in r0/r1; the comparison
+ * constant is passed in r2 (low word) / r3 (high word).  Only the high
+ * word of +/-(double)2^63 is nonzero, so the low word is always 0.
+ */
+d2l_doconv:
+    stmfd   sp!, {r4, r5, lr}           @ save regs
+    ldr     r3, .LOP_DOUBLE_TO_LONG_max         @ (double)maxlong, hi
+    sub     sp, sp, #4                  @ align for EABI
+    mov     r2, #0                      @ (double)maxlong, lo
+    mov     r4, r0                      @ save r0
+    mov     r5, r1                      @  and r1
+    bl      __aeabi_dcmpge              @ is arg >= maxlong?
+    cmp     r0, #0                      @ nonzero == yes
+    mvnne   r0, #0                      @ return maxlong: r0<- 0xffffffff (low)
+    mvnne   r1, #0x80000000             @                 r1<- 0x7fffffff (high)
+    bne     1f
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r5
+    ldr     r3, .LOP_DOUBLE_TO_LONG_min         @ (double)minlong, hi
+    mov     r2, #0                      @ (double)minlong, lo
+    bl      __aeabi_dcmple              @ is arg <= minlong?
+    cmp     r0, #0                      @ nonzero == yes
+    movne   r0, #0                      @ return minlong: r0<- 0x00000000 (low)
+    movne   r1, #0x80000000             @                 r1<- 0x80000000 (high)
+    bne     1f
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r5
+    mov     r2, r4                      @ compare against self
+    mov     r3, r5
+    bl      __aeabi_dcmpeq              @ is arg == self?  (false only for NaN)
+    cmp     r0, #0                      @ zero == no
+    moveq   r1, #0                      @ return zero for NaN (r0 already 0 here)
+    beq     1f
+
+    mov     r0, r4                      @ recover arg
+    mov     r1, r5
+    bl      __aeabi_d2lz                @ convert double to long (truncate to zero)
+
+1:                                      @ common exit: undo alignment pad, return
+    add     sp, sp, #4
+    ldmfd   sp!, {r4, r5, pc}
+
+.LOP_DOUBLE_TO_LONG_max:
+    .word   0x43e00000                  @ maxlong, as a double (high word)
+.LOP_DOUBLE_TO_LONG_min:
+    .word   0xc3e00000                  @ minlong, as a double (high word)
+
+
+/* continuation for OP_MUL_LONG */
+
+    @ tail: 64-bit product is in r9/r10, r0 holds &vAA (per stmia comment)
+.LOP_MUL_LONG_finish:
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r0, {r9-r10}                @ vAA/vAA+1<- r9/r10
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_SHL_LONG */
+
+    @ tail: r2 = shift count from the main handler (not in this view);
+    @ compute the low word, then store the pair to vAA/vAA+1 via r9
+.LOP_SHL_LONG_finish:
+    mov     r0, r0, asl r2              @  r0<- r0 << r2
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_SHR_LONG */
+
+    @ tail: compute the high word (arithmetic shift), then store the pair
+.LOP_SHR_LONG_finish:
+    mov     r1, r1, asr r2              @  r1<- r1 >> r2
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_USHR_LONG */
+
+    @ tail: compute the high word (logical shift), then store the pair
+.LOP_USHR_LONG_finish:
+    mov     r1, r1, lsr r2              @  r1<- r1 >>> r2
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_SHL_LONG_2ADDR */
+
+    @ tail: result already in r0/r1; just store to vAA/vAA+1 via r9
+.LOP_SHL_LONG_2ADDR_finish:
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_SHR_LONG_2ADDR */
+
+    @ tail: result already in r0/r1; just store to vAA/vAA+1 via r9
+.LOP_SHR_LONG_2ADDR_finish:
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_USHR_LONG_2ADDR */
+
+    @ tail: result already in r0/r1; just store to vAA/vAA+1 via r9
+.LOP_USHR_LONG_2ADDR_finish:
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/* continuation for OP_EXECUTE_INLINE */
+
+    /*
+     * Extract args, call function.
+     *  r0 = #of args (0-4)
+     *  r10 = call index
+     *  lr = return addr, above  [DO NOT bl in here w/o preserving LR]
+     *
+     * The computed goto jumps into the AND/LDR ladder so that exactly the
+     * needed vreg loads are executed: each numbered chunk is 2 instructions
+     * (8 bytes), matching the "lsl #3" scale.  Reading pc yields the
+     * address two instructions ahead, which is why the "bl common_abort"
+     * slot is skipped for the 4-arg case.
+     *
+     * Other ideas:
+     * - Use a jump table from the main piece to jump directly into the
+     *   AND/LDR pairs.  Costs a data load, saves a branch.
+     * - Have five separate pieces that do the loading, so we can work the
+     *   interleave a little better.  Increases code size.
+     */
+.LOP_EXECUTE_INLINE_continue:
+    rsb     r0, r0, #4                  @ r0<- 4-r0
+    FETCH(r9, 2)                        @ r9<- FEDC
+    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
+    bl      common_abort                @ (skipped due to ARM prefetch)
+4:  and     ip, r9, #0xf000             @ isolate F
+    ldr     r3, [rFP, ip, lsr #10]      @ r3<- vF (shift right 12, left 2)
+3:  and     ip, r9, #0x0f00             @ isolate E
+    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vE
+2:  and     ip, r9, #0x00f0             @ isolate D
+    ldr     r1, [rFP, ip, lsr #2]       @ r1<- vD
+1:  and     ip, r9, #0x000f             @ isolate C
+    ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
+0:
+    @b       dvmPerformInlineOp4Std
+    ldr     r9, .LOP_EXECUTE_INLINE_table       @ table of InlineOperation
+    @ tail-jump into the handler; lr (set by the caller, see header) is the
+    @ return address
+    ldr     pc, [r9, r10, lsl #4]       @ sizeof=16, "func" is first entry
+    @ (not reached)
+
+.LOP_EXECUTE_INLINE_table:
+    .word   gDvmInlineOpsTable
+
+
+    .size   dvmAsmSisterStart, .-dvmAsmSisterStart
+    .global dvmAsmSisterEnd
+dvmAsmSisterEnd:
+
+/* File: armv5/footer.S */
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align  2
+
+/*
+ * Common code when a backward branch is taken.
+ *
+ * Runs the periodic checks (suspend/debugger/profiler) before continuing,
+ * so tight loops remain interruptible.
+ *
+ * On entry:
+ *  r9 is PC adjustment *in bytes*
+ */
+common_backwardBranch:
+    mov     r0, #kInterpEntryInstr      @ reentry type for common_periodicChecks
+    bl      common_periodicChecks
+    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+
+/*
+ * Need to see if the thread needs to be suspended or debugger/profiler
+ * activity has begun.
+ *
+ * TODO: if JDWP isn't running, zero out pDebuggerActive pointer so we don't
+ * have to do the second ldr.
+ *
+ * TODO: reduce this so we're just checking a single location.
+ *
+ * On entry:
+ *  r0 is reentry type, e.g. kInterpEntryInstr
+ *  r9 is trampoline PC adjustment *in bytes*
+ */
+common_periodicChecks:
+    @ load the three indirection pointers first, then dereference, to
+    @ overlap the load latencies
+    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount
+
+#if defined(WITH_DEBUGGER)
+    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
+#endif
+#if defined(WITH_PROFILER)
+    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
+#endif
+
+    ldr     r3, [r3]                    @ r3<- suspendCount (int)
+
+#if defined(WITH_DEBUGGER)
+    ldrb    r1, [r1]                    @ r1<- debuggerActive (boolean)
+#endif
+#if defined (WITH_PROFILER)
+    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
+#endif
+
+    cmp     r3, #0                      @ suspend pending?
+    bne     2f                          @ yes, check suspend
+
+#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
+# if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
+    orrs    r1, r1, r2                  @ r1<- r1 | r2
+    cmp     r1, #0                      @ (redundant -- orrs set Z -- but harmless)
+# elif defined(WITH_DEBUGGER)
+    cmp     r1, #0                      @ debugger attached?
+# elif defined(WITH_PROFILER)
+    cmp     r2, #0                      @ profiler started?
+# endif
+    bne     3f                          @ debugger/profiler, switch interp
+#endif
+
+    mov     pc, lr                      @ nothing to do, return
+
+2:  @ check suspend
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
+    @ tail call: lr is untouched, so this returns straight to our caller
+    b       dvmCheckSuspendPending      @ suspend if necessary, then return
+
+3:  @ debugger/profiler enabled, bail out
+    add     rPC, rPC, r9                @ update rPC
+    str     r0, [rGLUE, #offGlue_entryPoint]    @ glue->entryPoint<- reentry type
+    mov     r1, #1                      @ "want switch" = true
+    b       common_gotoBail
+
+
+/*
+ * The equivalent of "goto bail", this calls through the "bail handler".
+ *
+ * State registers will be saved to the "glue" area before bailing.
+ *
+ * On entry:
+ *  r1 is "bool changeInterp", indicating if we want to switch to the
+ *     other interpreter or just bail all the way out
+ */
+common_gotoBail:
+    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
+    mov     r0, rGLUE                   @ r0<- glue ptr
+    b       dvmMterpStdBail             @ call(glue, changeInterp); does not return here
+
+    @ dead alternative implementation (longjmp-based bail), kept for reference:
+    @add     r1, r1, #1                  @ using (boolean+1)
+    @add     r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
+    @bl      _longjmp                    @ does not return
+    @bl      common_abort
+
+
+/*
+ * Common code for method invocation with range.
+ *
+ * Copies the AA contiguous argument vregs starting at fp[CCCC] into the
+ * "outs" area of the current frame, then falls through to the shared
+ * frame-setup code at .LinvokeArgsDone.
+ *
+ * On entry:
+ *  r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodRange:
+.LinvokeNewRange:
+    @ prepare to copy args to "outs" area of current frame
+    movs    r2, rINST, lsr #8           @ r2<- AA (arg count) -- test for zero
+    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
+    beq     .LinvokeArgsDone            @ if no args, skip the rest
+    FETCH(r1, 2)                        @ r1<- CCCC
+
+    @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
+    @ (very few methods have > 10 args; could unroll for common cases)
+    add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
+    sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
+    @ copy "count" words upward from &fp[CCCC] into the outs area
+1:  ldr     r1, [r3], #4                @ val = *fp++
+    subs    r2, r2, #1                  @ count--
+    str     r1, [r10], #4               @ *outs++ = val
+    bne     1b                          @ ...while count != 0
+    b       .LinvokeArgsDone
+
+/*
+ * Common code for method invocation without range.
+ *
+ * Copies the 0-5 individually-named argument vregs (A, G, F, E, D) into
+ * the "outs" area, then falls through to .LinvokeArgsDone.  The computed
+ * goto jumps into the ladder so exactly "count" copies execute: each
+ * numbered chunk is 4 instructions (16 bytes), matching the "lsl #4"
+ * scale; the "mov r0, r0" nops pad each chunk to that size.
+ *
+ * On entry:
+ *  r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodNoRange:
+.LinvokeNewNoRange:
+    @ prepare to copy args to "outs" area of current frame
+    movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
+    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
+    beq     .LinvokeArgsDone            @ if no args, skip the rest
+    FETCH(r1, 2)                        @ r1<- GFED
+
+    @ r0=methodToCall, r1=GFED, r2=count, r10=outs
+.LinvokeNonRange:
+    rsb     r2, r2, #5                  @ r2<- 5-r2
+    add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
+    bl      common_abort                @ (skipped due to ARM prefetch)
+5:  and     ip, rINST, #0x0f00          @ isolate A
+    ldr     r3, [rFP, ip, lsr #6]       @ r3<- vA (shift right 8, left 2)
+    mov     r0, r0                      @ nop
+    str     r3, [r10, #-4]!             @ *--outs = vA
+4:  and     ip, r1, #0xf000             @ isolate G
+    ldr     r3, [rFP, ip, lsr #10]      @ r3<- vG (shift right 12, left 2)
+    mov     r0, r0                      @ nop
+    str     r3, [r10, #-4]!             @ *--outs = vG
+3:  and     ip, r1, #0x0f00             @ isolate F
+    ldr     r3, [rFP, ip, lsr #6]       @ r3<- vF
+    mov     r0, r0                      @ nop
+    str     r3, [r10, #-4]!             @ *--outs = vF
+2:  and     ip, r1, #0x00f0             @ isolate E
+    ldr     r3, [rFP, ip, lsr #2]       @ r3<- vE
+    mov     r0, r0                      @ nop
+    str     r3, [r10, #-4]!             @ *--outs = vE
+1:  and     ip, r1, #0x000f             @ isolate D
+    ldr     r3, [rFP, ip, lsl #2]       @ r3<- vD
+    mov     r0, r0                      @ nop
+    str     r3, [r10, #-4]!             @ *--outs = vD
+0:  @ fall through to .LinvokeArgsDone
+
+.LinvokeArgsDone: @ r0=methodToCall
+    @ Shared invoke tail: size the callee frame, check for stack overflow,
+    @ fill in the new StackSaveArea, then either enter the bytecode of the
+    @ callee or branch off to the native-call path.
+    @ find space for the new stack frame, check for overflow
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
+    ldrh    r2, [r0, #offMethod_registersSize]  @ r2<- methodToCall->regsSize
+    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
+    sub     r1, r1, r2, lsl #2          @ r1<- newFp (old savearea - regsSize)
+    SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
+@    bl      common_dumpRegs
+    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
+    sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
+    cmp     r3, r9                      @ bottom < interpStackEnd?
+    blt     .LstackOverflow             @ yes, this frame will overflow stack
+
+    @ set up newSaveArea: linkage back to the caller's frame and method
+#ifdef EASY_GDB
+    SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
+    str     ip, [r10, #offStackSaveArea_prevSave]
+#endif
+    str     rFP, [r10, #offStackSaveArea_prevFrame]
+    str     rPC, [r10, #offStackSaveArea_savedPc]
+    str     r0, [r10, #offStackSaveArea_method]
+
+    ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
+    tst     r3, #ACC_NATIVE
+    bne     .LinvokeNative
+
+    /*
+    stmfd   sp!, {r0-r3}
+    bl      common_printNewline
+    mov     r0, rFP
+    mov     r1, #0
+    bl      dvmDumpFp
+    ldmfd   sp!, {r0-r3}
+    stmfd   sp!, {r0-r3}
+    mov     r0, r1
+    mov     r1, r10
+    bl      dvmDumpFp
+    bl      common_printNewline
+    ldmfd   sp!, {r0-r3}
+    */
+
+    @ Update "glue" values for the new method
+    @ r0=methodToCall, r1=newFp
+    ldr     r3, [r0, #offMethod_clazz]      @ r3<- method->clazz
+    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
+    ldr     r3, [r3, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+    ldr     rPC, [r0, #offMethod_insns]     @ rPC<- method->insns
+    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self
+    FETCH_INST()                            @ load rINST from rPC
+    mov     rFP, r1                         @ fp = newFp
+    GET_INST_OPCODE(ip)                     @ extract opcode from rINST
+    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
+    GOTO_OPCODE(ip)                         @ jump to next instruction
+
+.LinvokeNative:
+    @ Prep for the native call
+    @ r0=methodToCall, r1=newFp, r10=newSaveArea
+    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
+    ldr     r9, [r3, #offThread_jniLocal_nextEntry] @ r9<- thread->refNext
+    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
+    str     r9, [r10, #offStackSaveArea_localRefTop] @newFp->localRefTop=refNext
+    mov     r9, r3                      @ r9<- glue->self (preserve)
+
+    mov     r2, r0                      @ r2<- methodToCall
+    mov     r0, r1                      @ r0<- newFp (points to args)
+    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval
+
+#ifdef ASSIST_DEBUGGER
+    /* insert fake function header to help gdb find the stack frame */
+    b       .Lskip
+    .type   dalvik_mterp, %function
+dalvik_mterp:
+    .fnstart
+    MTERP_ENTRY1
+    MTERP_ENTRY2
+.Lskip:
+#endif
+
+    mov     lr, pc                      @ set return addr
+    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+
+    @ native return; r9=self, r10=newSaveArea
+    @ equivalent to dvmPopJniLocals
+    ldr     r0, [r10, #offStackSaveArea_localRefTop] @ r0<- newSave->localRefTop
+    ldr     r1, [r9, #offThread_exception] @ check for exception
+    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
+    cmp     r1, #0                      @ null?
+    str     r0, [r9, #offThread_jniLocal_nextEntry] @ self->refNext<- r0
+    bne     common_exceptionThrown      @ no, handle exception
+
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+.LstackOverflow:
+    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
+    bl      dvmHandleStackOverflow
+    b       common_exceptionThrown
+#ifdef ASSIST_DEBUGGER
+    .fnend
+#endif
+
+
+    /*
+     * Common code for method invocation, calling through "glue code".
+     *
+     * TODO: now that we have range and non-range invoke handlers, this
+     *       needs to be split into two.  Maybe just create entry points
+     *       that set r9 and jump here?
+     *
+     * On entry:
+     *  r0 is "Method* methodToCall", the method we're trying to call
+     *  r9 is "bool methodCallRange", indicating if this is a /range variant
+     *
+     * NOTE: assembled out by the ".if 0" below; kept as the reference
+     * path that funnels the invoke through the C implementation.
+     */
+     .if    0
+.LinvokeOld:
+    sub     sp, sp, #8                  @ space for args + pad
+    FETCH(ip, 2)                        @ ip<- FEDC or CCCC
+    mov     r2, r0                      @ A2<- methodToCall
+    mov     r0, rGLUE                   @ A0<- glue
+    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
+    mov     r1, r9                      @ A1<- methodCallRange
+    mov     r3, rINST, lsr #8           @ A3<- AA
+    str     ip, [sp, #0]                @ A4<- ip (5th arg, on the stack)
+    bl      dvmMterp_invokeMethod       @ call the C invokeMethod
+    add     sp, sp, #8                  @ remove arg area
+    b       common_resumeAfterGlueCall  @ continue to next instruction
+    .endif
+
+
+
+/*
+ * Common code for handling a return instruction.
+ *
+ * This does not return.
+ */
+common_returnFromMethod:
+.LreturnNew:
+    mov     r0, #kInterpEntryReturn
+    mov     r9, #0
+    bl      common_periodicChecks
+
+    @ Pop the current frame: follow the save-area link back to the
+    @ caller's frame and the method we're returning into.
+    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
+    ldr     rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
+    ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
+                                        @ r2<- method we're returning to
+    cmp     r2, #0                      @ is this a break frame?
+    mov     r1, #0                      @ "want switch" = false
+    beq     common_gotoBail             @ break frame, bail out completely
+
+    @ Restore the caller's PC, method, and DEX in the glue, then resume
+    @ at the instruction after the invoke (3 code units).
+    ldr     rPC, [r0, #offStackSaveArea_savedPc] @ pc = saveArea->savedPc
+    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
+    str     r2, [rGLUE, #offGlue_method]    @ glue->method = newSave->method
+    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
+    ldr     r1, [r2, #offMethod_clazz]      @ r1<- method->clazz
+    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
+    ldr     r1, [r1, #offClassObject_pDvmDex]   @ r1<- method->clazz->pDvmDex
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    str     r1, [rGLUE, #offGlue_methodClassDex]
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+    /*
+     * Return handling, calls through "glue code".
+     * (Assembled out by ".if 0"; reference path through the C code.)
+     */
+     .if    0
+.LreturnOld:
+    SAVE_PC_FP_TO_GLUE()                @ export state
+    mov     r0, rGLUE                   @ arg to function
+    bl      dvmMterp_returnFromMethod
+    b       common_resumeAfterGlueCall
+    .endif
+
+
+/*
+ * Somebody has thrown an exception.  Handle it.
+ *
+ * If the exception processing code returns to us (instead of falling
+ * out of the interpreter), continue with whatever the next instruction
+ * now happens to be.
+ *
+ * This does not return.
+ */
+common_exceptionThrown:
+.LexceptionNew:
+    mov     r0, #kInterpEntryThrow
+    mov     r9, #0
+    bl      common_periodicChecks
+
+    @ Pin the exception object so a GC during unwinding can't collect it,
+    @ then clear it from the thread so handler lookup starts clean.
+    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
+    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
+    mov     r1, r10                     @ r1<- self
+    mov     r0, r9                      @ r0<- exception
+    bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
+    mov     r3, #0                      @ r3<- NULL
+    str     r3, [r10, #offThread_exception] @ self->exception = NULL
+
+    /* set up args and a local for "&fp" */
+    /* (str sp, [sp, #-4]!  would be perfect here, but is discouraged) */
+    str     rFP, [sp, #-4]!             @ *--sp = fp
+    mov     ip, sp                      @ ip<- &fp
+    mov     r3, #0                      @ r3<- false
+    str     ip, [sp, #-4]!              @ *--sp = &fp
+    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
+    mov     r0, r10                     @ r0<- self
+    ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
+    mov     r2, r9                      @ r2<- exception
+    sub     r1, rPC, r1                 @ r1<- pc - method->insns
+    mov     r1, r1, asr #1              @ r1<- offset in code units
+
+    /* call, r0 gets catchRelPc (a code-unit offset) */
+    /* dvmFindCatchBlock may pop frames; it updates the fp local via &fp */
+    bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)
+    ldr     rFP, [sp, #4]               @ retrieve the updated rFP
+    cmp     r0, #0                      @ is catchRelPc < 0?
+    add     sp, sp, #8                  @ restore stack
+    bmi     .LnotCaughtLocally          @ negative: no handler in this thread
+
+    /* fix stack overflow if necessary; must preserve r0 */
+    ldrb    r1, [r10, #offThread_stackOverflowed]
+    cmp     r1, #0                      @ did we overflow earlier?
+    beq     1f                          @ no, skip ahead
+    mov     r9, r0                      @ r9<- r0 (save it)
+    mov     r0, r10                     @ r0<- self
+    bl      dvmCleanupStackOverflow     @ call(self)
+    mov     r0, r9                      @ r0<- r9 (restore it)
+    @ NOTE(review): r9 (the exception ref) was clobbered above and is
+    @ reloaded from self->exception, which was set to NULL earlier --
+    @ verify that the cleanup/unwind path re-stores it before this load.
+    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
+1:
+
+    /* adjust locals to match self->curFrame and updated PC */
+    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
+    ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
+    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
+    ldr     r2, [r1, #offMethod_clazz]      @ r2<- method->clazz
+    ldr     r3, [r1, #offMethod_insns]      @ r3<- method->insns
+    ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
+    add     rPC, r3, r0, asl #1             @ rPC<- method->insns + catchRelPc
+    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...
+
+    /* release the tracked alloc on the exception */
+    mov     r0, r9                      @ r0<- exception
+    mov     r1, r10                     @ r1<- self
+    bl      dvmReleaseTrackedAlloc      @ release the exception
+
+    /* restore the exception if the handler wants it */
+    FETCH_INST()                        @ load rINST from rPC
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
+    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+.LnotCaughtLocally: @ r9=exception, r10=self
+    /* fix stack overflow if necessary */
+    ldrb    r1, [r10, #offThread_stackOverflowed]
+    cmp     r1, #0                      @ did we overflow earlier?
+    movne   r0, r10                     @ if yes: r0<- self
+    blne    dvmCleanupStackOverflow     @ if yes: call(self)
+
+    @ may want to show "not caught locally" debug messages here
+#if DVM_SHOW_EXCEPTION >= 2
+    /* call __android_log_print(prio, tag, format, ...) */
+    /* "Exception %s from %s:%d not caught locally" */
+    @ dvmLineNumFromPC(method, pc - method->insns)
+    ldr     r0, [rGLUE, #offGlue_method]
+    ldr     r1, [r0, #offMethod_insns]
+    sub     r1, rPC, r1
+    asr     r1, r1, #1                  @ byte offset -> code-unit offset
+    bl      dvmLineNumFromPC
+    str     r0, [sp, #-4]!              @ push vararg: line number
+    @ dvmGetMethodSourceFile(method)
+    ldr     r0, [rGLUE, #offGlue_method]
+    bl      dvmGetMethodSourceFile
+    str     r0, [sp, #-4]!              @ push vararg: source file name
+    @ exception->clazz->descriptor
+    ldr     r3, [r9, #offObject_clazz]
+    ldr     r3, [r3, #offClassObject_descriptor]
+    @ NOTE(review): the two pushes above are never popped before the bail
+    @ below; presumably harmless because the bail longjmps, but confirm
+    @ before enabling this debug-only block.
+    ldr     r2, strExceptionNotCaughtLocally
+    ldr     r1, strLogTag
+    mov     r0, #3                      @ LOG_DEBUG
+    bl      __android_log_print
+#endif
+    @ Hand the pending exception back to the thread and bail out of the
+    @ interpreter so an outer handler can deal with it.
+    str     r9, [r10, #offThread_exception] @ restore exception
+    mov     r0, r9                      @ r0<- exception
+    mov     r1, r10                     @ r1<- self
+    bl      dvmReleaseTrackedAlloc      @ release the exception
+    mov     r1, #0                      @ "want switch" = false
+    b       common_gotoBail             @ bail out
+
+
+    /*
+     * Exception handling, calls through "glue code".
+     * (Assembled out by ".if 0"; reference path through the C code.)
+     */
+    .if     0
+.LexceptionOld:
+    SAVE_PC_FP_TO_GLUE()                @ export state
+    mov     r0, rGLUE                   @ arg to function
+    bl      dvmMterp_exceptionThrown
+    b       common_resumeAfterGlueCall
+    .endif
+
+
+/*
+ * After returning from a "glued" function, pull out the updated
+ * values and start executing at the next instruction.
+ * (The C code may have changed the PC and/or frame pointer.)
+ */
+common_resumeAfterGlueCall:
+    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
+    FETCH_INST()                        @ load rINST from rPC
+    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
+    GOTO_OPCODE(ip)                     @ jump to next instruction
+
+/*
+ * Invalid array index.
+ *
+ * All of these "throw" helpers share one shape: EXPORT_PC() first, so
+ * the exception is associated with the current instruction, then
+ * dvmThrowException(classDescriptor, optionalMessage) followed by a
+ * jump into the common exception-unwind path.
+ */
+common_errArrayIndex:
+    EXPORT_PC()
+    ldr     r0, strArrayIndexException
+    mov     r1, #0                      @ no detail message
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+/*
+ * Invalid array value.
+ */
+common_errArrayStore:
+    EXPORT_PC()
+    ldr     r0, strArrayStoreException
+    mov     r1, #0                      @ no detail message
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+/*
+ * Integer divide or mod by zero.
+ */
+common_errDivideByZero:
+    EXPORT_PC()
+    ldr     r0, strArithmeticException
+    ldr     r1, strDivideByZero         @ detail message: "divide by zero"
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+/*
+ * Attempt to allocate an array with a negative size.
+ */
+common_errNegativeArraySize:
+    EXPORT_PC()
+    ldr     r0, strNegativeArraySizeException
+    mov     r1, #0                      @ no detail message
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+/*
+ * Invocation of a non-existent method.
+ */
+common_errNoSuchMethod:
+    EXPORT_PC()
+    ldr     r0, strNoSuchMethodError
+    mov     r1, #0                      @ no detail message
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+/*
+ * We encountered a null object when we weren't expecting one.  We
+ * export the PC, throw a NullPointerException, and goto the exception
+ * processing code.
+ */
+common_errNullObject:
+    EXPORT_PC()
+    ldr     r0, strNullPointerException
+    mov     r1, #0                      @ no detail message
+    bl      dvmThrowException
+    b       common_exceptionThrown
+
+/*
+ * For debugging, cause an immediate fault.  The source address will
+ * be in lr (use a bl instruction to jump here).
+ */
+common_abort:
+    ldr     pc, .LdeadFood              @ jump to a recognizable bad address
+.LdeadFood:
+    .word   0xdeadf00d                  @ faults; easy to spot in a crash dump
+
+/*
+ * Spit out a "we were here", preserving all registers.  (The attempt
+ * to save ip won't work, but we need to save an even number of
+ * registers for EABI 64-bit stack alignment.)
+ */
+    .macro  SQUEAK num
+common_squeak\num:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    ldr     r0, strSqueak               @ r0<- format string "<%d>"
+    mov     r1, #\num
+    bl      printf
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+    .endm
+
+    @ instantiate common_squeak0 .. common_squeak5
+    SQUEAK  0
+    SQUEAK  1
+    SQUEAK  2
+    SQUEAK  3
+    SQUEAK  4
+    SQUEAK  5
+
+/*
+ * Spit out the number in r0, preserving registers.
+ */
+common_printNum:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    mov     r1, r0                      @ r1<- value to print
+    ldr     r0, strSqueak               @ r0<- format string "<%d>"
+    bl      printf
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+
+/*
+ * Print a newline, preserving registers.
+ */
+common_printNewline:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    ldr     r0, strNewline
+    bl      printf
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+
+    /*
+     * Print the 32-bit quantity in r0 as a hex value, preserving registers.
+     */
+common_printHex:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    mov     r1, r0                      @ r1<- value to print
+    ldr     r0, strPrintHex             @ r0<- format string "<0x%x>"
+    bl      printf
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+
+/*
+ * Print the 64-bit quantity in r0-r1, preserving registers.
+ */
+common_printLong:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    mov     r3, r1                      @ pass 64-bit value in r2/r3
+    mov     r2, r0
+    ldr     r0, strPrintLong            @ r0<- format string "<%lld>"
+    bl      printf
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+
+/*
+ * Print full method info.  Pass the Method* in r0.  Preserves regs.
+ */
+common_printMethod:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    bl      dvmMterpPrintMethod
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+
+/*
+ * Call a C helper function that dumps regs and possibly some
+ * additional info.  Requires the C function to be compiled in.
+ * (Assembled out by ".if 0"; enable along with the C helper.)
+ */
+    .if     0
+common_dumpRegs:
+    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
+    bl      dvmMterpDumpArmRegs
+    ldmfd   sp!, {r0, r1, r2, r3, ip, pc}
+    .endif
+
+
+/*
+ * String references, must be close to the code that uses them.
+ */
+    .align  2
+strArithmeticException:
+    .word   .LstrArithmeticException
+strArrayIndexException:
+    .word   .LstrArrayIndexException
+strArrayStoreException:
+    .word   .LstrArrayStoreException
+strDivideByZero:
+    .word   .LstrDivideByZero
+strNegativeArraySizeException:
+    .word   .LstrNegativeArraySizeException
+strNoSuchMethodError:
+    .word   .LstrNoSuchMethodError
+strNullPointerException:
+    .word   .LstrNullPointerException
+
+strLogTag:
+    .word   .LstrLogTag
+strExceptionNotCaughtLocally:
+    .word   .LstrExceptionNotCaughtLocally
+
+strNewline:
+    .word   .LstrNewline
+strSqueak:
+    .word   .LstrSqueak
+strPrintHex:
+    .word   .LstrPrintHex
+strPrintLong:
+    .word   .LstrPrintLong
+
+/*
+ * Zero-terminated ASCII string data.
+ *
+ * On ARM we have two choices: do like gcc does, and LDR from a .word
+ * with the address, or use an ADR pseudo-op to get the address
+ * directly.  ADR saves 4 bytes and an indirection, but it's using a
+ * PC-relative addressing mode and hence has a limited range, which
+ * makes it not work well with mergeable string sections.
+ */
+    .section .rodata.str1.4,"aMS",%progbits,1
+
+.LstrBadEntryPoint:
+    .asciz  "Bad entry point %d\n"
+.LstrArithmeticException:
+    .asciz  "Ljava/lang/ArithmeticException;"
+.LstrArrayIndexException:
+    .asciz  "Ljava/lang/ArrayIndexOutOfBoundsException;"
+.LstrArrayStoreException:
+    .asciz  "Ljava/lang/ArrayStoreException;"
+.LstrClassCastException:
+    .asciz  "Ljava/lang/ClassCastException;"
+.LstrDivideByZero:
+    .asciz  "divide by zero"
+.LstrFilledNewArrayNotImpl:
+    .asciz  "filled-new-array only implemented for 'int'"
+.LstrInternalError:
+    .asciz  "Ljava/lang/InternalError;"
+.LstrInstantiationError:
+    .asciz  "Ljava/lang/InstantiationError;"
+.LstrNegativeArraySizeException:
+    .asciz  "Ljava/lang/NegativeArraySizeException;"
+.LstrNoSuchMethodError:
+    .asciz  "Ljava/lang/NoSuchMethodError;"
+.LstrNullPointerException:
+    .asciz  "Ljava/lang/NullPointerException;"
+
+    @ used by the DVM_SHOW_EXCEPTION logging path
+.LstrLogTag:
+    .asciz  "mterp"
+.LstrExceptionNotCaughtLocally:
+    .asciz  "Exception %s from %s:%d not caught locally\n"
+
+    @ used by the debug print helpers
+.LstrNewline:
+    .asciz  "\n"
+.LstrSqueak:
+    .asciz  "<%d>"
+.LstrPrintHex:
+    .asciz  "<0x%x>"
+.LstrPrintLong:
+    .asciz  "<%lld>"
+
+
diff --git a/vm/mterp/out/InterpAsm-desktop.S b/vm/mterp/out/InterpAsm-desktop.S
new file mode 100644
index 0000000..6d7689c
--- /dev/null
+++ b/vm/mterp/out/InterpAsm-desktop.S
@@ -0,0 +1,35 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'desktop'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+
+/*
+ * Minimal "desktop" variant: no real assembly handlers.  We still emit
+ * the start/end bracket symbols the interpreter expects, around a single
+ * dummy (empty) OP_NOP slot, so the build links cleanly.
+ */
+    .global dvmAsmInstructionStart
+    .type   dvmAsmInstructionStart, %function
+dvmAsmInstructionStart = .L_OP_NOP
+    .text
+
+    .balign 64
+.L_OP_NOP:   /* dummy */
+
+    .balign 64
+    .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
+    .global dvmAsmInstructionEnd
+dvmAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ *  Sister implementations
+ * ===========================================================================
+ */
+    .global dvmAsmSisterStart
+    .type   dvmAsmSisterStart, %function
+    .text
+    .balign 4
+dvmAsmSisterStart:
+
+    .size   dvmAsmSisterStart, .-dvmAsmSisterStart
+    .global dvmAsmSisterEnd
+dvmAsmSisterEnd:
+
diff --git a/vm/mterp/out/InterpC-armv5.c b/vm/mterp/out/InterpC-armv5.c
new file mode 100644
index 0000000..1e112ad
--- /dev/null
+++ b/vm/mterp/out/InterpC-armv5.c
@@ -0,0 +1,1736 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'armv5'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: c/header.c */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h>                   // needed for fmod, fmodf
+
+
+/*
+ * Forward declarations for the out-of-line "goto targets": each former
+ * interpreter goto label becomes a dvmMterp_<target>() function taking
+ * the glue pointer plus any extra arguments the label needed.
+ */
+#define GOTO_TARGET_DECL(_target, ...)                                      \
+    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+    u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
+/* File: c/opcommon.c */
+/*
+ * Redefine what used to be local variable accesses into MterpGlue struct
+ * references.  (These are undefined down in "footer.c".)
+ */
+#define retval                  glue->retval
+#define pc                      glue->pc
+#define fp                      glue->fp
+#define method                  glue->method
+#define methodClassDex          glue->methodClassDex
+#define self                    glue->self
+//#define entryPoint              glue->entryPoint
+#define debugTrackedRefStart    glue->debugTrackedRefStart
+
+
+/*
+ * Replace the opcode definition macros.  Here, each opcode is a separate
+ * function that takes a "glue" argument and returns void.  We can't declare
+ * these "static" because they may be called from an assembly stub.
+ */
+#undef HANDLE_OPCODE
+#undef OP_END
+#undef FINISH
+
+#define HANDLE_OPCODE(_op)                                                  \
+    void dvmMterp_##_op(MterpGlue* glue) {                                  \
+        u2 ref, vsrc1, vsrc2, vdst;                                         \
+        u2 inst = FETCH(0);
+
+/* Closes the function body opened by HANDLE_OPCODE. */
+#define OP_END }
+
+/*
+ * Like standard FINISH, but don't reload "inst", and return to caller
+ * when done.  (Redispatching on the next opcode is the caller's job.)
+ */
+#define FINISH(_offset) {                                                   \
+        ADJUST_PC(_offset);                                                 \
+        CHECK_DEBUG_AND_PROF();                                             \
+        CHECK_TRACKED_REFS();                                               \
+        return;                                                             \
+    }
+
+
+/*
+ * The "goto label" statements turn into function calls followed by
+ * return statements.  Some of the functions take arguments.
+ */
+#define GOTO(_target, ...)                                                  \
+    do {                                                                    \
+        dvmMterp_##_target(glue, ## __VA_ARGS__);                           \
+        return;                                                             \
+    } while(false)
+
+/*
+ * As a special case, "goto bail" turns into a longjmp.  "_switch" should be
+ * "true" if we need to switch to the other interpreter upon our return.
+ * (Because it longjmps, dvmMterpStdBail does not return here.)
+ */
+#define GOTO_BAIL(_switch)                                                  \
+    dvmMterpStdBail(glue, _switch);
+
+/* for now, mterp is always a "standard" interpreter */
+#define INTERP_TYPE INTERP_STD
+
+/*
+ * Periodic checks macro, slightly modified.  Runs the quick suspend
+ * check; if an interpreter switch is needed, records the entry point
+ * and adjusted PC in the glue and bails out.
+ */
+#define PERIODIC_CHECKS(_entryPoint, _pcadj) {                              \
+        dvmCheckSuspendQuick(self);                                         \
+        if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
+            ADJUST_PC(_pcadj);                                              \
+            glue->entryPoint = _entryPoint;                                 \
+            LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n",              \
+                glue->self->threadId, (_entryPoint), (_pcadj));             \
+            GOTO_BAIL(true);                                                \
+        }                                                                   \
+    }
+
+
+/*
+ * ===========================================================================
+ *
+ * What follows are the "common" opcode definitions copied & pasted from the
+ * basic interpreter.  The only changes that need to be made to the original
+ * sources are:
+ *  - replace "goto exceptionThrown" with "GOTO(exceptionThrown)"
+ *
+ * ===========================================================================
+ */
+
+
+/*
+ * Register-to-register numeric conversion: vA <- (_totype)(_fromtype)vB.
+ */
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype)                \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        SET_REGISTER##_totype(vdst,                                         \
+            GET_REGISTER##_fromtype(vsrc1));                                \
+        FINISH(1);
+
+/*
+ * Float/double -> integer conversion with the spec-mandated handling of
+ * NaN (result 0) and +/- infinity (saturate to intMin/intMax).
+ *
+ * NOTE(review): the first "result = (_tovtype) val;" is dead -- every
+ * branch of the if/else chain assigns result again.  Forming intMin by
+ * left-shifting 1 into the sign bit is also formally undefined behavior
+ * for signed types.  This file is generated; any fix belongs in the
+ * mterp template source (c/opcommon.c), not here.
+ */
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype,       \
+        _tovtype, _tortype)                                                 \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+    {                                                                       \
+        /* spec defines specific handling for +/- inf and NaN values */     \
+        _fromvtype val;                                                     \
+        _tovtype intMin, intMax, result;                                    \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        val = GET_REGISTER##_fromrtype(vsrc1);                              \
+        intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1);                 \
+        intMax = ~intMin;                                                   \
+        result = (_tovtype) val;                                            \
+        if (val >= intMax)          /* +inf */                              \
+            result = intMax;                                                \
+        else if (val <= intMin)     /* -inf */                              \
+            result = intMin;                                                \
+        else if (val != val)        /* NaN */                               \
+            result = 0;                                                     \
+        else                                                                \
+            result = (_tovtype) val;                                        \
+        SET_REGISTER##_tortype(vdst, result);                               \
+    }                                                                       \
+    FINISH(1);
+
+/*
+ * Narrowing int conversion (int-to-byte/char/short): vA <- (_type) vB.
+ */
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type)                        \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1);                \
+        SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1));                    \
+        FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+/*
+ * cmp-long/cmpl/cmpg kernels: result is 0/-1/1 for equal/less/greater.
+ * The final "else" is reachable only when an operand is NaN (all three
+ * ordered comparisons fail) and yields the opcode-specific _nanVal.
+ */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal)          \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        int result;                                                         \
+        u2 regs;                                                            \
+        _varType val1, val2;                                                \
+        vdst = INST_AA(inst);                                               \
+        regs = FETCH(1);                                                    \
+        vsrc1 = regs & 0xff;                                                \
+        vsrc2 = regs >> 8;                                                  \
+        ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);         \
+        val1 = GET_REGISTER##_type(vsrc1);                                  \
+        val2 = GET_REGISTER##_type(vsrc2);                                  \
+        if (val1 == val2)                                                   \
+            result = 0;                                                     \
+        else if (val1 < val2)                                               \
+            result = -1;                                                    \
+        else if (val1 > val2)                                               \
+            result = 1;                                                     \
+        else                                                                \
+            result = (_nanVal);                                             \
+        ILOGV("+ result=%d\n", result);                                     \
+        SET_REGISTER(vdst, result);                                         \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Two-register conditional branch (if-eq .. if-le).  A taken backward
+ * branch runs PERIODIC_CHECKS, so tight loops poll for suspension.
+ */
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp)                             \
+    HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/)                                \
+        vsrc1 = INST_A(inst);                                               \
+        vsrc2 = INST_B(inst);                                               \
+        if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) {       \
+            int branchOffset = (s2)FETCH(1);    /* sign-extended */         \
+            ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2,        \
+                branchOffset);                                              \
+            ILOGV("> branch taken");                                        \
+            if (branchOffset < 0)                                           \
+                PERIODIC_CHECKS(kInterpEntryInstr, branchOffset);           \
+            FINISH(branchOffset);                                           \
+        } else {                                                            \
+            ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2);             \
+            FINISH(2);                                                      \
+        }
+
+/*
+ * Compare-register-against-zero conditional branch (if-eqz .. if-lez),
+ * with the same backward-branch suspend check.
+ */
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp)                            \
+    HANDLE_OPCODE(_opcode /*vAA, +BBBB*/)                                   \
+        vsrc1 = INST_AA(inst);                                              \
+        if ((s4) GET_REGISTER(vsrc1) _cmp 0) {                              \
+            int branchOffset = (s2)FETCH(1);    /* sign-extended */         \
+            ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset);    \
+            ILOGV("> branch taken");                                        \
+            if (branchOffset < 0)                                           \
+                PERIODIC_CHECKS(kInterpEntryInstr, branchOffset);           \
+            FINISH(branchOffset);                                           \
+        } else {                                                            \
+            ILOGV("|if-%s v%d,-", (_opname), vsrc1);                        \
+            FINISH(2);                                                      \
+        }
+
+/*
+ * Generic unary op: vA = _pfx vB _sfx, e.g. neg (_pfx "-"), not (_pfx "~"),
+ * or a cast spelled via prefix/suffix tokens.  _type selects the register
+ * accessor suffix (empty for 32-bit, _WIDE/_FLOAT/_DOUBLE for others).
+ * 1-code-unit instruction.
+ */
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type)                    \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx);    \
+        FINISH(1);
+
+/*
+ * Binary 32-bit int op, three-register form (23x): vAA = vBB _op vCC,
+ * both operands treated as signed (s4).  When _chkdiv is nonzero (div/rem)
+ * a zero divisor throws ArithmeticException; EXPORT_PC precedes the throw
+ * so the exception handler sees the correct pc.  2-code-unit instruction.
+ *
+ * Fix: the debug log previously printed only vdst,vsrc1 for this
+ * three-register op; now logs all three, matching HANDLE_OP_X_LONG.
+ */
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv)                     \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-int v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);        \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER(vsrc2) == 0) {                                 \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2));         \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * 32-bit shift op, three-register form (23x): vAA = (_cast vBB) _op vCC.
+ * The shift distance is masked to the low 5 bits (& 0x1f), so shifting by
+ * >= 32 wraps rather than invoking C undefined behavior; _cast selects
+ * arithmetic (s4) vs logical (u4) right shift.  2-code-unit instruction.
+ *
+ * Fix: the debug log previously printed only vdst,vsrc1 for this
+ * three-register op; now logs all three, matching HANDLE_OP_SHX_LONG.
+ */
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op)                     \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-int v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);        \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f));    \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Int op with 16-bit literal (22s): vA = (_cast vB) _op (s2)CCCC.
+ * The literal is sign-extended from 16 bits.  When _chkdiv is set, a zero
+ * literal throws ArithmeticException (checked on the sign-extended value).
+ * 2-code-unit instruction.
+ */
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _cast, _op, _chkdiv)        \
+    HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/)                               \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        vsrc2 = FETCH(1);                                                   \
+        ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x",                             \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        if (_chkdiv) {                                                      \
+            if ((s2) vsrc2 == 0) {                                          \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (s2) vsrc2);                      \
+        FINISH(2);
+
+/*
+ * Int op with 8-bit literal (22b): vAA = (s4)vBB _op (s1)CC.
+ * The second code unit packs the source register (low byte) and the
+ * literal (high byte); the literal is sign-extended from 8 bits.  When
+ * _chkdiv is set, a zero literal throws ArithmeticException.  2 code units.
+ */
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv)                \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/)                               \
+    {                                                                       \
+        u2 litInfo;                                                         \
+        vdst = INST_AA(inst);                                               \
+        litInfo = FETCH(1);                                                 \
+        vsrc1 = litInfo & 0xff;                                             \
+        vsrc2 = litInfo >> 8;       /* constant */                          \
+        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x",                              \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        if (_chkdiv) {                                                      \
+            if ((s1) vsrc2 == 0) {                                          \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2);                       \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * 32-bit shift with 8-bit literal distance (22b):
+ * vAA = (_cast vBB) _op (CC & 0x1f).  The distance is masked to 5 bits,
+ * avoiding C undefined behavior for shifts >= 32; _cast selects arithmetic
+ * (s4) vs logical (u4) right shift.  2-code-unit instruction.
+ */
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op)                \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/)                               \
+    {                                                                       \
+        u2 litInfo;                                                         \
+        vdst = INST_AA(inst);                                               \
+        litInfo = FETCH(1);                                                 \
+        vsrc1 = litInfo & 0xff;                                             \
+        vsrc2 = litInfo >> 8;       /* constant */                          \
+        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x",                              \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f));                  \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Binary 32-bit int op, two-address form (12x): vA = (s4)vA _op (s4)vB.
+ * When _chkdiv is set, a zero value in vB (the divisor) throws
+ * ArithmeticException, with EXPORT_PC before the throw.  1 code unit.
+ */
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv)               \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1);             \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER(vsrc1) == 0) {                                 \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1));          \
+        FINISH(1);
+
+/*
+ * 32-bit shift, two-address form (12x): vA = (_cast vA) _op (vB & 0x1f).
+ * Shift distance masked to 5 bits; _cast selects arithmetic vs logical
+ * right shift.  1-code-unit instruction.
+ */
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op)               \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1);             \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f));     \
+        FINISH(1);
+
+/*
+ * Binary 64-bit long op, three-register form (23x):
+ * vAA = (s8)vBB _op (s8)vCC, using wide register-pair accessors.  When
+ * _chkdiv is set, a zero 64-bit divisor throws ArithmeticException.
+ * 2-code-unit instruction.
+ */
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv)                    \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);       \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER_WIDE(vsrc2) == 0) {                            \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER_WIDE(vdst,                                             \
+            (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * 64-bit shift, three-register form (23x):
+ * vAA = (_cast vBB) _op (vCC & 0x3f).  The distance comes from a normal
+ * 32-bit register and is masked to 6 bits (shifts >= 64 wrap rather than
+ * being C undefined behavior).  2-code-unit instruction.
+ */
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op)                    \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);       \
+        SET_REGISTER_WIDE(vdst,                                             \
+            _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Binary 64-bit long op, two-address form (12x): vA = (s8)vA _op (s8)vB.
+ * When _chkdiv is set, a zero 64-bit divisor in vB throws
+ * ArithmeticException.  1-code-unit instruction.
+ */
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv)              \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1);            \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER_WIDE(vsrc1) == 0) {                            \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER_WIDE(vdst,                                             \
+            (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1)); \
+        FINISH(1);
+
+/*
+ * 64-bit shift, two-address form (12x): vA = (_cast vA) _op (vB & 0x3f).
+ * Distance from a 32-bit register, masked to 6 bits.  1 code unit.
+ */
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op)              \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1);            \
+        SET_REGISTER_WIDE(vdst,                                             \
+            _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+        FINISH(1);
+
+/*
+ * Binary float op, three-register form (23x): vAA = vBB _op vCC.
+ * No _chkdiv parameter: floating-point division by zero does not throw
+ * here (no divisor check is performed).  2-code-unit instruction.
+ */
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op)                            \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);      \
+        SET_REGISTER_FLOAT(vdst,                                            \
+            GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2));       \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Binary double op, three-register form (23x): vAA = vBB _op vCC, using
+ * double (register-pair) accessors.  No divisor check, as with float.
+ * 2-code-unit instruction.
+ */
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op)                           \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);     \
+        SET_REGISTER_DOUBLE(vdst,                                           \
+            GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2));     \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Binary float op, two-address form (12x): vA = vA _op vB.  1 code unit.
+ */
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op)                      \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1);           \
+        SET_REGISTER_FLOAT(vdst,                                            \
+            GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1));        \
+        FINISH(1);
+
+/*
+ * Binary double op, two-address form (12x): vA = vA _op vB.  1 code unit.
+ */
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op)                     \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1);          \
+        SET_REGISTER_DOUBLE(vdst,                                           \
+            GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1));      \
+        FINISH(1);
+
+/*
+ * Array get (23x): vAA = vBB[vCC], where _type is the element C type and
+ * _regsize selects the register-store width (e.g. _WIDE for long/double).
+ * EXPORT_PC first because both the null check and the bounds check can
+ * throw.  The bounds test is a single unsigned compare against length
+ * (assumes GET_REGISTER yields an unsigned value, so a negative index
+ * becomes large and fails the check — TODO confirm accessor type).
+ * 2-code-unit instruction.
+ */
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        ArrayObject* arrayObj;                                              \
+        u2 arrayInfo;                                                       \
+        EXPORT_PC();                                                        \
+        vdst = INST_AA(inst);                                               \
+        arrayInfo = FETCH(1);                                               \
+        vsrc1 = arrayInfo & 0xff;    /* array ptr */                        \
+        vsrc2 = arrayInfo >> 8;      /* index */                            \
+        ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);        \
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);                      \
+        if (!checkForNull((Object*) arrayObj))                              \
+            GOTO(exceptionThrown);                                          \
+        if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
+            LOGV("Invalid array access: %p %d (len=%d)\n",                  \
+                arrayObj, vsrc2, arrayObj->length);                         \
+            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+                NULL);                                                      \
+            GOTO(exceptionThrown);                                          \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)]);            \
+        ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));  \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Array put (23x): vBB[vCC] = vAA.  Mirror of HANDLE_OP_AGET: EXPORT_PC
+ * up front, null check on the array register, unsigned bounds check, then
+ * a store of _regsize width into the _type-typed contents array.
+ * 2-code-unit instruction.
+ */
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        ArrayObject* arrayObj;                                              \
+        u2 arrayInfo;                                                       \
+        EXPORT_PC();                                                        \
+        vdst = INST_AA(inst);       /* AA: source value */                  \
+        arrayInfo = FETCH(1);                                               \
+        vsrc1 = arrayInfo & 0xff;   /* BB: array ptr */                     \
+        vsrc2 = arrayInfo >> 8;     /* CC: index */                         \
+        ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);        \
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);                      \
+        if (!checkForNull((Object*) arrayObj))                              \
+            GOTO(exceptionThrown);                                          \
+        if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
+            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+                NULL);                                                      \
+            GOTO(exceptionThrown);                                          \
+        }                                                                   \
+        ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+        ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)] =                \
+            GET_REGISTER##_regsize(vdst);                                   \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits.  Consider:
+ *   short foo = -1  (sets a 32-bit register to 0xffffffff)
+ *   iput-quick foo  (writes all 32 bits to the field)
+ *   short bar = 1   (sets a 32-bit register to 0x00000001)
+ *   iput-short      (writes the low 16 bits to the field)
+ *   iget-quick foo  (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field.  This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time.  On
+ * a device with a 16-bit data bus this is sub-optimal.  (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
+/*
+ * Instance field get (22c): vA = obj(vB).field, where the field is named
+ * by dex reference CCCC.  Uses the resolved-field cache in methodClassDex;
+ * on a miss, dvmResolveInstField resolves (and may throw, hence the
+ * up-front EXPORT_PC).  _ftype picks the dvmGetField* accessor and
+ * _regsize the register-store width.  UPDATE_FIELD_GET hooks field-access
+ * tracking.  2-code-unit instruction.
+ */
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO(exceptionThrown);                                          \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(method->clazz, ref);               \
+            if (ifield == NULL)                                             \
+                GOTO(exceptionThrown);                                      \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Optimized instance field get: here "ref" is a precomputed byte offset
+ * into the object, not a field index — no resolution or cache lookup.
+ * Because no EXPORT_PC was done up front, the null check uses
+ * checkForNullExportPC, which exports the pc itself before throwing.
+ * See the comment above HANDLE_IGET_X about sub-32-bit field hazards.
+ * 2-code-unit instruction.
+ */
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        Object* obj;                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field offset */                          \
+        ILOGV("|iget%s-quick v%d,v%d,field@+%u",                            \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNullExportPC(obj, fp, pc))                             \
+            GOTO(exceptionThrown);                                          \
+        SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref));        \
+        ILOGV("+ IGETQ %d=0x%08llx", ref,                                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Instance field put (22c): obj(vB).field = vA.  Same resolution and
+ * null-check flow as HANDLE_IGET_X, but writes the field via
+ * dvmSetField##_ftype and reports via UPDATE_FIELD_PUT.
+ * 2-code-unit instruction.
+ */
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO(exceptionThrown);                                          \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(method->clazz, ref);               \
+            if (ifield == NULL)                                             \
+                GOTO(exceptionThrown);                                      \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Optimized instance field put: "ref" is a precomputed byte offset (no
+ * resolution); null check via checkForNullExportPC since pc was not
+ * exported earlier.  NOTE(review): the log format says "field@0x%04x"
+ * while iget-quick logs "field@+%u" for the same offset operand —
+ * cosmetic inconsistency only.  2-code-unit instruction.
+ */
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        Object* obj;                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field offset */                          \
+        ILOGV("|iput%s-quick v%d,v%d,field@0x%04x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNullExportPC(obj, fp, pc))                             \
+            GOTO(exceptionThrown);                                          \
+        dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst));        \
+        ILOGV("+ IPUTQ %d=0x%08llx", ref,                                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Static field get (21c): vAA = staticfield(BBBB).  No object, so no null
+ * check; EXPORT_PC is deferred to the slow path, since only resolution
+ * can throw here.  _ftype picks the dvmGetStaticField* accessor, _regsize
+ * the register-store width.  2-code-unit instruction.
+ */
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        vdst = INST_AA(inst);                                               \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref);           \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(method->clazz, ref);             \
+            if (sfield == NULL)                                             \
+                GOTO(exceptionThrown);                                      \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Static field put (21c): staticfield(BBBB) = vAA.  Mirror of
+ * HANDLE_SGET_X: cached-resolution fast path, EXPORT_PC only on the
+ * resolve slow path, write via dvmSetStaticField##_ftype, then
+ * UPDATE_FIELD_PUT.  2-code-unit instruction.
+ */
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        vdst = INST_AA(inst);                                               \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref);           \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(method->clazz, ref);             \
+            if (sfield == NULL)                                             \
+                GOTO(exceptionThrown);                                      \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+
+
+/* File: c/footer.c */
+/*
+ * C footer.  This has some common code shared by the various targets.
+ */
+
+/*
+ * GOTO_TARGET/GOTO_TARGET_END bracket each "goto target".  In this build
+ * each target becomes a standalone function dvmMterp_<target>(glue, ...),
+ * with the common locals (ref, vsrc1, vsrc2, vdst, methodToCall,
+ * debugSaveArea) declared up front and the current instruction word
+ * re-fetched via FETCH(0).  GOTO_TARGET_END supplies the closing brace.
+ */
+#define GOTO_TARGET(_target, ...)                                           \
+    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) {              \
+        u2 ref, vsrc1, vsrc2, vdst;                                         \
+        u2 inst = FETCH(0);                                                 \
+        const Method* methodToCall;                                         \
+        StackSaveArea* debugSaveArea;
+
+#define GOTO_TARGET_END }
+
+
+/*
+ * Everything from here on is a "goto target".  In the basic interpreter
+ * we jump into these targets and then jump directly to the handler for
+ * next instruction.  Here, these are subroutines that return to the caller.
+ */
+
+GOTO_TARGET(filledNewArray, bool methodCallRange)
+    {
+        ClassObject* arrayClass;
+        ArrayObject* newArray;
+        int* contents;
+        char typeCh;
+        int i;
+        u4 arg5;
+
+        EXPORT_PC();
+
+        ref = FETCH(1);             /* class ref */
+        vdst = FETCH(2);            /* first 4 regs -or- range base */
+
+        if (methodCallRange) {
+            vsrc1 = INST_AA(inst);  /* #of elements */
+            arg5 = -1;              /* silence compiler warning */
+            ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+        } else {
+            arg5 = INST_A(inst);
+            vsrc1 = INST_B(inst);   /* #of elements */
+            ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1, ref, vdst, arg5);
+        }
+
+        /*
+         * Resolve the array class.
+         */
+        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (arrayClass == NULL) {
+            arrayClass = dvmResolveClass(method->clazz, ref, false);
+            if (arrayClass == NULL)
+                GOTO(exceptionThrown);
+        }
+        /*
+        if (!dvmIsArrayClass(arrayClass)) {
+            dvmThrowException("Ljava/lang/RuntimeError;",
+                "filled-new-array needs array class");
+            GOTO(exceptionThrown);
+        }
+        */
+        /* verifier guarantees this is an array class */
+        assert(dvmIsArrayClass(arrayClass));
+        assert(dvmIsClassInitialized(arrayClass));
+
+        /*
+         * Create an array of the specified type.
+         */
+        LOGVV("+++ filled-new-array type is '%s'\n", arrayClass->descriptor);
+        typeCh = arrayClass->descriptor[1];     /* element type: char after '[' */
+        if (typeCh == 'D' || typeCh == 'J') {
+            /* category 2 primitives not allowed */
+            dvmThrowException("Ljava/lang/RuntimeError;",
+                "bad filled array req");
+            GOTO(exceptionThrown);
+        } else if (typeCh == 'L' || typeCh == '[') {
+            /* create array of objects or array of arrays */
+            /* TODO: need some work in the verifier before we allow this */
+            LOGE("fnao not implemented\n");
+            dvmThrowException("Ljava/lang/InternalError;",
+                "filled-new-array not implemented for reference types");
+            GOTO(exceptionThrown);
+        } else if (typeCh != 'I') {
+            /* TODO: requires multiple "fill in" loops with different widths */
+            LOGE("non-int not implemented\n");
+            dvmThrowException("Ljava/lang/InternalError;",
+                "filled-new-array not implemented for anything but 'int'");
+            GOTO(exceptionThrown);
+        }
+
+        assert(strchr("BCIFZ", typeCh) != NULL);
+        newArray = dvmAllocPrimitiveArray(arrayClass->descriptor[1], vsrc1,
+                    ALLOC_DONT_TRACK);
+        if (newArray == NULL)
+            GOTO(exceptionThrown);
+
+        /*
+         * Fill in the elements.  It's legal for vsrc1 to be zero.
+         */
+        contents = (int*) newArray->contents;
+        if (methodCallRange) {
+            for (i = 0; i < vsrc1; i++)
+                contents[i] = GET_REGISTER(vdst+i);
+        } else {
+            assert(vsrc1 <= 5);
+            if (vsrc1 == 5) {
+                contents[4] = GET_REGISTER(arg5);   /* 5th arg comes from A, not vdst */
+                vsrc1--;
+            }
+            for (i = 0; i < vsrc1; i++) {
+                contents[i] = GET_REGISTER(vdst & 0x0f);    /* unpack nibbles, low first */
+                vdst >>= 4;
+            }
+        }
+
+        retval.l = newArray;
+    }
+    FINISH(3);
+GOTO_TARGET_END
+
+
+GOTO_TARGET(invokeVirtual, bool methodCallRange)
+    {
+        Method* baseMethod;
+        Object* thisPtr;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert(vsrc1 > 0);
+            ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert((vsrc1>>4) > 0);
+            ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+
+        if (!checkForNull(thisPtr))
+            GOTO(exceptionThrown);
+
+        /*
+         * Resolve the method.  This is the correct method for the static
+         * type of the object.  We also verify access permissions here.
+         */
+        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (baseMethod == NULL) {
+            baseMethod = dvmResolveMethod(method->clazz, ref, METHOD_VIRTUAL);
+            if (baseMethod == NULL) {
+                ILOGV("+ unknown method or access denied\n");
+                GOTO(exceptionThrown);
+            }
+        }
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method.
+         */
+        assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
+        methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];    /* dynamic dispatch on the receiver's class */
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            /*
+             * This can happen if you create two classes, Base and Sub, where
+             * Sub is a sub-class of Base.  Declare a protected abstract
+             * method foo() in Base, and invoke foo() from a method in Base.
+             * Base is an "abstract base class" and is never instantiated
+             * directly.  Now, override foo() in Sub, and use Sub.  This
+             * works fine unless Sub stops providing an implementation of
+             * the method.
+             */
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+
+        LOGVV("+++ base=%s.%s virtual[%d]=%s.%s\n",
+            baseMethod->clazz->descriptor, baseMethod->name,
+            (u4) baseMethod->methodIndex,
+            methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+#if 0
+        if (vsrc1 != methodToCall->insSize) {
+            LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s\n",
+                baseMethod->clazz->descriptor, baseMethod->name,
+                (u4) baseMethod->methodIndex,
+                methodToCall->clazz->descriptor, methodToCall->name);
+            //dvmDumpClass(baseMethod->clazz);
+            //dvmDumpClass(methodToCall->clazz);
+            dvmDumpAllClasses(0);
+        }
+#endif
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuper, bool methodCallRange)
+    {
+        Method* baseMethod;
+        u2 thisReg;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisReg = vdst & 0x0f;
+        }
+        /* impossible in well-formed code, but we must check nevertheless */
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            GOTO(exceptionThrown);
+
+        /*
+         * Resolve the method.  This is the correct method for the static
+         * type of the object.  We also verify access permissions here.
+         * The first arg to dvmResolveMethod() is just the referring class
+         * (used for class loaders and such), so we don't want to pass
+         * the superclass into the resolution call.
+         */
+        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (baseMethod == NULL) {
+            baseMethod = dvmResolveMethod(method->clazz, ref, METHOD_VIRTUAL);
+            if (baseMethod == NULL) {
+                ILOGV("+ unknown method or access denied\n");
+                GOTO(exceptionThrown);
+            }
+        }
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method's class.
+         *
+         * We're using the current method's class' superclass, not the
+         * superclass of "this".  This is because we might be executing
+         * in a method inherited from a superclass, and we want to run
+         * in that class' superclass.
+         */
+        if (baseMethod->methodIndex >= method->clazz->super->vtableCount) {
+            /*
+             * Method does not exist in the superclass.  Could happen if
+             * superclass gets updated.
+             */
+            dvmThrowException("Ljava/lang/NoSuchMethodError;",
+                baseMethod->name);
+            GOTO(exceptionThrown);
+        }
+        methodToCall = method->clazz->super->vtable[baseMethod->methodIndex];   /* static dispatch into the superclass vtable */
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+        LOGVV("+++ base=%s.%s super-virtual=%s.%s\n",
+            baseMethod->clazz->descriptor, baseMethod->name,
+            methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeInterface, bool methodCallRange)
+    {
+        Object* thisPtr;
+        ClassObject* thisClass;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert(vsrc1 > 0);
+            ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert((vsrc1>>4) > 0);
+            ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+
+        if (!checkForNull(thisPtr))
+            GOTO(exceptionThrown);
+        thisClass = thisPtr->clazz;     /* dispatch on the receiver's run-time class */
+
+        /*
+         * Given a class and a method index, find the Method* with the
+         * actual code we want to execute.
+         */
+        methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, method,
+                        methodClassDex);    /* locate the concrete implementation */
+        if (methodToCall == NULL) {
+            assert(dvmCheckException(self));
+            GOTO(exceptionThrown);
+        }
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeDirect, bool methodCallRange)
+    {
+        u2 thisReg;
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        EXPORT_PC();
+
+        if (methodCallRange) {
+            ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisReg = vdst & 0x0f;
+        }
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))    /* "this" must be non-null */
+            GOTO(exceptionThrown);
+
+        methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);    /* fast path: already resolved */
+        if (methodToCall == NULL) {
+            methodToCall = dvmResolveMethod(method->clazz, ref, METHOD_DIRECT);
+            if (methodToCall == NULL) {
+                ILOGV("+ unknown direct method\n");     // should be impossible
+                GOTO(exceptionThrown);
+            }
+        }
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeStatic, bool methodCallRange)
+    vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+    ref = FETCH(1);             /* method ref */
+    vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+    EXPORT_PC();
+
+    if (methodCallRange)
+        ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+            vsrc1, ref, vdst, vdst+vsrc1-1);
+    else
+        ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+            vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);    /* fast path: already resolved; no "this" check needed */
+    if (methodToCall == NULL) {
+        methodToCall = dvmResolveMethod(method->clazz, ref, METHOD_STATIC);
+        if (methodToCall == NULL) {
+            ILOGV("+ unknown method\n");
+            GOTO(exceptionThrown);
+        }
+    }
+    GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+    {
+        Object* thisPtr;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* vtable index */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert(vsrc1 > 0);
+            ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert((vsrc1>>4) > 0);
+            ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+
+        if (!checkForNull(thisPtr))
+            GOTO(exceptionThrown);
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method.
+         */
+        assert(ref < thisPtr->clazz->vtableCount);
+        methodToCall = thisPtr->clazz->vtable[ref];     /* no resolution: index comes straight from the instruction */
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+
+        LOGVV("+++ virtual[%d]=%s.%s\n",
+            ref, methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+    {
+        u2 thisReg;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* vtable index */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisReg = vdst & 0x0f;
+        }
+        /* impossible in well-formed code, but we must check nevertheless */
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            GOTO(exceptionThrown);
+
+#if 0   /* impossible in optimized + verified code */
+        if (ref >= method->clazz->super->vtableCount) {
+            dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(ref < method->clazz->super->vtableCount);
+#endif
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method's class.
+         *
+         * We're using the current method's class' superclass, not the
+         * superclass of "this".  This is because we might be executing
+         * in a method inherited from a superclass, and we want to run
+         * in the method's class' superclass.
+         */
+        methodToCall = method->clazz->super->vtable[ref];   /* index comes straight from the instruction */
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+        LOGVV("+++ super-virtual[%d]=%s.%s\n",
+            ref, methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+
+
+    /*
+     * General handling for return-void, return, and return-wide.  Put the
+     * return value in "retval" before jumping here.
+     */
+GOTO_TARGET(returnFromMethod)
+    {
+        StackSaveArea* saveArea;
+
+        /*
+         * We must do this BEFORE we pop the previous stack frame off, so
+         * that the GC can see the return value (if any) in the local vars.
+         *
+         * Since this is now an interpreter switch point, we must do it before
+         * we do anything at all.
+         */
+        PERIODIC_CHECKS(kInterpEntryReturn, 0);
+
+        ILOGV("> retval=0x%llx (leaving %s.%s %s)",
+            retval.j, method->clazz->descriptor, method->name,
+            method->signature);
+        //DUMP_REGS(method, fp);
+
+        saveArea = SAVEAREA_FROM_FP(fp);    /* callee's save area; still valid after fp moves */
+
+#ifdef EASY_GDB
+        debugSaveArea = saveArea;
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+        TRACE_METHOD_EXIT(self, method);
+#endif
+
+        /* back up to previous frame and see if we hit a break */
+        fp = saveArea->prevFrame;
+        assert(fp != NULL);
+        if (dvmIsBreakFrame(fp)) {
+            /* bail without popping the method frame from stack */
+            LOGVV("+++ returned into break frame\n");
+            GOTO_BAIL(false);
+        }
+
+        /* update thread FP, and reset local variables */
+        self->curFrame = fp;
+        method =
+#undef method       // ARRGH!
+            SAVEAREA_FROM_FP(fp)->method;   /* the "method" macro would mangle this field access */
+#define method glue->method
+        //methodClass = method->clazz;
+        methodClassDex = method->clazz->pDvmDex;
+        pc = saveArea->savedPc;     /* caller's pc, recorded at the invoke */
+        ILOGD("> (return to %s.%s %s)", method->clazz->descriptor,
+            method->name, method->signature);
+
+        /* use FINISH on the caller's invoke instruction */
+        //u2 invokeInstr = INST_INST(FETCH(0));
+        if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+            invokeInstr <= OP_INVOKE_INTERFACE*/)
+        {
+            FINISH(3);
+        } else {
+            //LOGE("Unknown invoke instr %02x at %d\n",
+            //    invokeInstr, (int) (pc - method->insns));
+            assert(false);
+        }
+    }
+GOTO_TARGET_END
+
+
+    /*
+     * Jump here when the code throws an exception.
+     *
+     * By the time we get here, the Throwable has been created and the stack
+     * trace has been saved off.
+     */
+GOTO_TARGET(exceptionThrown)
+    {
+        Object* exception;
+        int catchRelPc;
+
+        /*
+         * Since this is now an interpreter switch point, we must do it before
+         * we do anything at all.
+         */
+        PERIODIC_CHECKS(kInterpEntryThrow, 0);
+
+        /*
+         * We save off the exception and clear the exception status.  While
+         * processing the exception we might need to load some Throwable
+         * classes, and we don't want class loader exceptions to get
+         * confused with this one.
+         */
+        assert(dvmCheckException(self));
+        exception = dvmGetException(self);
+        dvmAddTrackedAlloc(exception, self);    /* keep it live while we unwind */
+        dvmClearException(self);
+
+        LOGV("Handling exception %s at %s:%d\n",
+            exception->clazz->descriptor, method->name,
+            dvmLineNumFromPC(method, pc - method->insns));
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+        /*
+         * Tell the debugger about it.
+         *
+         * TODO: if the exception was thrown by interpreted code, control
+         * fell through native, and then back to us, we will report the
+         * exception at the point of the throw and again here.  We can avoid
+         * this by not reporting exceptions when we jump here directly from
+         * the native call code above, but then we won't report exceptions
+         * that were thrown *from* the JNI code (as opposed to *through* it).
+         *
+         * The correct solution is probably to ignore from-native exceptions
+         * here, and have the JNI exception code do the reporting to the
+         * debugger.
+         */
+        if (gDvm.debuggerActive) {
+            void* catchFrame;
+            catchRelPc = dvmFindCatchBlock(self, pc - method->insns,
+                        exception, true, &catchFrame);
+            dvmDbgPostException(fp, pc - method->insns, catchFrame, catchRelPc,
+                exception);
+        }
+#endif
+
+        /*
+         * We need to unroll to the catch block or the nearest "break"
+         * frame.
+         *
+         * A break frame could indicate that we have reached an intermediate
+         * native call, or have gone off the top of the stack and the thread
+         * needs to exit.  Either way, we return from here, leaving the
+         * exception raised.
+         *
+         * If we do find a catch block, we want to transfer execution to
+         * that point.
+         */
+        catchRelPc = dvmFindCatchBlock(self, pc - method->insns,
+                    exception, false, (void*)&fp);  /* fp is updated to the handler's frame */
+
+        /*
+         * Restore the stack bounds after an overflow.  This isn't going to
+         * be correct in all circumstances, e.g. if JNI code devours the
+         * exception this won't happen until some other exception gets
+         * thrown.  If the code keeps pushing the stack bounds we'll end
+         * up aborting the VM.
+         */
+        if (self->stackOverflowed)
+            dvmCleanupStackOverflow(self);
+
+        if (catchRelPc < 0) {
+            /* falling through to JNI code or off the bottom of the stack */
+#if DVM_SHOW_EXCEPTION >= 2
+            LOGD("Exception %s from %s:%d not caught locally\n",
+                exception->clazz->descriptor, dvmGetMethodSourceFile(method),
+                dvmLineNumFromPC(method, pc - method->insns));
+#endif
+            dvmSetException(self, exception);
+            dvmReleaseTrackedAlloc(exception, self);
+            GOTO_BAIL(false);
+        }
+
+#if DVM_SHOW_EXCEPTION >= 3
+        {
+            const Method* catchMethod =
+#undef method
+                SAVEAREA_FROM_FP(fp)->method;   /* the "method" macro would mangle this field access */
+#define method glue->method
+            LOGD("Exception %s thrown from %s:%d to %s:%d\n",
+                exception->clazz->descriptor, dvmGetMethodSourceFile(method),
+                dvmLineNumFromPC(method, pc - method->insns),
+                dvmGetMethodSourceFile(catchMethod),
+                dvmLineNumFromPC(catchMethod, catchRelPc));
+        }
+#endif
+
+        /*
+         * Adjust local variables to match self->curFrame and the
+         * updated PC.
+         */
+        //fp = (u4*) self->curFrame;
+        method =
+#undef method
+            SAVEAREA_FROM_FP(fp)->method;
+#define method glue->method
+        //methodClass = method->clazz;
+        methodClassDex = method->clazz->pDvmDex;
+        pc = method->insns + catchRelPc;    /* resume at the catch handler */
+        ILOGV("> pc <-- %s.%s %s", method->clazz->descriptor, method->name,
+            method->signature);
+        DUMP_REGS(method, fp, false);               // show all regs
+
+        /*
+         * Restore the exception if the handler wants it.
+         *
+         * The Dalvik spec mandates that, if an exception handler wants to
+         * do something with the exception, the first instruction executed
+         * must be "move-exception".  We can pass the exception along
+         * through the thread struct, and let the move-exception instruction
+         * clear it for us.
+         *
+         * If the handler doesn't call move-exception, we don't want to
+         * finish here with an exception still pending.
+         */
+        if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
+            dvmSetException(self, exception);
+
+        dvmReleaseTrackedAlloc(exception, self);
+        FINISH(0);
+    }
+GOTO_TARGET_END
+
+
+    /*
+     * General handling for invoke-{virtual,super,direct,static,interface},
+     * including "quick" variants.
+     *
+     * Set "methodToCall" to the Method we're calling, and "methodCallRange"
+     * depending on whether this is a "/range" instruction.
+     *
+     * For a range call:
+     *  "vsrc1" holds the argument count (8 bits)
+     *  "vdst" holds the first argument in the range
+     * For a non-range call:
+     *  "vsrc1" holds the argument count (4 bits) and the 5th argument index
+     *  "vdst" holds four 4-bit register indices
+     *
+     * The caller must EXPORT_PC before jumping here, because any method
+     * call can throw a stack overflow exception.
+     */
+GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
+    u2 count, u2 regs)
+    {
+        vsrc1 = count; vdst = regs; methodToCall = _methodToCall;  /* ADDED */
+
+        //printf("range=%d call=%p count=%d regs=0x%04x\n",
+        //    methodCallRange, methodToCall, count, regs);
+        //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
+        //    methodToCall->name, methodToCall->signature);
+
+        u4* outs;
+        int i;
+
+        /*
+         * Copy args.  This may corrupt vsrc1/vdst.
+         */
+        if (methodCallRange) {
+            // could use memcpy or a "Duff's device"; most functions have
+            // so few args it won't matter much
+            assert(vsrc1 <= method->outsSize);
+            assert(vsrc1 == methodToCall->insSize);
+            outs = OUTS_FROM_FP(fp, vsrc1);
+            for (i = 0; i < vsrc1; i++)
+                outs[i] = GET_REGISTER(vdst+i);
+        } else {
+            u4 count = vsrc1 >> 4;
+
+            assert(count <= method->outsSize);
+            assert(count == methodToCall->insSize);
+            assert(count <= 5);
+
+            outs = OUTS_FROM_FP(fp, count);
+#if 0
+            if (count == 5) {
+                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+                count--;
+            }
+            for (i = 0; i < (int) count; i++) {
+                outs[i] = GET_REGISTER(vdst & 0x0f);
+                vdst >>= 4;
+            }
+#else
+            // This version executes fewer instructions but is larger
+            // overall.  Seems to be a teensy bit faster.
+            assert((vdst >> 16) == 0);  // 16 bits -or- high 16 bits clear
+            switch (count) {
+            case 5:
+                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+            case 4:
+                outs[3] = GET_REGISTER(vdst >> 12);
+            case 3:
+                outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
+            case 2:
+                outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
+            case 1:
+                outs[0] = GET_REGISTER(vdst & 0x0f);
+            default:
+                ;
+            }
+#endif
+        }
+    }
+
+    /*
+     * (This was originally a "goto" target; I've kept it separate from the
+     * stuff above in case we want to refactor things again.)
+     *
+     * At this point, we have the arguments stored in the "outs" area of
+     * the current method's stack frame, and the method to call in
+     * "methodToCall".  Push a new stack frame.
+     */
+    {
+        StackSaveArea* newSaveArea;
+        u4* newFp;
+
+        ILOGV("> %s%s.%s %s",
+            dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
+            methodToCall->clazz->descriptor, methodToCall->name,
+            methodToCall->signature);
+
+        newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
+        newSaveArea = SAVEAREA_FROM_FP(newFp);
+
+        /* verify that we have enough space */
+        if (true) {
+            u1* bottom;
+            bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
+            if (bottom < self->interpStackEnd) {
+                /* stack overflow */
+                LOGV("Stack overflow on method call (start=%p end=%p newBot=%p size=%d '%s')\n",
+                    self->interpStackStart, self->interpStackEnd, bottom,
+                    self->interpStackSize, methodToCall->name);
+                dvmHandleStackOverflow(self);
+                assert(dvmCheckException(self));
+                GOTO(exceptionThrown);
+            }
+            //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p\n",
+            //    fp, newFp, newSaveArea, bottom);
+        }
+
+#ifdef LOG_INSTR
+        if (methodToCall->registersSize > methodToCall->insSize) {
+            /*
+             * This makes valgrind quiet when we print registers that
+             * haven't been initialized.  Turn it off when the debug
+             * messages are disabled -- we want valgrind to report any
+             * used-before-initialized issues.
+             */
+            memset(newFp, 0xcc,
+                (methodToCall->registersSize - methodToCall->insSize) * 4);
+        }
+#endif
+
+#ifdef EASY_GDB
+        newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
+#endif
+        newSaveArea->prevFrame = fp;
+        newSaveArea->savedPc = pc;
+#undef method
+        newSaveArea->method = methodToCall;
+#define method glue->method
+
+        if (!dvmIsNativeMethod(methodToCall)) {
+            /*
+             * "Call" interpreted code.  Reposition the PC, update the
+             * frame pointer and other local state, and continue.
+             */
+            method = methodToCall;
+            methodClassDex = method->clazz->pDvmDex;
+            pc = methodToCall->insns;
+            fp = self->curFrame = newFp;
+#ifdef EASY_GDB
+            debugSaveArea = SAVEAREA_FROM_FP(newFp);
+#endif
+#if INTERP_TYPE == INTERP_DBG
+            debugIsMethodEntry = true;              // profiling, debugging
+#endif
+            ILOGD("> pc <-- %s.%s %s", method->clazz->descriptor, method->name,
+                method->signature);
+            DUMP_REGS(method, fp, true);            // show input args
+            FINISH(0);                              // jump to method start
+        } else {
+            /* set this up for JNI locals, even if not a JNI native */
+            newSaveArea->xtra.localRefTop = self->jniLocalRefTable.nextEntry;
+
+            self->curFrame = newFp;
+
+            DUMP_REGS(methodToCall, newFp, true);   // show input args
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+            if (gDvm.debuggerActive) {
+                dvmDbgPostLocationEvent(methodToCall, -1,
+                    dvmGetThisPtr(method, fp), DBG_METHOD_ENTRY);
+            }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+            TRACE_METHOD_ENTER(self, methodToCall);
+#endif
+
+            ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
+                methodToCall->name, methodToCall->signature);
+
+            /*
+             * Jump through native call bridge.  Because we leave no
+             * space for locals on native calls, "newFp" points directly
+             * to the method arguments.
+             */
+            (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+            if (gDvm.debuggerActive) {
+                dvmDbgPostLocationEvent(methodToCall, -1,
+                    dvmGetThisPtr(method, fp), DBG_METHOD_EXIT);
+            }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+            TRACE_METHOD_EXIT(self, methodToCall);
+#endif
+
+            /* pop frame off */
+            dvmPopJniLocals(self, newSaveArea);
+            self->curFrame = fp;
+
+            /*
+             * If the native code threw an exception, or interpreted code
+             * invoked by the native call threw one and nobody has cleared
+             * it, jump to our local exception handling.
+             */
+            if (dvmCheckException(self)) {
+                LOGV("Exception thrown by/below native code\n");
+                GOTO(exceptionThrown);
+            }
+
+            ILOGD("> retval=0x%llx (leaving native)", retval.j);
+            ILOGD("> (return from native %s.%s to %s.%s %s)",
+                methodToCall->clazz->descriptor, methodToCall->name,
+                method->clazz->descriptor, method->name,
+                method->signature);
+
+            //u2 invokeInstr = INST_INST(FETCH(0));
+            if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+                invokeInstr <= OP_INVOKE_INTERFACE*/)
+            {
+                FINISH(3);
+            } else {
+                //LOGE("Unknown invoke instr %02x at %d\n",
+                //    invokeInstr, (int) (pc - method->insns));
+                assert(false);
+            }
+        }
+    }
+    assert(false);      // should not get here
+GOTO_TARGET_END
+
+
+/*
+ * Undefine the "magic" name remapping (set up in c/opcommon.c, which maps
+ * these identifiers onto MterpGlue fields) so the code below can use them
+ * as ordinary identifiers again.
+ */
+#undef retval
+#undef pc
+#undef fp
+#undef method
+#undef methodClassDex
+#undef self
+#undef debugTrackedRefStart
+
+/* File: armv5/debug.c */
+#include <inttypes.h>
+
+/*
+ * Dump the fixed-purpose ARM registers, along with some other info.
+ *
+ * This function MUST be compiled in ARM mode -- THUMB will yield bogus
+ * results.
+ *
+ * This will NOT preserve r0-r3/ip.
+ */
+void dvmMterpDumpArmRegs(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3)
+{
+    /*
+     * Bind locals to the fixed-purpose interpreter registers.  The
+     * "register ... asm" declarations make each variable read the live ARM
+     * register, which is why they are never assigned here and look
+     * uninitialized to the compiler.  Do not reorder or remove these.
+     */
+    register uint32_t rPC       asm("r4");
+    register uint32_t rFP       asm("r5");
+    register uint32_t rGLUE     asm("r6");
+    register uint32_t rIBASE    asm("r7");
+    register uint32_t rINST     asm("r8");
+    register uint32_t r9        asm("r9");
+    register uint32_t r10       asm("r10");
+
+    /* only used by the commented-out handler-address diagnostic below */
+    extern char dvmAsmInstructionStart[];
+
+    printf("REGS: r0=%08x r1=%08x r2=%08x r3=%08x\n", r0, r1, r2, r3);
+    printf("    : rPC=%08x rFP=%08x rGLUE=%08x rIBASE=%08x\n",
+        rPC, rFP, rGLUE, rIBASE);
+    printf("    : rINST=%08x r9=%08x r10=%08x\n", rINST, r9, r10);
+
+    /* "glue"/"method" feed the commented-out diagnostics below; they may
+     * draw unused-variable warnings while those stay disabled */
+    MterpGlue* glue = (MterpGlue*) rGLUE;
+    const Method* method = glue->method;
+    printf("    + self is %p\n", dvmThreadSelf());
+    //printf("    + currently in %s.%s %s\n",
+    //    method->clazz->descriptor, method->name, method->signature);
+    //printf("    + dvmAsmInstructionStart = %p\n", dvmAsmInstructionStart);
+    //printf("    + next handler for 0x%02x = %p\n",
+    //    rINST & 0xff, dvmAsmInstructionStart + (rINST & 0xff) * 64);
+}
+
+/*
+ * Dump the StackSaveArea for the specified frame pointer.
+ */
+void dvmDumpFp(void* fp, StackSaveArea* otherSaveArea)
+{
+    /* locate the StackSaveArea associated with this frame pointer */
+    StackSaveArea* const sa = SAVEAREA_FROM_FP(fp);
+
+    printf("StackSaveArea for fp %p [%p/%p]:\n", fp, sa, otherSaveArea);
+#ifdef EASY_GDB
+    printf("  prevSave=%p, prevFrame=%p savedPc=%p meth=%p curPc=%p\n",
+        sa->prevSave, sa->prevFrame, sa->savedPc, sa->method,
+        sa->xtra.currentPc);
+#else
+    /* without EASY_GDB there is no prevSave field; show fp[0] instead */
+    printf("  prevFrame=%p savedPc=%p meth=%p curPc=%p fp[0]=0x%08x\n",
+        sa->prevFrame, sa->savedPc, sa->method, sa->xtra.currentPc,
+        *(u4*)fp);
+#endif
+}
+
+/*
+ * Does the bulk of the work for common_printMethod().
+ */
+void dvmMterpPrintMethod(Method* method)
+{
+    /*
+     * A method is direct (non-virtual) when it is static or private, or
+     * when it is a constructor (its name begins with '<').
+     */
+    bool isDirect;
+    if ((method->accessFlags & (ACC_STATIC|ACC_PRIVATE)) != 0)
+        isDirect = true;
+    else
+        isDirect = (method->name[0] == '<');
+
+    /* the descriptor string is malloc'd by the copy call; freed below */
+    char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+
+    printf("<%c:%s.%s %s> ",
+            isDirect ? 'D' : 'V',
+            method->clazz->descriptor,
+            method->name,
+            desc);
+
+    free(desc);
+}
+
diff --git a/vm/mterp/out/InterpC-desktop.c b/vm/mterp/out/InterpC-desktop.c
new file mode 100644
index 0000000..061e095
--- /dev/null
+++ b/vm/mterp/out/InterpC-desktop.c
@@ -0,0 +1,3480 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'desktop'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: c/header.c */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h>                   // needed for fmod, fmodf
+
+
+/*
+ * Forward declarations for the out-of-line "goto target" handlers.  Each
+ * GOTO(...) in the opcode bodies expands into a call to one of these
+ * functions (see the GOTO macro below), passing the interpreter state in
+ * "glue" plus any extra arguments.
+ */
+#define GOTO_TARGET_DECL(_target, ...)                                      \
+    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+    u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
+
+/* File: c/opcommon.c */
+/*
+ * Redefine what used to be local variable accesses into MterpGlue struct
+ * references.  (These are undefined down in "footer.c".)  This lets the
+ * opcode bodies, originally written against locals in the portable
+ * interpreter, compile unchanged as per-opcode functions.
+ */
+#define retval                  glue->retval
+#define pc                      glue->pc
+#define fp                      glue->fp
+#define method                  glue->method
+#define methodClassDex          glue->methodClassDex
+#define self                    glue->self
+/* entryPoint is written explicitly as glue->entryPoint where needed */
+//#define entryPoint              glue->entryPoint
+#define debugTrackedRefStart    glue->debugTrackedRefStart
+
+/*
+ * Replace the opcode definition macros.  Here, each opcode is a separate
+ * function that takes a "glue" argument and returns void.  We can't declare
+ * these "static" because they may be called from an assembly stub.
+ */
+#undef HANDLE_OPCODE
+#undef OP_END
+#undef FINISH
+
+/*
+ * Opens the handler function for one opcode.  The code that follows runs
+ * up to the matching OP_END, which supplies the closing brace.  "inst" is
+ * the 16-bit instruction word at the current pc; ref/vsrc1/vsrc2/vdst are
+ * scratch decode fields used by the handler bodies.
+ */
+#define HANDLE_OPCODE(_op)                                                  \
+    void dvmMterp_##_op(MterpGlue* glue) {                                  \
+        u2 ref, vsrc1, vsrc2, vdst;                                         \
+        u2 inst = FETCH(0);
+
+#define OP_END }
+
+/*
+ * Like standard FINISH, but don't reload "inst", and return to caller
+ * when done.
+ */
+#define FINISH(_offset) {                                                   \
+        ADJUST_PC(_offset);                                                 \
+        CHECK_DEBUG_AND_PROF();                                             \
+        CHECK_TRACKED_REFS();                                               \
+        return;                                                             \
+    }
+
+
+/*
+ * The "goto label" statements turn into function calls followed by
+ * return statements.  Some of the functions take arguments.
+ */
+#define GOTO(_target, ...)                                                  \
+    do {                                                                    \
+        dvmMterp_##_target(glue, ## __VA_ARGS__);                           \
+        return;                                                             \
+    } while(false)
+
+/*
+ * As a special case, "goto bail" turns into a longjmp.  "_switch" should be
+ * "true" if we need to switch to the other interpreter upon our return.
+ * (Being a longjmp, this presumably does not return -- which would explain
+ * why no trailing "return" is emitted here; verify against dvmMterpStdBail.)
+ */
+#define GOTO_BAIL(_switch)                                                  \
+    dvmMterpStdBail(glue, _switch);
+
+/* for now, mterp is always a "standard" interpreter */
+#define INTERP_TYPE INTERP_STD
+
+/*
+ * Periodic checks macro, slightly modified.  Used on backward branches:
+ * polls for a pending thread suspension, and if an interpreter switch is
+ * needed, adjusts the pc, records the entry point, and bails out of the
+ * handler entirely.
+ */
+#define PERIODIC_CHECKS(_entryPoint, _pcadj) {                              \
+        dvmCheckSuspendQuick(self);                                         \
+        if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
+            ADJUST_PC(_pcadj);                                              \
+            glue->entryPoint = _entryPoint;                                 \
+            LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n",              \
+                glue->self->threadId, (_entryPoint), (_pcadj));             \
+            GOTO_BAIL(true);                                                \
+        }                                                                   \
+    }
+
+
+/*
+ * ===========================================================================
+ *
+ * What follows are the "common" opcode definitions copied & pasted from the
+ * basic interpreter.  The only changes that need to be made to the original
+ * sources are:
+ *  - replace "goto exceptionThrown" with "GOTO(exceptionThrown)"
+ *
+ * ===========================================================================
+ */
+
+
+/*
+ * Register-to-register numeric conversion: vA = (_totype)(_fromtype) vB.
+ */
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype)                \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        SET_REGISTER##_totype(vdst,                                         \
+            GET_REGISTER##_fromtype(vsrc1));                                \
+        FINISH(1);
+
+/*
+ * Float/double to integer conversion with the clamping the Dalvik spec
+ * requires: values >= intMax become intMax (+inf), values <= intMin
+ * become intMin (-inf), and NaN becomes 0.
+ *
+ * NOTE(review): "(_tovtype) 1 << (bits - 1)" left-shifts into the sign
+ * bit, which is undefined behavior for signed types in strict C; it
+ * relies on two's-complement wraparound.  Also, the first
+ * "result = (_tovtype) val" is a dead store -- every branch below
+ * reassigns result.
+ */
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype,       \
+        _tovtype, _tortype)                                                 \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+    {                                                                       \
+        /* spec defines specific handling for +/- inf and NaN values */     \
+        _fromvtype val;                                                     \
+        _tovtype intMin, intMax, result;                                    \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        val = GET_REGISTER##_fromrtype(vsrc1);                              \
+        intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1);                 \
+        intMax = ~intMin;                                                   \
+        result = (_tovtype) val;                                            \
+        if (val >= intMax)          /* +inf */                              \
+            result = intMax;                                                \
+        else if (val <= intMin)     /* -inf */                              \
+            result = intMin;                                                \
+        else if (val != val)        /* NaN */                               \
+            result = 0;                                                     \
+        else                                                                \
+            result = (_tovtype) val;                                        \
+        SET_REGISTER##_tortype(vdst, result);                               \
+    }                                                                       \
+    FINISH(1);
+
+/*
+ * Narrowing int conversion (int-to-byte/char/short): truncate via the
+ * cast, then store back as a full 32-bit register value.
+ */
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type)                        \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1);                \
+        SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1));                    \
+        FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+/*
+ * Three-way compare: vAA = -1/0/1 for vBB < / == / > vCC.  For floating
+ * point operands that are unordered (NaN), all three tests fail and the
+ * final "else" stores _nanVal (+1 or -1 depending on the opcode variant).
+ */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal)          \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        int result;                                                         \
+        u2 regs;                                                            \
+        _varType val1, val2;                                                \
+        vdst = INST_AA(inst);                                               \
+        regs = FETCH(1);                                                    \
+        vsrc1 = regs & 0xff;                                                \
+        vsrc2 = regs >> 8;                                                  \
+        ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);         \
+        val1 = GET_REGISTER##_type(vsrc1);                                  \
+        val2 = GET_REGISTER##_type(vsrc2);                                  \
+        if (val1 == val2)                                                   \
+            result = 0;                                                     \
+        else if (val1 < val2)                                               \
+            result = -1;                                                    \
+        else if (val1 > val2)                                               \
+            result = 1;                                                     \
+        else                                                                \
+            result = (_nanVal);                                             \
+        ILOGV("+ result=%d\n", result);                                     \
+        SET_REGISTER(vdst, result);                                         \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Two-register conditional branch (signed compare).  A taken backward
+ * branch runs PERIODIC_CHECKS first: suspension poll plus possible
+ * interpreter switch.
+ */
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp)                             \
+    HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/)                                \
+        vsrc1 = INST_A(inst);                                               \
+        vsrc2 = INST_B(inst);                                               \
+        if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) {       \
+            int branchOffset = (s2)FETCH(1);    /* sign-extended */         \
+            ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2,        \
+                branchOffset);                                              \
+            ILOGV("> branch taken");                                        \
+            if (branchOffset < 0)                                           \
+                PERIODIC_CHECKS(kInterpEntryInstr, branchOffset);           \
+            FINISH(branchOffset);                                           \
+        } else {                                                            \
+            ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2);             \
+            FINISH(2);                                                      \
+        }
+
+/*
+ * Single-register conditional branch against zero (signed compare);
+ * otherwise identical to HANDLE_OP_IF_XX.
+ */
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp)                            \
+    HANDLE_OPCODE(_opcode /*vAA, +BBBB*/)                                   \
+        vsrc1 = INST_AA(inst);                                              \
+        if ((s4) GET_REGISTER(vsrc1) _cmp 0) {                              \
+            int branchOffset = (s2)FETCH(1);    /* sign-extended */         \
+            ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset);    \
+            ILOGV("> branch taken");                                        \
+            if (branchOffset < 0)                                           \
+                PERIODIC_CHECKS(kInterpEntryInstr, branchOffset);           \
+            FINISH(branchOffset);                                           \
+        } else {                                                            \
+            ILOGV("|if-%s v%d,-", (_opname), vsrc1);                        \
+            FINISH(2);                                                      \
+        }
+
+/*
+ * Unary op: vA = _pfx vB _sfx, where _pfx/_sfx wrap the operand (e.g. a
+ * "-" prefix for negation; exact usages are at the expansion sites).
+ */
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type)                    \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx);    \
+        FINISH(1);
+
+/*
+ * 32-bit binary op: vAA = vBB _op vCC.  When _chkdiv is set (div/rem),
+ * a zero divisor throws ArithmeticException via the exception target.
+ */
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv)                     \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1);                   \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER(vsrc2) == 0) {                                 \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2));         \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * 32-bit shift: vAA = vBB _op vCC, with the shift distance masked to the
+ * low 5 bits (mod 32) as the bytecode semantics require -- this also
+ * avoids C's undefined behavior for over-wide shifts.
+ */
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op)                     \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1);                   \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f));    \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * 32-bit binary op with a signed 16-bit literal: vA = vB _op #+CCCC.
+ * For div/rem (_chkdiv) a zero literal throws ArithmeticException.
+ */
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _cast, _op, _chkdiv)        \
+    HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/)                               \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        vsrc2 = FETCH(1);                                                   \
+        ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x",                             \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        if (_chkdiv) {                                                      \
+            if ((s2) vsrc2 == 0) {                                          \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (s2) vsrc2);                      \
+        FINISH(2);
+
+/*
+ * 32-bit binary op with a signed 8-bit literal: vAA = vBB _op #+CC.
+ * For div/rem (_chkdiv) a zero literal throws ArithmeticException.
+ */
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv)                \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/)                               \
+    {                                                                       \
+        u2 litInfo;                                                         \
+        vdst = INST_AA(inst);                                               \
+        litInfo = FETCH(1);                                                 \
+        vsrc1 = litInfo & 0xff;                                             \
+        vsrc2 = litInfo >> 8;       /* constant */                          \
+        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x",                              \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        if (_chkdiv) {                                                      \
+            if ((s1) vsrc2 == 0) {                                          \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2);                       \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * 32-bit shift with an 8-bit literal distance, masked to 5 bits (mod 32).
+ */
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op)                \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/)                               \
+    {                                                                       \
+        u2 litInfo;                                                         \
+        vdst = INST_AA(inst);                                               \
+        litInfo = FETCH(1);                                                 \
+        vsrc1 = litInfo & 0xff;                                             \
+        vsrc2 = litInfo >> 8;       /* constant */                          \
+        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x",                              \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f));                  \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * In-place ("2addr") 32-bit binary op: vA = vA _op vB.  For div/rem
+ * (_chkdiv) a zero divisor throws ArithmeticException.
+ */
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv)               \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1);             \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER(vsrc1) == 0) {                                 \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER(vdst,                                                  \
+            (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1));          \
+        FINISH(1);
+
+/*
+ * In-place 32-bit shift: vA = vA _op vB, distance masked to 5 bits.
+ */
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op)               \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1);             \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f));     \
+        FINISH(1);
+
+/*
+ * 64-bit binary op: vAA = vBB _op vCC, operating on register pairs.
+ * For div/rem (_chkdiv) a zero divisor throws ArithmeticException.
+ */
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv)                    \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);       \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER_WIDE(vsrc2) == 0) {                            \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER_WIDE(vdst,                                             \
+            (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * 64-bit shift: vAA = vBB _op vCC, with the 32-bit shift-distance
+ * register masked to the low 6 bits (mod 64).
+ */
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op)                    \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);       \
+        SET_REGISTER_WIDE(vdst,                                             \
+            _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * In-place 64-bit binary op: vA = vA _op vB.  For div/rem (_chkdiv) a
+ * zero divisor throws ArithmeticException.
+ */
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv)              \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1);            \
+        if (_chkdiv) {                                                      \
+            if (GET_REGISTER_WIDE(vsrc1) == 0) {                            \
+                EXPORT_PC();                                                \
+                dvmThrowException("Ljava/lang/ArithmeticException;",        \
+                    "divide by zero");                                      \
+                GOTO(exceptionThrown);                                      \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER_WIDE(vdst,                                             \
+            (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1)); \
+        FINISH(1);
+
+/*
+ * In-place 64-bit shift: vA = vA _op vB, distance masked to 6 bits.
+ */
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op)              \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1);            \
+        SET_REGISTER_WIDE(vdst,                                             \
+            _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+        FINISH(1);
+
+/*
+ * Single-precision binary op: vAA = vBB _op vCC.  No divide-by-zero
+ * check -- floating point division follows IEEE semantics (inf/NaN).
+ */
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op)                            \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);      \
+        SET_REGISTER_FLOAT(vdst,                                            \
+            GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2));       \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Double-precision binary op: vAA = vBB _op vCC, on register pairs.
+ */
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op)                           \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);     \
+        SET_REGISTER_DOUBLE(vdst,                                           \
+            GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2));     \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op)                      \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1);           \
+        SET_REGISTER_FLOAT(vdst,                                            \
+            GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1));        \
+        FINISH(1);
+
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op)                     \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1);          \
+        SET_REGISTER_DOUBLE(vdst,                                           \
+            GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1));      \
+        FINISH(1);
+
+/*
+ * Array element get: vAA = vBB[vCC].  Reads a _type element from the
+ * array's contents and stores it with the register setter selected by
+ * _regsize (32- or 64-bit).  Null and bounds checks may throw, hence the
+ * unconditional EXPORT_PC at the top.  NOTE(review): the bounds test
+ * relies on the index comparing unsigned against length, so a negative
+ * index also fails — confirm GET_REGISTER yields an unsigned type.
+ */
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        ArrayObject* arrayObj;                                              \
+        u2 arrayInfo;                                                       \
+        EXPORT_PC();                                                        \
+        vdst = INST_AA(inst);                                               \
+        arrayInfo = FETCH(1);                                               \
+        vsrc1 = arrayInfo & 0xff;    /* array ptr */                        \
+        vsrc2 = arrayInfo >> 8;      /* index */                            \
+        ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);        \
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);                      \
+        if (!checkForNull((Object*) arrayObj))                              \
+            GOTO(exceptionThrown);                                          \
+        if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
+            LOGV("Invalid array access: %p %d (len=%d)\n",                  \
+                arrayObj, vsrc2, arrayObj->length);                         \
+            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+                NULL);                                                      \
+            GOTO(exceptionThrown);                                          \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)]);            \
+        ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));  \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Array element put: vBB[vCC] = vAA.  Mirror of HANDLE_OP_AGET: same
+ * null and bounds checks (EXPORT_PC up front because both can throw),
+ * but the register read via _regsize is narrowed to _type on store.
+ */
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        ArrayObject* arrayObj;                                              \
+        u2 arrayInfo;                                                       \
+        EXPORT_PC();                                                        \
+        vdst = INST_AA(inst);       /* AA: source value */                  \
+        arrayInfo = FETCH(1);                                               \
+        vsrc1 = arrayInfo & 0xff;   /* BB: array ptr */                     \
+        vsrc2 = arrayInfo >> 8;     /* CC: index */                         \
+        ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);        \
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);                      \
+        if (!checkForNull((Object*) arrayObj))                              \
+            GOTO(exceptionThrown);                                          \
+        if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
+            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+                NULL);                                                      \
+            GOTO(exceptionThrown);                                          \
+        }                                                                   \
+        ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+        ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)] =                \
+            GET_REGISTER##_regsize(vdst);                                   \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits.  Consider:
+ *   short foo = -1  (sets a 32-bit register to 0xffffffff)
+ *   iput-quick foo  (writes all 32 bits to the field)
+ *   short bar = 1   (sets a 32-bit register to 0x00000001)
+ *   iput-short      (writes the low 16 bits to the field)
+ *   iget-quick foo  (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field.  This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time.  On
+ * a device with a 16-bit data bus this is sub-optimal.  (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
+/*
+ * Instance field get: vA = obj(vB).field@CCCC.  The field is resolved
+ * lazily: first look in the DexFile's resolved-field cache, then fall
+ * back to dvmResolveInstField (which can throw).  EXPORT_PC is done
+ * unconditionally because both the null check and resolution may throw.
+ */
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO(exceptionThrown);                                          \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(method->clazz, ref);               \
+            if (ifield == NULL)                                             \
+                GOTO(exceptionThrown);                                      \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_GET(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * "Quick" instance field get: CCCC is a raw byte offset into the object,
+ * not a field index, so no resolution step is needed.  No EXPORT_PC here;
+ * checkForNullExportPC presumably exports the PC itself on the throwing
+ * path (name suggests so — confirm against its definition).
+ */
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        Object* obj;                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field offset */                          \
+        ILOGV("|iget%s-quick v%d,v%d,field@+%u",                            \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNullExportPC(obj, fp, pc))                             \
+            GOTO(exceptionThrown);                                          \
+        SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref));        \
+        ILOGV("+ IGETQ %d=0x%08llx", ref,                                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Instance field put: obj(vB).field@CCCC = vA.  Same lazy resolution and
+ * unconditional EXPORT_PC as HANDLE_IGET_X; only the data direction and
+ * the UPDATE_FIELD_PUT profiling hook differ.
+ */
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO(exceptionThrown);                                          \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(method->clazz, ref);               \
+            if (ifield == NULL)                                             \
+                GOTO(exceptionThrown);                                      \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+        UPDATE_FIELD_PUT(&ifield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * "Quick" instance field put: CCCC is a raw byte offset, so no resolution
+ * is needed.  Mirror of HANDLE_IGET_X_QUICK; the debug-log format now
+ * prints the operand as "field@+%u" to match the iget-quick form (the
+ * operand is a byte offset, not a field index, so the "+offset" notation
+ * is the accurate one).
+ */
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        Object* obj;                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field offset */                          \
+        ILOGV("|iput%s-quick v%d,v%d,field@+%u",                            \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNullExportPC(obj, fp, pc))                             \
+            GOTO(exceptionThrown);                                          \
+        dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst));        \
+        ILOGV("+ IPUTQ %d=0x%08llx", ref,                                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Static field get: vAA = sfield@BBBB.  EXPORT_PC happens only on the
+ * slow (resolution) path — resolution is the only operation here that
+ * can throw.
+ */
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        vdst = INST_AA(inst);                                               \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref);           \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(method->clazz, ref);             \
+            if (sfield == NULL)                                             \
+                GOTO(exceptionThrown);                                      \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_GET(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * Static field put: sfield@BBBB = vAA.  Mirror of HANDLE_SGET_X with the
+ * same lazy resolution and slow-path-only EXPORT_PC.
+ */
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        vdst = INST_AA(inst);                                               \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref);           \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(method->clazz, ref);             \
+            if (sfield == NULL)                                             \
+                GOTO(exceptionThrown);                                      \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+        UPDATE_FIELD_PUT(&sfield->field);                                   \
+    }                                                                       \
+    FINISH(2);
+
+
+/* File: c/OP_NOP.c */
+/* nop: do nothing, advance one code unit. */
+HANDLE_OPCODE(OP_NOP)
+    FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE.c */
+/*
+ * move vA, vB: copy a 32-bit register.  The INST_INST test in the log
+ * call exists because this body is shared verbatim with OP_MOVE_OBJECT.
+ */
+HANDLE_OPCODE(OP_MOVE /*vA, vB*/)
+    vdst = INST_A(inst);
+    vsrc1 = INST_B(inst);
+    ILOGV("|move%s v%d,v%d %s(v%d=0x%08x)",
+        (INST_INST(inst) == OP_MOVE) ? "" : "-object", vdst, vsrc1,
+        kSpacing, vdst, GET_REGISTER(vsrc1));
+    SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+    FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_FROM16.c */
+/* move/from16 vAA, vBBBB: 8-bit dest, 16-bit source register number. */
+HANDLE_OPCODE(OP_MOVE_FROM16 /*vAA, vBBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|move%s/from16 v%d,v%d %s(v%d=0x%08x)",
+        (INST_INST(inst) == OP_MOVE_FROM16) ? "" : "-object", vdst, vsrc1,
+        kSpacing, vdst, GET_REGISTER(vsrc1));
+    SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+    FINISH(2);
+OP_END
+
+/* File: c/OP_MOVE_16.c */
+/* move/16 vAAAA, vBBBB: both register numbers are 16-bit. */
+HANDLE_OPCODE(OP_MOVE_16 /*vAAAA, vBBBB*/)
+    vdst = FETCH(1);
+    vsrc1 = FETCH(2);
+    ILOGV("|move%s/16 v%d,v%d %s(v%d=0x%08x)",
+        (INST_INST(inst) == OP_MOVE_16) ? "" : "-object", vdst, vsrc1,
+        kSpacing, vdst, GET_REGISTER(vsrc1));
+    SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+    FINISH(3);
+OP_END
+
+/* File: c/OP_MOVE_WIDE.c */
+/*
+ * move-wide vA, vB: copy a 64-bit register pair.  Reading the full wide
+ * value before writing (GET then SET) is what makes the overlapping-pair
+ * cases below safe.
+ */
+HANDLE_OPCODE(OP_MOVE_WIDE /*vA, vB*/)
+    /* IMPORTANT: must correctly handle overlapping registers, e.g. both
+     * "move-wide v6, v7" and "move-wide v7, v6" */
+    vdst = INST_A(inst);
+    vsrc1 = INST_B(inst);
+    ILOGV("|move-wide v%d,v%d %s(v%d=0x%08llx)", vdst, vsrc1,
+        kSpacing+5, vdst, GET_REGISTER_WIDE(vsrc1));
+    SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+    FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_WIDE_FROM16.c */
+/* move-wide/from16 vAA, vBBBB. */
+HANDLE_OPCODE(OP_MOVE_WIDE_FROM16 /*vAA, vBBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|move-wide/from16 v%d,v%d  (v%d=0x%08llx)", vdst, vsrc1,
+        vdst, GET_REGISTER_WIDE(vsrc1));
+    SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+    FINISH(2);
+OP_END
+
+/* File: c/OP_MOVE_WIDE_16.c */
+/* move-wide/16 vAAAA, vBBBB. */
+HANDLE_OPCODE(OP_MOVE_WIDE_16 /*vAAAA, vBBBB*/)
+    vdst = FETCH(1);
+    vsrc1 = FETCH(2);
+    ILOGV("|move-wide/16 v%d,v%d %s(v%d=0x%08llx)", vdst, vsrc1,
+        kSpacing+8, vdst, GET_REGISTER_WIDE(vsrc1));
+    SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+    FINISH(3);
+OP_END
+
+/* File: c/OP_MOVE_OBJECT.c */
+/* File: c/OP_MOVE.c */
+/*
+ * move-object handlers: byte-for-byte copies of the corresponding
+ * non-object move handlers (see the nested "File:" markers); only the
+ * opcode in HANDLE_OPCODE differs.  The shared ILOGV body tests
+ * INST_INST to select the "-object" suffix at runtime.
+ */
+HANDLE_OPCODE(OP_MOVE_OBJECT /*vA, vB*/)
+    vdst = INST_A(inst);
+    vsrc1 = INST_B(inst);
+    ILOGV("|move%s v%d,v%d %s(v%d=0x%08x)",
+        (INST_INST(inst) == OP_MOVE) ? "" : "-object", vdst, vsrc1,
+        kSpacing, vdst, GET_REGISTER(vsrc1));
+    SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+    FINISH(1);
+OP_END
+
+//OP_END
+
+/* File: c/OP_MOVE_OBJECT_FROM16.c */
+/* File: c/OP_MOVE_FROM16.c */
+HANDLE_OPCODE(OP_MOVE_OBJECT_FROM16 /*vAA, vBBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|move%s/from16 v%d,v%d %s(v%d=0x%08x)",
+        (INST_INST(inst) == OP_MOVE_FROM16) ? "" : "-object", vdst, vsrc1,
+        kSpacing, vdst, GET_REGISTER(vsrc1));
+    SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+    FINISH(2);
+OP_END
+
+//OP_END
+
+/* File: c/OP_MOVE_OBJECT_16.c */
+/* File: c/OP_MOVE_16.c */
+HANDLE_OPCODE(OP_MOVE_OBJECT_16 /*vAAAA, vBBBB*/)
+    vdst = FETCH(1);
+    vsrc1 = FETCH(2);
+    ILOGV("|move%s/16 v%d,v%d %s(v%d=0x%08x)",
+        (INST_INST(inst) == OP_MOVE_16) ? "" : "-object", vdst, vsrc1,
+        kSpacing, vdst, GET_REGISTER(vsrc1));
+    SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+    FINISH(3);
+OP_END
+
+//OP_END
+
+/* File: c/OP_MOVE_RESULT.c */
+/*
+ * move-result vAA: copy the interpreter's result latch into a register.
+ * retval is presumably set by the preceding invoke-kind instruction —
+ * those handlers are not visible here; confirm against them.
+ */
+HANDLE_OPCODE(OP_MOVE_RESULT /*vAA*/)
+    vdst = INST_AA(inst);
+    ILOGV("|move-result%s v%d %s(v%d=0x%08x)",
+         (INST_INST(inst) == OP_MOVE_RESULT) ? "" : "-object",
+         vdst, kSpacing+4, vdst,retval.i);
+    SET_REGISTER(vdst, retval.i);
+    FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_RESULT_WIDE.c */
+/* move-result-wide vAA: 64-bit variant of the above. */
+HANDLE_OPCODE(OP_MOVE_RESULT_WIDE /*vAA*/)
+    vdst = INST_AA(inst);
+    ILOGV("|move-result-wide v%d %s(0x%08llx)", vdst, kSpacing, retval.j);
+    SET_REGISTER_WIDE(vdst, retval.j);
+    FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_RESULT_OBJECT.c */
+/* File: c/OP_MOVE_RESULT.c */
+/* move-result-object vAA: verbatim copy of the OP_MOVE_RESULT body. */
+HANDLE_OPCODE(OP_MOVE_RESULT_OBJECT /*vAA*/)
+    vdst = INST_AA(inst);
+    ILOGV("|move-result%s v%d %s(v%d=0x%08x)",
+         (INST_INST(inst) == OP_MOVE_RESULT) ? "" : "-object",
+         vdst, kSpacing+4, vdst,retval.i);
+    SET_REGISTER(vdst, retval.i);
+    FINISH(1);
+OP_END
+
+//OP_END
+
+/* File: c/OP_MOVE_EXCEPTION.c */
+/*
+ * move-exception vAA: store the thread's pending exception object in a
+ * register and clear the pending-exception slot.
+ */
+HANDLE_OPCODE(OP_MOVE_EXCEPTION /*vAA*/)
+    vdst = INST_AA(inst);
+    ILOGV("|move-exception v%d", vdst);
+    assert(self->exception != NULL);
+    SET_REGISTER(vdst, (u4)self->exception);
+    dvmClearException(self);
+    FINISH(1);
+OP_END
+
+/* File: c/OP_RETURN_VOID.c */
+/* return-void: no result; jump to the shared method-return path. */
+HANDLE_OPCODE(OP_RETURN_VOID /**/)
+    ILOGV("|return-void");
+#ifndef NDEBUG
+    /* NOTE(review): this poisons only the low 32 bits of the 64-bit
+     * retval.j; harmless since the value must not be consumed after a
+     * void return, but confirm the partial fill is intentional. */
+    retval.j = 0xababababULL;    // placate valgrind
+#endif
+    GOTO(returnFromMethod);
+OP_END
+
+/* File: c/OP_RETURN.c */
+/*
+ * return vAA: latch a 32-bit result in retval, then take the shared
+ * return path.  Body is shared verbatim with OP_RETURN_OBJECT, hence
+ * the INST_INST test in the log call.
+ */
+HANDLE_OPCODE(OP_RETURN /*vAA*/)
+    vsrc1 = INST_AA(inst);
+    ILOGV("|return%s v%d",
+        (INST_INST(inst) == OP_RETURN) ? "" : "-object", vsrc1);
+    retval.i = GET_REGISTER(vsrc1);
+    GOTO(returnFromMethod);
+OP_END
+
+/* File: c/OP_RETURN_WIDE.c */
+/* return-wide vAA: 64-bit result. */
+HANDLE_OPCODE(OP_RETURN_WIDE /*vAA*/)
+    vsrc1 = INST_AA(inst);
+    ILOGV("|return-wide v%d", vsrc1);
+    retval.j = GET_REGISTER_WIDE(vsrc1);
+    GOTO(returnFromMethod);
+OP_END
+
+/* File: c/OP_RETURN_OBJECT.c */
+/* File: c/OP_RETURN.c */
+/* return-object vAA: verbatim copy of the OP_RETURN body. */
+HANDLE_OPCODE(OP_RETURN_OBJECT /*vAA*/)
+    vsrc1 = INST_AA(inst);
+    ILOGV("|return%s v%d",
+        (INST_INST(inst) == OP_RETURN) ? "" : "-object", vsrc1);
+    retval.i = GET_REGISTER(vsrc1);
+    GOTO(returnFromMethod);
+OP_END
+
+//OP_END
+
+/* File: c/OP_CONST_4.c */
+/* const/4 vA, #+B: load a sign-extended 4-bit literal. */
+HANDLE_OPCODE(OP_CONST_4 /*vA, #+B*/)
+    {
+        s4 tmp;
+
+        vdst = INST_A(inst);
+        /* shift the nibble to the top, then arithmetic-shift it back
+         * down to sign-extend the 4-bit value into 32 bits */
+        tmp = (s4) (INST_B(inst) << 28) >> 28;  // sign extend 4-bit value
+        ILOGV("|const/4 v%d,#0x%02x", vdst, (s4)tmp);
+        SET_REGISTER(vdst, tmp);
+    }
+    FINISH(1);
+OP_END
+
+/* File: c/OP_CONST_16.c */
+/* const/16 vAA, #+BBBB: sign-extend a 16-bit literal via the s2 cast. */
+HANDLE_OPCODE(OP_CONST_16 /*vAA, #+BBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|const/16 v%d,#0x%04x", vdst, (s2)vsrc1);
+    SET_REGISTER(vdst, (s2) vsrc1);
+    FINISH(2);
+OP_END
+
+/* File: c/OP_CONST.c */
+/* const vAA, #+BBBBBBBB: 32-bit literal, low 16 bits first in the stream. */
+HANDLE_OPCODE(OP_CONST /*vAA, #+BBBBBBBB*/)
+    {
+        u4 tmp;
+
+        vdst = INST_AA(inst);
+        tmp = FETCH(1);
+        tmp |= (u4)FETCH(2) << 16;
+        ILOGV("|const v%d,#0x%08x", vdst, tmp);
+        SET_REGISTER(vdst, tmp);
+    }
+    FINISH(3);
+OP_END
+
+/* File: c/OP_CONST_HIGH16.c */
+/*
+ * const/high16 vAA, #+BBBB0000: load the 16-bit literal into the high
+ * half of a 32-bit register.  The u4 cast matters: vsrc1 promotes to
+ * (signed) int, and left-shifting a literal with bit 15 set into the
+ * sign bit is undefined behavior in C; the unsigned shift yields the
+ * same register value with defined semantics.
+ */
+HANDLE_OPCODE(OP_CONST_HIGH16 /*vAA, #+BBBB0000*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|const/high16 v%d,#0x%04x0000", vdst, vsrc1);
+    SET_REGISTER(vdst, (u4) vsrc1 << 16);
+    FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_WIDE_16.c */
+/* const-wide/16 vAA, #+BBBB: sign-extend a 16-bit literal to 64 bits. */
+HANDLE_OPCODE(OP_CONST_WIDE_16 /*vAA, #+BBBB*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|const-wide/16 v%d,#0x%04x", vdst, (s2)vsrc1);
+    SET_REGISTER_WIDE(vdst, (s2)vsrc1);
+    FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_WIDE_32.c */
+/* const-wide/32 vAA, #+BBBBBBBB: assemble 32 bits (low half first),
+ * then sign-extend to 64 via the s4 cast. */
+HANDLE_OPCODE(OP_CONST_WIDE_32 /*vAA, #+BBBBBBBB*/)
+    {
+        u4 tmp;
+
+        vdst = INST_AA(inst);
+        tmp = FETCH(1);
+        tmp |= (u4)FETCH(2) << 16;
+        ILOGV("|const-wide/32 v%d,#0x%08x", vdst, tmp);
+        SET_REGISTER_WIDE(vdst, (s4) tmp);
+    }
+    FINISH(3);
+OP_END
+
+/* File: c/OP_CONST_WIDE.c */
+/* const-wide vAA: full 64-bit literal from four code units, lowest
+ * 16 bits first. */
+HANDLE_OPCODE(OP_CONST_WIDE /*vAA, #+BBBBBBBBBBBBBBBB*/)
+    {
+        u8 tmp;
+
+        vdst = INST_AA(inst);
+        tmp = FETCH(1);
+        tmp |= (u8)FETCH(2) << 16;
+        tmp |= (u8)FETCH(3) << 32;
+        tmp |= (u8)FETCH(4) << 48;
+        ILOGV("|const-wide v%d,#0x%08llx", vdst, tmp);
+        SET_REGISTER_WIDE(vdst, tmp);
+    }
+    FINISH(5);
+OP_END
+
+/* File: c/OP_CONST_WIDE_HIGH16.c */
+/* const-wide/high16 vAA: literal goes in the top 16 of 64 bits; the u8
+ * cast keeps the 48-bit shift well-defined. */
+HANDLE_OPCODE(OP_CONST_WIDE_HIGH16 /*vAA, #+BBBB000000000000*/)
+    vdst = INST_AA(inst);
+    vsrc1 = FETCH(1);
+    ILOGV("|const-wide/high16 v%d,#0x%04x000000000000", vdst, vsrc1);
+    SET_REGISTER_WIDE(vdst, ((u8) vsrc1) << 48);
+    FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_STRING.c */
+/*
+ * const-string vAA, string@BBBB: load a resolved String reference.
+ * Resolution is lazy; EXPORT_PC only on the slow path since only
+ * dvmResolveString can throw here.
+ */
+HANDLE_OPCODE(OP_CONST_STRING /*vAA, string@BBBB*/)
+    {
+        StringObject* strObj;
+
+        vdst = INST_AA(inst);
+        ref = FETCH(1);
+        ILOGV("|const-string v%d string@0x%04x", vdst, ref);
+        strObj = dvmDexGetResolvedString(methodClassDex, ref);
+        if (strObj == NULL) {
+            EXPORT_PC();
+            strObj = dvmResolveString(method->clazz, ref);
+            if (strObj == NULL)
+                GOTO(exceptionThrown);
+        }
+        SET_REGISTER(vdst, (u4) strObj);
+    }
+    FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_STRING_JUMBO.c */
+/* const-string/jumbo: same as const-string with a 32-bit string index
+ * assembled from two code units (low half first). */
+HANDLE_OPCODE(OP_CONST_STRING_JUMBO /*vAA, string@BBBBBBBB*/)
+    {
+        StringObject* strObj;
+        u4 tmp;
+
+        vdst = INST_AA(inst);
+        tmp = FETCH(1);
+        tmp |= (u4)FETCH(2) << 16;
+        ILOGV("|const-string/jumbo v%d string@0x%08x", vdst, tmp);
+        strObj = dvmDexGetResolvedString(methodClassDex, tmp);
+        if (strObj == NULL) {
+            EXPORT_PC();
+            strObj = dvmResolveString(method->clazz, tmp);
+            if (strObj == NULL)
+                GOTO(exceptionThrown);
+        }
+        SET_REGISTER(vdst, (u4) strObj);
+    }
+    FINISH(3);
+OP_END
+
+/* File: c/OP_CONST_CLASS.c */
+/* const-class vAA, class@BBBB: load a resolved Class reference; same
+ * lazy-resolution shape as const-string. */
+HANDLE_OPCODE(OP_CONST_CLASS /*vAA, class@BBBB*/)
+    {
+        ClassObject* clazz;
+
+        vdst = INST_AA(inst);
+        ref = FETCH(1);
+        ILOGV("|const-class v%d class@0x%04x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            EXPORT_PC();
+            clazz = dvmResolveClass(method->clazz, ref, true);
+            if (clazz == NULL)
+                GOTO(exceptionThrown);
+        }
+        SET_REGISTER(vdst, (u4) clazz);
+    }
+    FINISH(2);
+OP_END
+
+/* File: c/OP_MONITOR_ENTER.c */
+/*
+ * monitor-enter vAA: acquire the object's monitor after a null check.
+ * With deadlock prediction enabled, dvmLockObject may have raised an
+ * exception, so it is re-checked afterward.
+ */
+HANDLE_OPCODE(OP_MONITOR_ENTER /*vAA*/)
+    {
+        Object* obj;
+
+        vsrc1 = INST_AA(inst);
+        ILOGV("|monitor-enter v%d %s(0x%08x)",
+            vsrc1, kSpacing+6, GET_REGISTER(vsrc1));
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (!checkForNullExportPC(obj, fp, pc))
+            GOTO(exceptionThrown);
+        ILOGV("+ locking %p %s\n", obj, obj->clazz->descriptor);
+#ifdef WITH_MONITOR_TRACKING
+        EXPORT_PC();        /* need for stack trace */
+#endif
+        dvmLockObject(self, obj);
+#ifdef WITH_DEADLOCK_PREDICTION
+        if (dvmCheckException(self))
+            GOTO(exceptionThrown);
+#endif
+    }
+    FINISH(1);
+OP_END
+
+/* File: c/OP_MONITOR_EXIT.c */
+/*
+ * monitor-exit vAA: release the object's monitor.  On either failure
+ * (null ref or unowned monitor) the PC is advanced past this one-unit
+ * instruction before throwing — see the in-body comment for why.
+ */
+HANDLE_OPCODE(OP_MONITOR_EXIT /*vAA*/)
+    {
+        Object* obj;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);
+        ILOGV("|monitor-exit v%d %s(0x%08x)",
+            vsrc1, kSpacing+5, GET_REGISTER(vsrc1));
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (!checkForNull(obj)) {
+            /*
+             * The exception needs to be processed at the *following*
+             * instruction, not the current instruction (see the Dalvik
+             * spec).  Because we're jumping to an exception handler,
+             * we're not actually at risk of skipping an instruction
+             * by doing so.
+             */
+            ADJUST_PC(1);           /* monitor-exit width is 1 */
+            GOTO(exceptionThrown);
+        }
+        ILOGV("+ unlocking %p %s\n", obj, obj->clazz->descriptor);
+        if (!dvmUnlockObject(self, obj)) {
+            assert(dvmCheckException(self));
+            ADJUST_PC(1);
+            GOTO(exceptionThrown);
+        }
+    }
+    FINISH(1);
+OP_END
+
+/* File: c/OP_CHECK_CAST.c */
+/*
+ * check-cast vAA, class@BBBB: throw ClassCastException if vAA is a
+ * non-null reference not assignable to the named class.  A null
+ * reference always passes (the whole check is skipped).  EXPORT_PC up
+ * front: resolution and the throw path both need it.
+ */
+HANDLE_OPCODE(OP_CHECK_CAST /*vAA, class@BBBB*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);
+        ref = FETCH(1);         /* class to check against */
+        ILOGV("|check-cast v%d,class@0x%04x", vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj != NULL) {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNull(obj))
+                GOTO(exceptionThrown);
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                clazz = dvmResolveClass(method->clazz, ref, false);
+                if (clazz == NULL)
+                    GOTO(exceptionThrown);
+            }
+            if (!dvmInstanceof(obj->clazz, clazz)) {
+                dvmThrowExceptionWithClassMessage(
+                    "Ljava/lang/ClassCastException;", obj->clazz->descriptor);
+                GOTO(exceptionThrown);
+            }
+        }
+    }
+    FINISH(2);
+OP_END
+
+/* File: c/OP_INSTANCE_OF.c */
+/*
+ * instance-of vA, vB, class@CCCC: vA = 1 if vB is a non-null instance
+ * of the named class, else 0.  A null reference yields 0 without any
+ * resolution; EXPORT_PC only on the resolution slow path.
+ */
+HANDLE_OPCODE(OP_INSTANCE_OF /*vA, vB, class@CCCC*/)
+    {
+        ClassObject* clazz;
+        Object* obj;
+
+        vdst = INST_A(inst);
+        vsrc1 = INST_B(inst);   /* object to check */
+        ref = FETCH(1);         /* class to check against */
+        ILOGV("|instance-of v%d,v%d,class@0x%04x", vdst, vsrc1, ref);
+
+        obj = (Object*)GET_REGISTER(vsrc1);
+        if (obj == NULL) {
+            SET_REGISTER(vdst, 0);
+        } else {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+            if (!checkForNullExportPC(obj, fp, pc))
+                GOTO(exceptionThrown);
+#endif
+            clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+            if (clazz == NULL) {
+                EXPORT_PC();
+                clazz = dvmResolveClass(method->clazz, ref, true);
+                if (clazz == NULL)
+                    GOTO(exceptionThrown);
+            }
+            SET_REGISTER(vdst, dvmInstanceof(obj->clazz, clazz));
+        }
+    }
+    FINISH(2);
+OP_END
+
+/* File: c/OP_ARRAY_LENGTH.c */
+/*
+ * array-length vA, vB: vA = vB->length after a null check (the helper
+ * exports the PC on the throwing path).  No type check is needed — the
+ * verifier guarantees vB holds an array reference.
+ */
+HANDLE_OPCODE(OP_ARRAY_LENGTH /*vA, vB*/)
+    {
+        ArrayObject* arrayObj;
+
+        vdst = INST_A(inst);
+        vsrc1 = INST_B(inst);
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+        ILOGV("|array-length v%d,v%d  (%p)", vdst, vsrc1, arrayObj);
+        if (!checkForNullExportPC((Object*) arrayObj, fp, pc))
+            GOTO(exceptionThrown);
+        /* verifier guarantees this is an array reference */
+        SET_REGISTER(vdst, arrayObj->length);
+    }
+    FINISH(1);
+OP_END
+
+/* File: c/OP_NEW_INSTANCE.c */
+/*
+ * new-instance vAA, class@BBBB: allocate an uninitialized instance of
+ * the named class.  Steps: lazy class resolution, class initialization
+ * if needed, reject interface/abstract classes with InstantiationError,
+ * then allocate.  Any step can throw, hence EXPORT_PC at the top.
+ */
+HANDLE_OPCODE(OP_NEW_INSTANCE /*vAA, class@BBBB*/)
+    {
+        ClassObject* clazz;
+        Object* newObj;
+
+        EXPORT_PC();
+
+        vdst = INST_AA(inst);
+        ref = FETCH(1);
+        ILOGV("|new-instance v%d,class@0x%04x", vdst, ref);
+        clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (clazz == NULL) {
+            clazz = dvmResolveClass(method->clazz, ref, false);
+            if (clazz == NULL)
+                GOTO(exceptionThrown);
+        }
+
+        if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz))
+            GOTO(exceptionThrown);
+
+        /*
+         * Note: the verifier can ensure that this never happens, allowing us
+         * to remove the check.  However, the spec requires we throw the
+         * exception at runtime, not verify time, so the verifier would
+         * need to replace the new-instance call with a magic "throw
+         * InstantiationError" instruction.
+         *
+         * Since this relies on the verifier, which is optional, we would
+         * also need a "new-instance-quick" instruction to identify instances
+         * that don't require the check.
+         */
+        if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
+            dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationError;",
+                clazz->descriptor);
+            GOTO(exceptionThrown);
+        }
+        newObj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+        if (newObj == NULL)
+            GOTO(exceptionThrown);
+        SET_REGISTER(vdst, (u4) newObj);
+    }
+    FINISH(2);
+OP_END
+
/* File: c/OP_NEW_ARRAY.c */
/*
 * new-array vA, vB, class@CCCC
 *
 * Allocate an array of the class named by CCCC, with the element count
 * taken from vB, and store the reference in vA.  A negative length
 * throws NegativeArraySizeException; resolution or allocation failure
 * branches to the common exception handler.
 */
HANDLE_OPCODE(OP_NEW_ARRAY /*vA, vB, class@CCCC*/)
    {
        ClassObject* arrayClass;
        ArrayObject* newArray;
        s4 length;

        EXPORT_PC();

        vdst = INST_A(inst);
        vsrc1 = INST_B(inst);       /* length reg */
        ref = FETCH(1);
        ILOGV("|new-array v%d,v%d,class@0x%04x  (%d elements)",
            vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
        length = (s4) GET_REGISTER(vsrc1);
        if (length < 0) {
            dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
            GOTO(exceptionThrown);
        }
        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
        if (arrayClass == NULL) {
            /* not cached yet; resolve the array class */
            arrayClass = dvmResolveClass(method->clazz, ref, false);
            if (arrayClass == NULL)
                GOTO(exceptionThrown);
        }
        /* verifier guarantees this is an array class */
        assert(dvmIsArrayClass(arrayClass));
        assert(dvmIsClassInitialized(arrayClass));

        newArray = dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
        if (newArray == NULL)
            GOTO(exceptionThrown);
        SET_REGISTER(vdst, (u4) newArray);
    }
    FINISH(2);
OP_END
+
+
/* File: c/OP_FILLED_NEW_ARRAY.c */
/*
 * filled-new-array / filled-new-array/range
 *
 * Both forms dispatch to the shared "filledNewArray" handler; the
 * boolean argument selects the range (true) vs. five-register (false)
 * operand encoding.
 */
HANDLE_OPCODE(OP_FILLED_NEW_ARRAY /*vB, {vD, vE, vF, vG, vA}, class@CCCC*/)
    GOTO(filledNewArray, false);
OP_END

/* File: c/OP_FILLED_NEW_ARRAY_RANGE.c */
HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_RANGE /*{vCCCC..v(CCCC+AA-1)}, class@BBBB*/)
    GOTO(filledNewArray, true);
OP_END
+
/* File: c/OP_FILL_ARRAY_DATA.c */
/*
 * fill-array-data vAA, +BBBBBBBB
 *
 * Populate the array referenced by vAA from a data table embedded in
 * the method's instruction stream at pc+offset (offset in 16-bit code
 * units).  The actual decode/copy is done by
 * dvmInterpHandleFillArrayData(); on failure (e.g. bad array) it
 * returns false and we branch to the common exception handler.  Debug
 * builds also sanity-check that the table lies within the method body.
 */
HANDLE_OPCODE(OP_FILL_ARRAY_DATA)   /*vAA, +BBBBBBBB*/
    {
        const u2* arrayData;
        s4 offset;
        ArrayObject* arrayObj;

        EXPORT_PC();
        vsrc1 = INST_AA(inst);
        offset = FETCH(1) | (((s4) FETCH(2)) << 16);
        ILOGV("|fill-array-data v%d +0x%04x", vsrc1, offset);
        arrayData = pc + offset;       // offset in 16-bit units
#ifndef NDEBUG
        if (arrayData < method->insns ||
            arrayData >= method->insns + dvmGetMethodInsnsSize(method))
        {
            /* should have been caught in verifier */
            dvmThrowException("Ljava/lang/InternalError;", 
                              "bad fill array data");
            GOTO(exceptionThrown);
        }
#endif
        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
        if (!dvmInterpHandleFillArrayData(arrayObj, arrayData)) {
            GOTO(exceptionThrown);
        }
        FINISH(3);
    }
OP_END
+
/* File: c/OP_THROW.c */
/*
 * throw vAA
 *
 * Throw the exception object in vAA.  If the register holds null, the
 * checkForNullExportPC call raises a NullPointerException instead;
 * either way control always transfers to the common exception handler.
 */
HANDLE_OPCODE(OP_THROW /*vAA*/)
    {
        Object* obj;

        vsrc1 = INST_AA(inst);
        ILOGV("|throw v%d  (%p)", vsrc1, (void*)GET_REGISTER(vsrc1));
        obj = (Object*) GET_REGISTER(vsrc1);
        if (!checkForNullExportPC(obj, fp, pc)) {
            /* will throw a null pointer exception */
            LOGVV("Bad exception\n");
        } else {
            /* use the requested exception */
            dvmSetException(self, obj);
        }
        GOTO(exceptionThrown);
    }
OP_END
+
/* File: c/OP_GOTO.c */
/*
 * goto +AA / goto/16 +AAAA / goto/32 +AAAAAAAA
 *
 * Unconditional branches with 8-, 16-, and 32-bit signed offsets,
 * measured in 16-bit code units.  PERIODIC_CHECKS runs only on
 * non-forward branches, so loops hit the periodic check on every
 * back-edge; goto/32 tests "<= 0" because it is the only form allowed
 * to branch to itself (offset 0).
 */
HANDLE_OPCODE(OP_GOTO /*+AA*/)
    vdst = INST_AA(inst);
    if ((s1)vdst < 0)
        ILOGV("|goto -0x%02x", -((s1)vdst));
    else
        ILOGV("|goto +0x%02x", ((s1)vdst));
    ILOGV("> branch taken");
    if ((s1)vdst < 0)
        PERIODIC_CHECKS(kInterpEntryInstr, (s1)vdst);
    FINISH((s1)vdst);
OP_END

/* File: c/OP_GOTO_16.c */
HANDLE_OPCODE(OP_GOTO_16 /*+AAAA*/)
    {
        s4 offset = (s2) FETCH(1);          /* sign-extend next code unit */

        if (offset < 0)
            ILOGV("|goto/16 -0x%04x", -offset);
        else
            ILOGV("|goto/16 +0x%04x", offset);
        ILOGV("> branch taken");
        if (offset < 0)
            PERIODIC_CHECKS(kInterpEntryInstr, offset);
        FINISH(offset);
    }
OP_END

/* File: c/OP_GOTO_32.c */
HANDLE_OPCODE(OP_GOTO_32 /*+AAAAAAAA*/)
    {
        s4 offset = FETCH(1);               /* low-order 16 bits */
        offset |= ((s4) FETCH(2)) << 16;    /* high-order 16 bits */

        if (offset < 0)
            ILOGV("|goto/32 -0x%08x", -offset);
        else
            ILOGV("|goto/32 +0x%08x", offset);
        ILOGV("> branch taken");
        if (offset <= 0)    /* allowed to branch to self */
            PERIODIC_CHECKS(kInterpEntryInstr, offset);
        FINISH(offset);
    }
OP_END
+
+/* File: c/OP_PACKED_SWITCH.c */
+HANDLE_OPCODE(OP_PACKED_SWITCH /*vAA, +BBBB*/)
+    {
+        const u2* switchData;
+        u4 testVal;
+        s4 offset;
+
+        vsrc1 = INST_AA(inst);
+        offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+        ILOGV("|packed-switch v%d +0x%04x", vsrc1, vsrc2);
+        switchData = pc + offset;       // offset in 16-bit units
+#ifndef NDEBUG
+        if (switchData < method->insns ||
+            switchData >= method->insns + dvmGetMethodInsnsSize(method))
+        {
+            /* should have been caught in verifier */
+            EXPORT_PC();
+            dvmThrowException("Ljava/lang/InternalError;", "bad packed switch");
+            GOTO(exceptionThrown);
+        }
+#endif
+        testVal = GET_REGISTER(vsrc1);
+
+        offset = dvmInterpHandlePackedSwitch(switchData, testVal);
+        ILOGV("> branch taken (0x%04x)\n", offset);
+        if (offset <= 0)  /* uncommon */
+            PERIODIC_CHECKS(kInterpEntryInstr, offset);
+        FINISH(offset);
+    }
+OP_END
+
+/* File: c/OP_SPARSE_SWITCH.c */
+HANDLE_OPCODE(OP_SPARSE_SWITCH /*vAA, +BBBB*/)
+    {
+        const u2* switchData;
+        u4 testVal;
+        s4 offset;
+
+        vsrc1 = INST_AA(inst);
+        offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+        ILOGV("|sparse-switch v%d +0x%04x", vsrc1, vsrc2);
+        switchData = pc + offset;       // offset in 16-bit units
+#ifndef NDEBUG
+        if (switchData < method->insns ||
+            switchData >= method->insns + dvmGetMethodInsnsSize(method))
+        {
+            /* should have been caught in verifier */
+            EXPORT_PC();
+            dvmThrowException("Ljava/lang/InternalError;", "bad sparse switch");
+            GOTO(exceptionThrown);
+        }
+#endif
+        testVal = GET_REGISTER(vsrc1);
+
+        offset = dvmInterpHandleSparseSwitch(switchData, testVal);
+        ILOGV("> branch taken (0x%04x)\n", offset);
+        if (offset <= 0)  /* uncommon */
+            PERIODIC_CHECKS(kInterpEntryInstr, offset);
+        FINISH(offset);
+    }
+OP_END
+
/*
 * Comparison, conditional-branch, unused-opcode, and typed array
 * get/put handlers.  Each is a single expansion of the corresponding
 * HANDLE_* helper macro; the shared bodies live in the macro
 * definitions.  For the cmp handlers, the final macro argument is the
 * value stored when the operands compare unordered (the l/g NaN bias
 * for floats/doubles; unused for cmp-long).
 */
/* File: c/OP_CMPL_FLOAT.c */
HANDLE_OP_CMPX(OP_CMPL_FLOAT, "l-float", float, _FLOAT, -1)
OP_END

/* File: c/OP_CMPG_FLOAT.c */
HANDLE_OP_CMPX(OP_CMPG_FLOAT, "g-float", float, _FLOAT, 1)
OP_END

/* File: c/OP_CMPL_DOUBLE.c */
HANDLE_OP_CMPX(OP_CMPL_DOUBLE, "l-double", double, _DOUBLE, -1)
OP_END

/* File: c/OP_CMPG_DOUBLE.c */
HANDLE_OP_CMPX(OP_CMPG_DOUBLE, "g-double", double, _DOUBLE, 1)
OP_END

/* File: c/OP_CMP_LONG.c */
HANDLE_OP_CMPX(OP_CMP_LONG, "-long", s8, _WIDE, 0)
OP_END

/* File: c/OP_IF_EQ.c */
HANDLE_OP_IF_XX(OP_IF_EQ, "eq", ==)
OP_END

/* File: c/OP_IF_NE.c */
HANDLE_OP_IF_XX(OP_IF_NE, "ne", !=)
OP_END

/* File: c/OP_IF_LT.c */
HANDLE_OP_IF_XX(OP_IF_LT, "lt", <)
OP_END

/* File: c/OP_IF_GE.c */
HANDLE_OP_IF_XX(OP_IF_GE, "ge", >=)
OP_END

/* File: c/OP_IF_GT.c */
HANDLE_OP_IF_XX(OP_IF_GT, "gt", >)
OP_END

/* File: c/OP_IF_LE.c */
HANDLE_OP_IF_XX(OP_IF_LE, "le", <=)
OP_END

/* File: c/OP_IF_EQZ.c */
HANDLE_OP_IF_XXZ(OP_IF_EQZ, "eqz", ==)
OP_END

/* File: c/OP_IF_NEZ.c */
HANDLE_OP_IF_XXZ(OP_IF_NEZ, "nez", !=)
OP_END

/* File: c/OP_IF_LTZ.c */
HANDLE_OP_IF_XXZ(OP_IF_LTZ, "ltz", <)
OP_END

/* File: c/OP_IF_GEZ.c */
HANDLE_OP_IF_XXZ(OP_IF_GEZ, "gez", >=)
OP_END

/* File: c/OP_IF_GTZ.c */
HANDLE_OP_IF_XXZ(OP_IF_GTZ, "gtz", >)
OP_END

/* File: c/OP_IF_LEZ.c */
HANDLE_OP_IF_XXZ(OP_IF_LEZ, "lez", <=)
OP_END

/* File: c/OP_UNUSED_3E.c */
HANDLE_OPCODE(OP_UNUSED_3E)
OP_END

/* File: c/OP_UNUSED_3F.c */
HANDLE_OPCODE(OP_UNUSED_3F)
OP_END

/* File: c/OP_UNUSED_40.c */
HANDLE_OPCODE(OP_UNUSED_40)
OP_END

/* File: c/OP_UNUSED_41.c */
HANDLE_OPCODE(OP_UNUSED_41)
OP_END

/* File: c/OP_UNUSED_42.c */
HANDLE_OPCODE(OP_UNUSED_42)
OP_END

/* File: c/OP_UNUSED_43.c */
HANDLE_OPCODE(OP_UNUSED_43)
OP_END

/* File: c/OP_AGET.c */
HANDLE_OP_AGET(OP_AGET, "", u4, )
OP_END

/* File: c/OP_AGET_WIDE.c */
HANDLE_OP_AGET(OP_AGET_WIDE, "-wide", s8, _WIDE)
OP_END

/* File: c/OP_AGET_OBJECT.c */
HANDLE_OP_AGET(OP_AGET_OBJECT, "-object", u4, )
OP_END

/* File: c/OP_AGET_BOOLEAN.c */
HANDLE_OP_AGET(OP_AGET_BOOLEAN, "-boolean", u1, )
OP_END

/* File: c/OP_AGET_BYTE.c */
HANDLE_OP_AGET(OP_AGET_BYTE, "-byte", s1, )
OP_END

/* File: c/OP_AGET_CHAR.c */
HANDLE_OP_AGET(OP_AGET_CHAR, "-char", u2, )
OP_END

/* File: c/OP_AGET_SHORT.c */
HANDLE_OP_AGET(OP_AGET_SHORT, "-short", s2, )
OP_END

/* File: c/OP_APUT.c */
HANDLE_OP_APUT(OP_APUT, "", u4, )
OP_END

/* File: c/OP_APUT_WIDE.c */
HANDLE_OP_APUT(OP_APUT_WIDE, "-wide", s8, _WIDE)
OP_END
+
/* File: c/OP_APUT_OBJECT.c */
/*
 * aput-object vAA, vBB, vCC
 *
 * Store the object reference in vAA into array vBB at index vCC.  This
 * gets a dedicated handler (rather than HANDLE_OP_APUT) because storing
 * a non-null reference requires an assignability check of the value's
 * class against the array's class (dvmCanPutArrayElement); failure
 * throws ArrayStoreException.  A null array throws NullPointerException
 * via checkForNull, and an out-of-range index throws
 * ArrayIndexOutOfBoundsException.  Storing null skips the type check.
 */
HANDLE_OPCODE(OP_APUT_OBJECT /*vAA, vBB, vCC*/)
    {
        ArrayObject* arrayObj;
        Object* obj;
        u2 arrayInfo;
        EXPORT_PC();
        vdst = INST_AA(inst);       /* AA: source value */
        arrayInfo = FETCH(1);
        vsrc1 = arrayInfo & 0xff;   /* BB: array ptr */
        vsrc2 = arrayInfo >> 8;     /* CC: index */
        ILOGV("|aput%s v%d,v%d,v%d", "-object", vdst, vsrc1, vsrc2);
        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
        if (!checkForNull((Object*) arrayObj))
            GOTO(exceptionThrown);
        if (GET_REGISTER(vsrc2) >= arrayObj->length) {
            dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;",
                NULL);
            GOTO(exceptionThrown);
        }
        obj = (Object*) GET_REGISTER(vdst);
        if (obj != NULL) {
            if (!checkForNull(obj))
                GOTO(exceptionThrown);
            if (!dvmCanPutArrayElement(obj->clazz, arrayObj->obj.clazz)) {
                LOGV("Can't put a '%s'(%p) into array type='%s'(%p)\n",
                    obj->clazz->descriptor, obj,
                    arrayObj->obj.clazz->descriptor, arrayObj);
                //dvmDumpClass(obj->clazz);
                //dvmDumpClass(arrayObj->obj.clazz);
                dvmThrowException("Ljava/lang/ArrayStoreException;", NULL);
                GOTO(exceptionThrown);
            }
        }
        /* store as a raw 32-bit slot; references are u4-sized here */
        ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));
        ((u4*) arrayObj->contents)[GET_REGISTER(vsrc2)] =
            GET_REGISTER(vdst);
    }
    FINISH(2);
OP_END
+
/*
 * Remaining typed array-put handlers, plus the instance (iget/iput) and
 * static (sget/sput) field accessors.  All are single expansions of the
 * shared HANDLE_* macros; the macro arguments select the log suffix,
 * the accessor type name, and the wide/object register variant.
 */
/* File: c/OP_APUT_BOOLEAN.c */
HANDLE_OP_APUT(OP_APUT_BOOLEAN, "-boolean", u1, )
OP_END

/* File: c/OP_APUT_BYTE.c */
HANDLE_OP_APUT(OP_APUT_BYTE, "-byte", s1, )
OP_END

/* File: c/OP_APUT_CHAR.c */
HANDLE_OP_APUT(OP_APUT_CHAR, "-char", u2, )
OP_END

/* File: c/OP_APUT_SHORT.c */
HANDLE_OP_APUT(OP_APUT_SHORT, "-short", s2, )
OP_END

/* File: c/OP_IGET.c */
HANDLE_IGET_X(OP_IGET,                  "", Int, )
OP_END

/* File: c/OP_IGET_WIDE.c */
HANDLE_IGET_X(OP_IGET_WIDE,             "-wide", Long, _WIDE)
OP_END

/* File: c/OP_IGET_OBJECT.c */
HANDLE_IGET_X(OP_IGET_OBJECT,           "-object", Object, _AS_OBJECT)
OP_END

/* File: c/OP_IGET_BOOLEAN.c */
HANDLE_IGET_X(OP_IGET_BOOLEAN,          "", Int, )
OP_END

/* File: c/OP_IGET_BYTE.c */
HANDLE_IGET_X(OP_IGET_BYTE,             "", Int, )
OP_END

/* File: c/OP_IGET_CHAR.c */
HANDLE_IGET_X(OP_IGET_CHAR,             "", Int, )
OP_END

/* File: c/OP_IGET_SHORT.c */
HANDLE_IGET_X(OP_IGET_SHORT,            "", Int, )
OP_END

/* File: c/OP_IPUT.c */
HANDLE_IPUT_X(OP_IPUT,                  "", Int, )
OP_END

/* File: c/OP_IPUT_WIDE.c */
HANDLE_IPUT_X(OP_IPUT_WIDE,             "-wide", Long, _WIDE)
OP_END

/* File: c/OP_IPUT_OBJECT.c */
HANDLE_IPUT_X(OP_IPUT_OBJECT,           "-object", Object, _AS_OBJECT)
OP_END

/* File: c/OP_IPUT_BOOLEAN.c */
HANDLE_IPUT_X(OP_IPUT_BOOLEAN,          "", Int, )
OP_END

/* File: c/OP_IPUT_BYTE.c */
HANDLE_IPUT_X(OP_IPUT_BYTE,             "", Int, )
OP_END

/* File: c/OP_IPUT_CHAR.c */
HANDLE_IPUT_X(OP_IPUT_CHAR,             "", Int, )
OP_END

/* File: c/OP_IPUT_SHORT.c */
HANDLE_IPUT_X(OP_IPUT_SHORT,            "", Int, )
OP_END

/* File: c/OP_SGET.c */
HANDLE_SGET_X(OP_SGET,                  "", Int, )
OP_END

/* File: c/OP_SGET_WIDE.c */
HANDLE_SGET_X(OP_SGET_WIDE,             "-wide", Long, _WIDE)
OP_END

/* File: c/OP_SGET_OBJECT.c */
HANDLE_SGET_X(OP_SGET_OBJECT,           "-object", Object, _AS_OBJECT)
OP_END

/* File: c/OP_SGET_BOOLEAN.c */
HANDLE_SGET_X(OP_SGET_BOOLEAN,          "", Int, )
OP_END

/* File: c/OP_SGET_BYTE.c */
HANDLE_SGET_X(OP_SGET_BYTE,             "", Int, )
OP_END

/* File: c/OP_SGET_CHAR.c */
HANDLE_SGET_X(OP_SGET_CHAR,             "", Int, )
OP_END

/* File: c/OP_SGET_SHORT.c */
HANDLE_SGET_X(OP_SGET_SHORT,            "", Int, )
OP_END

/* File: c/OP_SPUT.c */
HANDLE_SPUT_X(OP_SPUT,                  "", Int, )
OP_END

/* File: c/OP_SPUT_WIDE.c */
HANDLE_SPUT_X(OP_SPUT_WIDE,             "-wide", Long, _WIDE)
OP_END

/* File: c/OP_SPUT_OBJECT.c */
HANDLE_SPUT_X(OP_SPUT_OBJECT,           "-object", Object, _AS_OBJECT)
OP_END

/* File: c/OP_SPUT_BOOLEAN.c */
HANDLE_SPUT_X(OP_SPUT_BOOLEAN,          "", Int, )
OP_END

/* File: c/OP_SPUT_BYTE.c */
HANDLE_SPUT_X(OP_SPUT_BYTE,             "", Int, )
OP_END

/* File: c/OP_SPUT_CHAR.c */
HANDLE_SPUT_X(OP_SPUT_CHAR,             "", Int, )
OP_END

/* File: c/OP_SPUT_SHORT.c */
HANDLE_SPUT_X(OP_SPUT_SHORT,            "", Int, )
OP_END
+
/*
 * Method invocation opcodes.  Each dispatches to a shared invoke
 * handler (invokeVirtual, invokeSuper, invokeDirect, invokeStatic,
 * invokeInterface); the boolean argument selects the range (true) vs.
 * five-register (false) operand encoding.
 */
/* File: c/OP_INVOKE_VIRTUAL.c */
HANDLE_OPCODE(OP_INVOKE_VIRTUAL /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
    GOTO(invokeVirtual, false);
OP_END

/* File: c/OP_INVOKE_SUPER.c */
HANDLE_OPCODE(OP_INVOKE_SUPER /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
    GOTO(invokeSuper, false);
OP_END

/* File: c/OP_INVOKE_DIRECT.c */
HANDLE_OPCODE(OP_INVOKE_DIRECT /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
    GOTO(invokeDirect, false);
OP_END

/* File: c/OP_INVOKE_STATIC.c */
HANDLE_OPCODE(OP_INVOKE_STATIC /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
    GOTO(invokeStatic, false);
OP_END

/* File: c/OP_INVOKE_INTERFACE.c */
HANDLE_OPCODE(OP_INVOKE_INTERFACE /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
    GOTO(invokeInterface, false);
OP_END

/* File: c/OP_UNUSED_73.c */
HANDLE_OPCODE(OP_UNUSED_73)
OP_END

/* File: c/OP_INVOKE_VIRTUAL_RANGE.c */
HANDLE_OPCODE(OP_INVOKE_VIRTUAL_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
    GOTO(invokeVirtual, true);
OP_END

/* File: c/OP_INVOKE_SUPER_RANGE.c */
HANDLE_OPCODE(OP_INVOKE_SUPER_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
    GOTO(invokeSuper, true);
OP_END

/* File: c/OP_INVOKE_DIRECT_RANGE.c */
HANDLE_OPCODE(OP_INVOKE_DIRECT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
    GOTO(invokeDirect, true);
OP_END

/* File: c/OP_INVOKE_STATIC_RANGE.c */
HANDLE_OPCODE(OP_INVOKE_STATIC_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
    GOTO(invokeStatic, true);
OP_END

/* File: c/OP_INVOKE_INTERFACE_RANGE.c */
HANDLE_OPCODE(OP_INVOKE_INTERFACE_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
    GOTO(invokeInterface, true);
OP_END

/* File: c/OP_UNUSED_79.c */
HANDLE_OPCODE(OP_UNUSED_79)
OP_END

/* File: c/OP_UNUSED_7A.c */
HANDLE_OPCODE(OP_UNUSED_7A)
OP_END
+
/*
 * Unary ops via the shared HANDLE_UNOP macro: arithmetic negate for
 * int/long, and 32-bit bitwise NOT expressed as XOR with all bits set.
 */
/* File: c/OP_NEG_INT.c */
HANDLE_UNOP(OP_NEG_INT, "neg-int", -, , )
OP_END

/* File: c/OP_NOT_INT.c */
HANDLE_UNOP(OP_NOT_INT, "not-int", , ^ 0xffffffff, )
OP_END

/* File: c/OP_NEG_LONG.c */
HANDLE_UNOP(OP_NEG_LONG, "neg-long", -, , _WIDE)
OP_END
+
+/* File: c/OP_NOT_LONG.c */
+HANDLE_UNOP(OP_NOT_LONG, "not-long", , & 0xffffffffffffffffULL, _WIDE)
+OP_END
+
/*
 * Floating-point negate, the numeric conversion family, the truncating
 * int-narrowing ops, and the three-register int/long/float binops.
 * All are single expansions of the shared HANDLE_* macros; the "true"
 * final argument on div/rem selects the divide-by-zero check, and the
 * cast argument on the shift ops selects arithmetic vs. logical shift.
 */
/* File: c/OP_NEG_FLOAT.c */
HANDLE_UNOP(OP_NEG_FLOAT, "neg-float", -, , _FLOAT)
OP_END

/* File: c/OP_NEG_DOUBLE.c */
HANDLE_UNOP(OP_NEG_DOUBLE, "neg-double", -, , _DOUBLE)
OP_END

/* File: c/OP_INT_TO_LONG.c */
HANDLE_NUMCONV(OP_INT_TO_LONG,          "int-to-long", _INT, _WIDE)
OP_END

/* File: c/OP_INT_TO_FLOAT.c */
HANDLE_NUMCONV(OP_INT_TO_FLOAT,         "int-to-float", _INT, _FLOAT)
OP_END

/* File: c/OP_INT_TO_DOUBLE.c */
HANDLE_NUMCONV(OP_INT_TO_DOUBLE,        "int-to-double", _INT, _DOUBLE)
OP_END

/* File: c/OP_LONG_TO_INT.c */
HANDLE_NUMCONV(OP_LONG_TO_INT,          "long-to-int", _WIDE, _INT)
OP_END

/* File: c/OP_LONG_TO_FLOAT.c */
HANDLE_NUMCONV(OP_LONG_TO_FLOAT,        "long-to-float", _WIDE, _FLOAT)
OP_END

/* File: c/OP_LONG_TO_DOUBLE.c */
HANDLE_NUMCONV(OP_LONG_TO_DOUBLE,       "long-to-double", _WIDE, _DOUBLE)
OP_END

/* File: c/OP_FLOAT_TO_INT.c */
HANDLE_FLOAT_TO_INT(OP_FLOAT_TO_INT,    "float-to-int",
    float, _FLOAT, s4, _INT)
OP_END

/* File: c/OP_FLOAT_TO_LONG.c */
HANDLE_FLOAT_TO_INT(OP_FLOAT_TO_LONG,   "float-to-long",
    float, _FLOAT, s8, _WIDE)
OP_END

/* File: c/OP_FLOAT_TO_DOUBLE.c */
HANDLE_NUMCONV(OP_FLOAT_TO_DOUBLE,      "float-to-double", _FLOAT, _DOUBLE)
OP_END

/* File: c/OP_DOUBLE_TO_INT.c */
HANDLE_FLOAT_TO_INT(OP_DOUBLE_TO_INT,   "double-to-int",
    double, _DOUBLE, s4, _INT)
OP_END

/* File: c/OP_DOUBLE_TO_LONG.c */
HANDLE_FLOAT_TO_INT(OP_DOUBLE_TO_LONG,  "double-to-long",
    double, _DOUBLE, s8, _WIDE)
OP_END

/* File: c/OP_DOUBLE_TO_FLOAT.c */
HANDLE_NUMCONV(OP_DOUBLE_TO_FLOAT,      "double-to-float", _DOUBLE, _FLOAT)
OP_END

/* File: c/OP_INT_TO_BYTE.c */
HANDLE_INT_TO_SMALL(OP_INT_TO_BYTE,     "byte", s1)
OP_END

/* File: c/OP_INT_TO_CHAR.c */
HANDLE_INT_TO_SMALL(OP_INT_TO_CHAR,     "char", u2)
OP_END

/* File: c/OP_INT_TO_SHORT.c */
HANDLE_INT_TO_SMALL(OP_INT_TO_SHORT,    "short", s2)    /* want sign bit */
OP_END

/* File: c/OP_ADD_INT.c */
HANDLE_OP_X_INT(OP_ADD_INT, "add", +, false)
OP_END

/* File: c/OP_SUB_INT.c */
HANDLE_OP_X_INT(OP_SUB_INT, "sub", -, false)
OP_END

/* File: c/OP_MUL_INT.c */
HANDLE_OP_X_INT(OP_MUL_INT, "mul", *, false)
OP_END

/* File: c/OP_DIV_INT.c */
HANDLE_OP_X_INT(OP_DIV_INT, "div", /, true)
OP_END

/* File: c/OP_REM_INT.c */
HANDLE_OP_X_INT(OP_REM_INT, "rem", %, true)
OP_END

/* File: c/OP_AND_INT.c */
HANDLE_OP_X_INT(OP_AND_INT, "and", &, false)
OP_END

/* File: c/OP_OR_INT.c */
HANDLE_OP_X_INT(OP_OR_INT,  "or",  |, false)
OP_END

/* File: c/OP_XOR_INT.c */
HANDLE_OP_X_INT(OP_XOR_INT, "xor", ^, false)
OP_END

/* File: c/OP_SHL_INT.c */
HANDLE_OP_SHX_INT(OP_SHL_INT, "shl", (s4), <<)
OP_END

/* File: c/OP_SHR_INT.c */
HANDLE_OP_SHX_INT(OP_SHR_INT, "shr", (s4), >>)
OP_END

/* File: c/OP_USHR_INT.c */
HANDLE_OP_SHX_INT(OP_USHR_INT, "ushr", (u4), >>)
OP_END

/* File: c/OP_ADD_LONG.c */
HANDLE_OP_X_LONG(OP_ADD_LONG, "add", +, false)
OP_END

/* File: c/OP_SUB_LONG.c */
HANDLE_OP_X_LONG(OP_SUB_LONG, "sub", -, false)
OP_END

/* File: c/OP_MUL_LONG.c */
HANDLE_OP_X_LONG(OP_MUL_LONG, "mul", *, false)
OP_END

/* File: c/OP_DIV_LONG.c */
HANDLE_OP_X_LONG(OP_DIV_LONG, "div", /, true)
OP_END

/* File: c/OP_REM_LONG.c */
HANDLE_OP_X_LONG(OP_REM_LONG, "rem", %, true)
OP_END

/* File: c/OP_AND_LONG.c */
HANDLE_OP_X_LONG(OP_AND_LONG, "and", &, false)
OP_END

/* File: c/OP_OR_LONG.c */
HANDLE_OP_X_LONG(OP_OR_LONG,  "or", |, false)
OP_END

/* File: c/OP_XOR_LONG.c */
HANDLE_OP_X_LONG(OP_XOR_LONG, "xor", ^, false)
OP_END

/* File: c/OP_SHL_LONG.c */
HANDLE_OP_SHX_LONG(OP_SHL_LONG, "shl", (s8), <<)
OP_END

/* File: c/OP_SHR_LONG.c */
HANDLE_OP_SHX_LONG(OP_SHR_LONG, "shr", (s8), >>)
OP_END

/* File: c/OP_USHR_LONG.c */
HANDLE_OP_SHX_LONG(OP_USHR_LONG, "ushr", (u8), >>)
OP_END

/* File: c/OP_ADD_FLOAT.c */
HANDLE_OP_X_FLOAT(OP_ADD_FLOAT, "add", +)
OP_END

/* File: c/OP_SUB_FLOAT.c */
HANDLE_OP_X_FLOAT(OP_SUB_FLOAT, "sub", -)
OP_END

/* File: c/OP_MUL_FLOAT.c */
HANDLE_OP_X_FLOAT(OP_MUL_FLOAT, "mul", *)
OP_END

/* File: c/OP_DIV_FLOAT.c */
HANDLE_OP_X_FLOAT(OP_DIV_FLOAT, "div", /)
OP_END
+
/* File: c/OP_REM_FLOAT.c */
/*
 * rem-float vAA, vBB, vCC: floating-point remainder.
 *
 * Unlike the other float binops this cannot use the binop macro,
 * because C's "%" operator does not accept floating-point operands;
 * it calls fmodf() instead.  No divide-by-zero check is needed (the
 * result is NaN per IEEE 754 semantics of fmodf).
 */
HANDLE_OPCODE(OP_REM_FLOAT /*vAA, vBB, vCC*/)
    {
        u2 srcRegs;
        vdst = INST_AA(inst);
        srcRegs = FETCH(1);
        vsrc1 = srcRegs & 0xff;
        vsrc2 = srcRegs >> 8;
        ILOGV("|%s-float v%d,v%d,v%d", "mod", vdst, vsrc1, vsrc2);
        SET_REGISTER_FLOAT(vdst,
            fmodf(GET_REGISTER_FLOAT(vsrc1), GET_REGISTER_FLOAT(vsrc2)));
    }
    FINISH(2);
OP_END
+
/*
 * Three-register double-precision binops via the shared macro; rem is
 * handled separately below because "%" does not accept doubles.
 */
/* File: c/OP_ADD_DOUBLE.c */
HANDLE_OP_X_DOUBLE(OP_ADD_DOUBLE, "add", +)
OP_END

/* File: c/OP_SUB_DOUBLE.c */
HANDLE_OP_X_DOUBLE(OP_SUB_DOUBLE, "sub", -)
OP_END

/* File: c/OP_MUL_DOUBLE.c */
HANDLE_OP_X_DOUBLE(OP_MUL_DOUBLE, "mul", *)
OP_END

/* File: c/OP_DIV_DOUBLE.c */
HANDLE_OP_X_DOUBLE(OP_DIV_DOUBLE, "div", /)
OP_END
+
/* File: c/OP_REM_DOUBLE.c */
/*
 * rem-double vAA, vBB, vCC: double-precision remainder via fmod(),
 * since C's "%" operator does not accept floating-point operands.
 */
HANDLE_OPCODE(OP_REM_DOUBLE /*vAA, vBB, vCC*/)
    {
        u2 srcRegs;
        vdst = INST_AA(inst);
        srcRegs = FETCH(1);
        vsrc1 = srcRegs & 0xff;
        vsrc2 = srcRegs >> 8;
        ILOGV("|%s-double v%d,v%d,v%d", "mod", vdst, vsrc1, vsrc2);
        SET_REGISTER_DOUBLE(vdst,
            fmod(GET_REGISTER_DOUBLE(vsrc1), GET_REGISTER_DOUBLE(vsrc2)));
    }
    FINISH(2);
OP_END
+
/*
 * Two-address forms (result written back to the first source register)
 * of the int/long/float binops, via the shared *_2ADDR macros.  The
 * "true" final argument on div/rem selects the divide-by-zero check.
 */
/* File: c/OP_ADD_INT_2ADDR.c */
HANDLE_OP_X_INT_2ADDR(OP_ADD_INT_2ADDR, "add", +, false)
OP_END

/* File: c/OP_SUB_INT_2ADDR.c */
HANDLE_OP_X_INT_2ADDR(OP_SUB_INT_2ADDR, "sub", -, false)
OP_END

/* File: c/OP_MUL_INT_2ADDR.c */
HANDLE_OP_X_INT_2ADDR(OP_MUL_INT_2ADDR, "mul", *, false)
OP_END

/* File: c/OP_DIV_INT_2ADDR.c */
HANDLE_OP_X_INT_2ADDR(OP_DIV_INT_2ADDR, "div", /, true)
OP_END

/* File: c/OP_REM_INT_2ADDR.c */
HANDLE_OP_X_INT_2ADDR(OP_REM_INT_2ADDR, "rem", %, true)
OP_END

/* File: c/OP_AND_INT_2ADDR.c */
HANDLE_OP_X_INT_2ADDR(OP_AND_INT_2ADDR, "and", &, false)
OP_END

/* File: c/OP_OR_INT_2ADDR.c */
HANDLE_OP_X_INT_2ADDR(OP_OR_INT_2ADDR,  "or", |, false)
OP_END

/* File: c/OP_XOR_INT_2ADDR.c */
HANDLE_OP_X_INT_2ADDR(OP_XOR_INT_2ADDR, "xor", ^, false)
OP_END

/* File: c/OP_SHL_INT_2ADDR.c */
HANDLE_OP_SHX_INT_2ADDR(OP_SHL_INT_2ADDR, "shl", (s4), <<)
OP_END

/* File: c/OP_SHR_INT_2ADDR.c */
HANDLE_OP_SHX_INT_2ADDR(OP_SHR_INT_2ADDR, "shr", (s4), >>)
OP_END

/* File: c/OP_USHR_INT_2ADDR.c */
HANDLE_OP_SHX_INT_2ADDR(OP_USHR_INT_2ADDR, "ushr", (u4), >>)
OP_END

/* File: c/OP_ADD_LONG_2ADDR.c */
HANDLE_OP_X_LONG_2ADDR(OP_ADD_LONG_2ADDR, "add", +, false)
OP_END

/* File: c/OP_SUB_LONG_2ADDR.c */
HANDLE_OP_X_LONG_2ADDR(OP_SUB_LONG_2ADDR, "sub", -, false)
OP_END

/* File: c/OP_MUL_LONG_2ADDR.c */
HANDLE_OP_X_LONG_2ADDR(OP_MUL_LONG_2ADDR, "mul", *, false)
OP_END

/* File: c/OP_DIV_LONG_2ADDR.c */
HANDLE_OP_X_LONG_2ADDR(OP_DIV_LONG_2ADDR, "div", /, true)
OP_END

/* File: c/OP_REM_LONG_2ADDR.c */
HANDLE_OP_X_LONG_2ADDR(OP_REM_LONG_2ADDR, "rem", %, true)
OP_END

/* File: c/OP_AND_LONG_2ADDR.c */
HANDLE_OP_X_LONG_2ADDR(OP_AND_LONG_2ADDR, "and", &, false)
OP_END

/* File: c/OP_OR_LONG_2ADDR.c */
HANDLE_OP_X_LONG_2ADDR(OP_OR_LONG_2ADDR,  "or", |, false)
OP_END

/* File: c/OP_XOR_LONG_2ADDR.c */
HANDLE_OP_X_LONG_2ADDR(OP_XOR_LONG_2ADDR, "xor", ^, false)
OP_END

/* File: c/OP_SHL_LONG_2ADDR.c */
HANDLE_OP_SHX_LONG_2ADDR(OP_SHL_LONG_2ADDR, "shl", (s8), <<)
OP_END

/* File: c/OP_SHR_LONG_2ADDR.c */
HANDLE_OP_SHX_LONG_2ADDR(OP_SHR_LONG_2ADDR, "shr", (s8), >>)
OP_END

/* File: c/OP_USHR_LONG_2ADDR.c */
HANDLE_OP_SHX_LONG_2ADDR(OP_USHR_LONG_2ADDR, "ushr", (u8), >>)
OP_END

/* File: c/OP_ADD_FLOAT_2ADDR.c */
HANDLE_OP_X_FLOAT_2ADDR(OP_ADD_FLOAT_2ADDR, "add", +)
OP_END

/* File: c/OP_SUB_FLOAT_2ADDR.c */
HANDLE_OP_X_FLOAT_2ADDR(OP_SUB_FLOAT_2ADDR, "sub", -)
OP_END

/* File: c/OP_MUL_FLOAT_2ADDR.c */
HANDLE_OP_X_FLOAT_2ADDR(OP_MUL_FLOAT_2ADDR, "mul", *)
OP_END

/* File: c/OP_DIV_FLOAT_2ADDR.c */
HANDLE_OP_X_FLOAT_2ADDR(OP_DIV_FLOAT_2ADDR, "div", /)
OP_END
+
/* File: c/OP_REM_FLOAT_2ADDR.c */
/*
 * rem-float/2addr vA, vB: in-place float remainder (vA = vA mod vB),
 * via fmodf() since C's "%" does not accept floats.
 */
HANDLE_OPCODE(OP_REM_FLOAT_2ADDR /*vA, vB*/)
    vdst = INST_A(inst);
    vsrc1 = INST_B(inst);
    ILOGV("|%s-float-2addr v%d,v%d", "mod", vdst, vsrc1);
    SET_REGISTER_FLOAT(vdst,
        fmodf(GET_REGISTER_FLOAT(vdst), GET_REGISTER_FLOAT(vsrc1)));
    FINISH(1);
OP_END
+
/*
 * Two-address double-precision binops; rem follows separately below
 * because "%" does not accept doubles.
 */
/* File: c/OP_ADD_DOUBLE_2ADDR.c */
HANDLE_OP_X_DOUBLE_2ADDR(OP_ADD_DOUBLE_2ADDR, "add", +)
OP_END

/* File: c/OP_SUB_DOUBLE_2ADDR.c */
HANDLE_OP_X_DOUBLE_2ADDR(OP_SUB_DOUBLE_2ADDR, "sub", -)
OP_END

/* File: c/OP_MUL_DOUBLE_2ADDR.c */
HANDLE_OP_X_DOUBLE_2ADDR(OP_MUL_DOUBLE_2ADDR, "mul", *)
OP_END

/* File: c/OP_DIV_DOUBLE_2ADDR.c */
HANDLE_OP_X_DOUBLE_2ADDR(OP_DIV_DOUBLE_2ADDR, "div", /)
OP_END
+
/* File: c/OP_REM_DOUBLE_2ADDR.c */
/*
 * rem-double/2addr vA, vB: in-place double remainder (vA = vA mod vB),
 * via fmod() since C's "%" does not accept doubles.
 */
HANDLE_OPCODE(OP_REM_DOUBLE_2ADDR /*vA, vB*/)
    vdst = INST_A(inst);
    vsrc1 = INST_B(inst);
    ILOGV("|%s-double-2addr v%d,v%d", "mod", vdst, vsrc1);
    SET_REGISTER_DOUBLE(vdst,
        fmod(GET_REGISTER_DOUBLE(vdst), GET_REGISTER_DOUBLE(vsrc1)));
    FINISH(1);
OP_END
+
/* File: c/OP_ADD_INT_LIT16.c */
HANDLE_OP_X_INT_LIT16(OP_ADD_INT_LIT16, "add", (s4), +, false)
OP_END

/* File: c/OP_RSUB_INT.c */
/*
 * rsub-int vA, vB, #+CCCC: reverse subtract, vA = literal - vB.
 * The (s2) cast sign-extends the 16-bit literal.  This is the lit16
 * slot where "sub-int/lit16" would be (plain subtraction of a literal
 * is covered by add-int/lit16 with a negated constant).
 */
HANDLE_OPCODE(OP_RSUB_INT /*vA, vB, #+CCCC*/)
    {
        vdst = INST_A(inst);
        vsrc1 = INST_B(inst);
        vsrc2 = FETCH(1);
        ILOGV("|rsub-int v%d,v%d,#+0x%04x", vdst, vsrc1, vsrc2);
        SET_REGISTER(vdst, (s2) vsrc2 - (s4) GET_REGISTER(vsrc1));
    }
    FINISH(2);
OP_END
+
/*
 * Remaining 16-bit-literal binops, plus the 8-bit-literal add and
 * reverse-subtract.  The (s1)/(s2) casts sign-extend the literal;
 * "true" on div/rem selects the divide-by-zero check.
 */
/* File: c/OP_MUL_INT_LIT16.c */
HANDLE_OP_X_INT_LIT16(OP_MUL_INT_LIT16, "mul", (s4), *, false)
OP_END

/* File: c/OP_DIV_INT_LIT16.c */
HANDLE_OP_X_INT_LIT16(OP_DIV_INT_LIT16, "div", (s4), /, true)
OP_END

/* File: c/OP_REM_INT_LIT16.c */
HANDLE_OP_X_INT_LIT16(OP_REM_INT_LIT16, "rem", (s4), %, true)
OP_END

/* File: c/OP_AND_INT_LIT16.c */
HANDLE_OP_X_INT_LIT16(OP_AND_INT_LIT16, "and", (s4), &, false)
OP_END

/* File: c/OP_OR_INT_LIT16.c */
HANDLE_OP_X_INT_LIT16(OP_OR_INT_LIT16,  "or",  (s4), |, false)
OP_END

/* File: c/OP_XOR_INT_LIT16.c */
HANDLE_OP_X_INT_LIT16(OP_XOR_INT_LIT16, "xor", (s4), ^, false)
OP_END

/* File: c/OP_ADD_INT_LIT8.c */
HANDLE_OP_X_INT_LIT8(OP_ADD_INT_LIT8,   "add", +, false)
OP_END

/* File: c/OP_RSUB_INT_LIT8.c */
/*
 * rsub-int/lit8 vAA, vBB, #+CC: reverse subtract with a sign-extended
 * 8-bit literal, vAA = literal - vBB.
 */
HANDLE_OPCODE(OP_RSUB_INT_LIT8 /*vAA, vBB, #+CC*/)
    {
        u2 litInfo;
        vdst = INST_AA(inst);
        litInfo = FETCH(1);
        vsrc1 = litInfo & 0xff;
        vsrc2 = litInfo >> 8;
        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", "rsub", vdst, vsrc1, vsrc2);
        SET_REGISTER(vdst, (s1) vsrc2 - (s4) GET_REGISTER(vsrc1));
    }
    FINISH(2);
OP_END
+
/*
 * Remaining 8-bit-literal binops and shifts, plus the unused opcode
 * slots 0xE3-0xED.  All are single expansions of the shared macros.
 */
/* File: c/OP_MUL_INT_LIT8.c */
HANDLE_OP_X_INT_LIT8(OP_MUL_INT_LIT8,   "mul", *, false)
OP_END

/* File: c/OP_DIV_INT_LIT8.c */
HANDLE_OP_X_INT_LIT8(OP_DIV_INT_LIT8,   "div", /, true)
OP_END

/* File: c/OP_REM_INT_LIT8.c */
HANDLE_OP_X_INT_LIT8(OP_REM_INT_LIT8,   "rem", %, true)
OP_END

/* File: c/OP_AND_INT_LIT8.c */
HANDLE_OP_X_INT_LIT8(OP_AND_INT_LIT8,   "and", &, false)
OP_END

/* File: c/OP_OR_INT_LIT8.c */
HANDLE_OP_X_INT_LIT8(OP_OR_INT_LIT8,    "or",  |, false)
OP_END

/* File: c/OP_XOR_INT_LIT8.c */
HANDLE_OP_X_INT_LIT8(OP_XOR_INT_LIT8,   "xor", ^, false)
OP_END

/* File: c/OP_SHL_INT_LIT8.c */
HANDLE_OP_SHX_INT_LIT8(OP_SHL_INT_LIT8,   "shl", (s4), <<)
OP_END

/* File: c/OP_SHR_INT_LIT8.c */
HANDLE_OP_SHX_INT_LIT8(OP_SHR_INT_LIT8,   "shr", (s4), >>)
OP_END

/* File: c/OP_USHR_INT_LIT8.c */
HANDLE_OP_SHX_INT_LIT8(OP_USHR_INT_LIT8,  "ushr", (u4), >>)
OP_END

/* File: c/OP_UNUSED_E3.c */
HANDLE_OPCODE(OP_UNUSED_E3)
OP_END

/* File: c/OP_UNUSED_E4.c */
HANDLE_OPCODE(OP_UNUSED_E4)
OP_END

/* File: c/OP_UNUSED_E5.c */
HANDLE_OPCODE(OP_UNUSED_E5)
OP_END

/* File: c/OP_UNUSED_E6.c */
HANDLE_OPCODE(OP_UNUSED_E6)
OP_END

/* File: c/OP_UNUSED_E7.c */
HANDLE_OPCODE(OP_UNUSED_E7)
OP_END

/* File: c/OP_UNUSED_E8.c */
HANDLE_OPCODE(OP_UNUSED_E8)
OP_END

/* File: c/OP_UNUSED_E9.c */
HANDLE_OPCODE(OP_UNUSED_E9)
OP_END

/* File: c/OP_UNUSED_EA.c */
HANDLE_OPCODE(OP_UNUSED_EA)
OP_END

/* File: c/OP_UNUSED_EB.c */
HANDLE_OPCODE(OP_UNUSED_EB)
OP_END

/* File: c/OP_UNUSED_EC.c */
HANDLE_OPCODE(OP_UNUSED_EC)
OP_END

/* File: c/OP_UNUSED_ED.c */
HANDLE_OPCODE(OP_UNUSED_ED)
OP_END
+
/* File: c/OP_EXECUTE_INLINE.c */
/*
 * execute-inline vB, {vD, vE, vF, vG}, inline@CCCC
 *
 * Invoke one of the VM's inline-native operations, selected by the
 * 16-bit index in "ref", passing up to four register values as untyped
 * u4 arguments.  The debug-interpreter build routes through the Dbg
 * variant of the dispatch function.
 */
HANDLE_OPCODE(OP_EXECUTE_INLINE /*vB, {vD, vE, vF, vG}, inline@CCCC*/)
    {
        /*
         * This has the same form as other method calls, but we ignore
         * the 5th argument (vA).  This is chiefly because the first four
         * arguments to a function on ARM are in registers.
         *
         * We only set the arguments that are actually used, leaving
         * the rest uninitialized.  We're assuming that, if the method
         * needs them, they'll be specified in the call.
         *
         * This annoys gcc when optimizations are enabled, causing a
         * "may be used uninitialized" warning.  We can quiet the warnings
         * for a slight penalty (5%: 373ns vs. 393ns on empty method).  Note
         * that valgrind is perfectly happy with this arrangement, because
         * the uninitialized values are never actually used.
         */
        u4 arg0, arg1, arg2, arg3;
        //arg0 = arg1 = arg2 = arg3 = 0;

        EXPORT_PC();

        vsrc1 = INST_B(inst);       /* #of args */
        ref = FETCH(1);             /* inline call "ref" */
        vdst = FETCH(2);            /* 0-4 register indices */
        ILOGV("|execute-inline args=%d @%d {regs=0x%04x}",
            vsrc1, ref, vdst);

        assert((vdst >> 16) == 0);  // 16-bit type -or- high 16 bits clear
        assert(vsrc1 <= 4);

        /* deliberate cascading fall-through: load only the args in use */
        switch (vsrc1) {
        case 4:
            arg3 = GET_REGISTER(vdst >> 12);
            /* fall through */
        case 3:
            arg2 = GET_REGISTER((vdst & 0x0f00) >> 8);
            /* fall through */
        case 2:
            arg1 = GET_REGISTER((vdst & 0x00f0) >> 4);
            /* fall through */
        case 1:
            arg0 = GET_REGISTER(vdst & 0x0f);
            /* fall through */
        default:        // case 0
            ;
        }

#if INTERP_TYPE == INTERP_DBG
        if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
            GOTO(exceptionThrown);
#else
        if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
            GOTO(exceptionThrown);
#endif
    }
    FINISH(3);
OP_END
+
/*
 * "Quick" variants of the instance-field and virtual/super invoke
 * handlers (presumably substituted in place of the resolved forms by
 * the optimizer — confirm against the dexopt code), plus the remaining
 * unused opcode slots.  OP_INVOKE_DIRECT_EMPTY is a deliberate no-op:
 * it just skips the 3-unit instruction without performing the call.
 */
/* File: c/OP_UNUSED_EF.c */
HANDLE_OPCODE(OP_UNUSED_EF)
OP_END

/* File: c/OP_INVOKE_DIRECT_EMPTY.c */
HANDLE_OPCODE(OP_INVOKE_DIRECT_EMPTY /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
    //LOGI("Ignoring empty\n");
    FINISH(3);
OP_END

/* File: c/OP_UNUSED_F1.c */
HANDLE_OPCODE(OP_UNUSED_F1)
OP_END

/* File: c/OP_IGET_QUICK.c */
HANDLE_IGET_X_QUICK(OP_IGET_QUICK,          "", Int, )
OP_END

/* File: c/OP_IGET_WIDE_QUICK.c */
HANDLE_IGET_X_QUICK(OP_IGET_WIDE_QUICK,     "-wide", Long, _WIDE)
OP_END

/* File: c/OP_IGET_OBJECT_QUICK.c */
HANDLE_IGET_X_QUICK(OP_IGET_OBJECT_QUICK,   "-object", Object, _AS_OBJECT)
OP_END

/* File: c/OP_IPUT_QUICK.c */
HANDLE_IPUT_X_QUICK(OP_IPUT_QUICK,          "", Int, )
OP_END

/* File: c/OP_IPUT_WIDE_QUICK.c */
HANDLE_IPUT_X_QUICK(OP_IPUT_WIDE_QUICK,     "-wide", Long, _WIDE)
OP_END

/* File: c/OP_IPUT_OBJECT_QUICK.c */
HANDLE_IPUT_X_QUICK(OP_IPUT_OBJECT_QUICK,   "-object", Object, _AS_OBJECT)
OP_END

/* File: c/OP_INVOKE_VIRTUAL_QUICK.c */
HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
    GOTO(invokeVirtualQuick, false);
OP_END

/* File: c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c */
HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK_RANGE/*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
    GOTO(invokeVirtualQuick, true);
OP_END

/* File: c/OP_INVOKE_SUPER_QUICK.c */
HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
    GOTO(invokeSuperQuick, false);
OP_END

/* File: c/OP_INVOKE_SUPER_QUICK_RANGE.c */
HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
    GOTO(invokeSuperQuick, true);
OP_END

/* File: c/OP_UNUSED_FC.c */
HANDLE_OPCODE(OP_UNUSED_FC)
OP_END

/* File: c/OP_UNUSED_FD.c */
HANDLE_OPCODE(OP_UNUSED_FD)
OP_END

/* File: c/OP_UNUSED_FE.c */
HANDLE_OPCODE(OP_UNUSED_FE)
OP_END

/* File: c/OP_UNUSED_FF.c */
HANDLE_OPCODE(OP_UNUSED_FF)
OP_END
+
+/* File: desktop/entry.c */
+/*
+ * Handler function table, one entry per opcode.
+ */
+#undef H
+#define H(_op) dvmMterp_##_op
+DEFINE_GOTO_TABLE(gDvmMterpHandlers)
+
+#undef H
+#define H(_op) #_op
+DEFINE_GOTO_TABLE(gDvmMterpHandlerNames)
+
+#include <setjmp.h>
+
+/*
+ * C mterp entry point.  This just calls the various C fallbacks, making
+ * this a slow but portable interpreter.
+ */
+bool dvmMterpStdRun(MterpGlue* glue)
+{
+    jmp_buf jmpBuf;
+    int changeInterp;
+
+    glue->bailPtr = &jmpBuf;
+
+    /*
+     * We want to return "changeInterp" as a boolean, but we can't return
+     * zero through longjmp, so we return (boolean+1).
+     */
+    changeInterp = setjmp(jmpBuf) -1;
+    if (changeInterp >= 0) {
+        Thread* threadSelf = dvmThreadSelf();
+        LOGVV("mterp threadid=%d returning %d\n",
+            threadSelf->threadId, changeInterp);
+        return changeInterp;
+    }
+
+    /*
+     * We may not be starting at a point where we're executing instructions.
+     * We need to pick up where the other interpreter left off.
+     *
+     * In some cases we need to call into a throw/return handler which
+     * will do some processing and then either return to us (updating "glue")
+     * or longjmp back out.
+     */
+    switch (glue->entryPoint) {
+    case kInterpEntryInstr:
+        /* just start at the start */
+        break;
+    case kInterpEntryReturn:
+        dvmMterp_returnFromMethod(glue);
+        break;
+    case kInterpEntryThrow:
+        dvmMterp_exceptionThrown(glue);
+        break;
+    default:
+        dvmAbort();
+    }
+
+    /* run until somebody longjmp()s out */
+    while (true) {
+        typedef void (*Handler)(MterpGlue* glue);
+
+        u2 inst = /*glue->*/pc[0];
+        Handler handler = (Handler) gDvmMterpHandlers[inst & 0xff];
+        LOGVV("handler %p %s\n",
+            handler, (const char*) gDvmMterpHandlerNames[inst & 0xff]);
+        (*handler)(glue);
+    }
+}
+
+/*
+ * C mterp exit point.  Call here to bail out of the interpreter.
+ */
+void dvmMterpStdBail(MterpGlue* glue, bool changeInterp)
+{
+    jmp_buf* pJmpBuf = glue->bailPtr;
+    longjmp(*pJmpBuf, ((int)changeInterp)+1);
+}
+
+
+/* File: c/footer.c */
+/*
+ * C footer.  This has some common code shared by the various targets.
+ */
+
+#define GOTO_TARGET(_target, ...)                                           \
+    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) {              \
+        u2 ref, vsrc1, vsrc2, vdst;                                         \
+        u2 inst = FETCH(0);                                                 \
+        const Method* methodToCall;                                         \
+        StackSaveArea* debugSaveArea;
+
+#define GOTO_TARGET_END }
+
+
+/*
+ * Everything from here on is a "goto target".  In the basic interpreter
+ * we jump into these targets and then jump directly to the handler for
+ * the next instruction.  Here, these are subroutines that return to the caller.
+ */
+
+GOTO_TARGET(filledNewArray, bool methodCallRange)
+    {
+        ClassObject* arrayClass;
+        ArrayObject* newArray;
+        int* contents;
+        char typeCh;
+        int i;
+        u4 arg5;
+
+        EXPORT_PC();
+
+        ref = FETCH(1);             /* class ref */
+        vdst = FETCH(2);            /* first 4 regs -or- range base */
+
+        if (methodCallRange) {
+            vsrc1 = INST_AA(inst);  /* #of elements */
+            arg5 = -1;              /* silence compiler warning */
+            ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+        } else {
+            arg5 = INST_A(inst);
+            vsrc1 = INST_B(inst);   /* #of elements */
+            ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1, ref, vdst, arg5);
+        }
+
+        /*
+         * Resolve the array class.
+         */
+        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (arrayClass == NULL) {
+            arrayClass = dvmResolveClass(method->clazz, ref, false);
+            if (arrayClass == NULL)
+                GOTO(exceptionThrown);
+        }
+        /*
+        if (!dvmIsArrayClass(arrayClass)) {
+            dvmThrowException("Ljava/lang/RuntimeError;",
+                "filled-new-array needs array class");
+            GOTO(exceptionThrown);
+        }
+        */
+        /* verifier guarantees this is an array class */
+        assert(dvmIsArrayClass(arrayClass));
+        assert(dvmIsClassInitialized(arrayClass));
+
+        /*
+         * Create an array of the specified type.
+         */
+        LOGVV("+++ filled-new-array type is '%s'\n", arrayClass->descriptor);
+        typeCh = arrayClass->descriptor[1];
+        if (typeCh == 'D' || typeCh == 'J') {
+            /* category 2 primitives not allowed */
+            dvmThrowException("Ljava/lang/RuntimeError;",
+                "bad filled array req");
+            GOTO(exceptionThrown);
+        } else if (typeCh == 'L' || typeCh == '[') {
+            /* create array of objects or array of arrays */
+            /* TODO: need some work in the verifier before we allow this */
+            LOGE("fnao not implemented\n");
+            dvmThrowException("Ljava/lang/InternalError;",
+                "filled-new-array not implemented for reference types");
+            GOTO(exceptionThrown);
+        } else if (typeCh != 'I') {
+            /* TODO: requires multiple "fill in" loops with different widths */
+            LOGE("non-int not implemented\n");
+            dvmThrowException("Ljava/lang/InternalError;",
+                "filled-new-array not implemented for anything but 'int'");
+            GOTO(exceptionThrown);
+        }
+
+        assert(strchr("BCIFZ", typeCh) != NULL);
+        newArray = dvmAllocPrimitiveArray(arrayClass->descriptor[1], vsrc1,
+                    ALLOC_DONT_TRACK);
+        if (newArray == NULL)
+            GOTO(exceptionThrown);
+
+        /*
+         * Fill in the elements.  It's legal for vsrc1 to be zero.
+         */
+        contents = (int*) newArray->contents;
+        if (methodCallRange) {
+            for (i = 0; i < vsrc1; i++)
+                contents[i] = GET_REGISTER(vdst+i);
+        } else {
+            assert(vsrc1 <= 5);
+            if (vsrc1 == 5) {
+                contents[4] = GET_REGISTER(arg5);
+                vsrc1--;
+            }
+            for (i = 0; i < vsrc1; i++) {
+                contents[i] = GET_REGISTER(vdst & 0x0f);
+                vdst >>= 4;
+            }
+        }
+
+        retval.l = newArray;
+    }
+    FINISH(3);
+GOTO_TARGET_END
+
+
+GOTO_TARGET(invokeVirtual, bool methodCallRange)
+    {
+        Method* baseMethod;
+        Object* thisPtr;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert(vsrc1 > 0);
+            ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert((vsrc1>>4) > 0);
+            ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+
+        if (!checkForNull(thisPtr))
+            GOTO(exceptionThrown);
+
+        /*
+         * Resolve the method.  This is the correct method for the static
+         * type of the object.  We also verify access permissions here.
+         */
+        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (baseMethod == NULL) {
+            baseMethod = dvmResolveMethod(method->clazz, ref, METHOD_VIRTUAL);
+            if (baseMethod == NULL) {
+                ILOGV("+ unknown method or access denied\n");
+                GOTO(exceptionThrown);
+            }
+        }
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method.
+         */
+        assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
+        methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            /*
+             * This can happen if you create two classes, Base and Sub, where
+             * Sub is a sub-class of Base.  Declare a protected abstract
+             * method foo() in Base, and invoke foo() from a method in Base.
+             * Base is an "abstract base class" and is never instantiated
+             * directly.  Now, override foo() in Sub, and use Sub.  This
+             * works fine unless Sub stops providing an implementation of
+             * the method.
+             */
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+
+        LOGVV("+++ base=%s.%s virtual[%d]=%s.%s\n",
+            baseMethod->clazz->descriptor, baseMethod->name,
+            (u4) baseMethod->methodIndex,
+            methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+#if 0
+        if (vsrc1 != methodToCall->insSize) {
+            LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s\n",
+                baseMethod->clazz->descriptor, baseMethod->name,
+                (u4) baseMethod->methodIndex,
+                methodToCall->clazz->descriptor, methodToCall->name);
+            //dvmDumpClass(baseMethod->clazz);
+            //dvmDumpClass(methodToCall->clazz);
+            dvmDumpAllClasses(0);
+        }
+#endif
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuper, bool methodCallRange)
+    {
+        Method* baseMethod;
+        u2 thisReg;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisReg = vdst & 0x0f;
+        }
+        /* impossible in well-formed code, but we must check nevertheless */
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            GOTO(exceptionThrown);
+
+        /*
+         * Resolve the method.  This is the correct method for the static
+         * type of the object.  We also verify access permissions here.
+         * The first arg to dvmResolveMethod() is just the referring class
+         * (used for class loaders and such), so we don't want to pass
+         * the superclass into the resolution call.
+         */
+        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (baseMethod == NULL) {
+            baseMethod = dvmResolveMethod(method->clazz, ref, METHOD_VIRTUAL);
+            if (baseMethod == NULL) {
+                ILOGV("+ unknown method or access denied\n");
+                GOTO(exceptionThrown);
+            }
+        }
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method's class.
+         *
+         * We're using the current method's class' superclass, not the
+         * superclass of "this".  This is because we might be executing
+         * in a method inherited from a superclass, and we want to run
+         * in that class' superclass.
+         */
+        if (baseMethod->methodIndex >= method->clazz->super->vtableCount) {
+            /*
+             * Method does not exist in the superclass.  Could happen if
+             * superclass gets updated.
+             */
+            dvmThrowException("Ljava/lang/NoSuchMethodError;",
+                baseMethod->name);
+            GOTO(exceptionThrown);
+        }
+        methodToCall = method->clazz->super->vtable[baseMethod->methodIndex];
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+        LOGVV("+++ base=%s.%s super-virtual=%s.%s\n",
+            baseMethod->clazz->descriptor, baseMethod->name,
+            methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeInterface, bool methodCallRange)
+    {
+        Object* thisPtr;
+        ClassObject* thisClass;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert(vsrc1 > 0);
+            ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert((vsrc1>>4) > 0);
+            ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+
+        if (!checkForNull(thisPtr))
+            GOTO(exceptionThrown);
+        thisClass = thisPtr->clazz;
+
+        /*
+         * Given a class and a method index, find the Method* with the
+         * actual code we want to execute.
+         */
+        methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, method,
+                        methodClassDex);
+        if (methodToCall == NULL) {
+            assert(dvmCheckException(self));
+            GOTO(exceptionThrown);
+        }
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeDirect, bool methodCallRange)
+    {
+        u2 thisReg;
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        EXPORT_PC();
+
+        if (methodCallRange) {
+            ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisReg = vdst & 0x0f;
+        }
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            GOTO(exceptionThrown);
+
+        methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (methodToCall == NULL) {
+            methodToCall = dvmResolveMethod(method->clazz, ref, METHOD_DIRECT);
+            if (methodToCall == NULL) {
+                ILOGV("+ unknown direct method\n");     // should be impossible
+                GOTO(exceptionThrown);
+            }
+        }
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeStatic, bool methodCallRange)
+    vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+    ref = FETCH(1);             /* method ref */
+    vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+    EXPORT_PC();
+
+    if (methodCallRange)
+        ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+            vsrc1, ref, vdst, vdst+vsrc1-1);
+    else
+        ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+            vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+    if (methodToCall == NULL) {
+        methodToCall = dvmResolveMethod(method->clazz, ref, METHOD_STATIC);
+        if (methodToCall == NULL) {
+            ILOGV("+ unknown method\n");
+            GOTO(exceptionThrown);
+        }
+    }
+    GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+    {
+        Object* thisPtr;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* vtable index */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert(vsrc1 > 0);
+            ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            assert((vsrc1>>4) > 0);
+            ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+
+        if (!checkForNull(thisPtr))
+            GOTO(exceptionThrown);
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method.
+         */
+        assert(ref < thisPtr->clazz->vtableCount);
+        methodToCall = thisPtr->clazz->vtable[ref];
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+
+        LOGVV("+++ virtual[%d]=%s.%s\n",
+            ref, methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+    {
+        u2 thisReg;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* vtable index */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisReg = vdst & 0x0f;
+        }
+        /* impossible in well-formed code, but we must check nevertheless */
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            GOTO(exceptionThrown);
+
+#if 0   /* impossible in optimized + verified code */
+        if (ref >= method->clazz->super->vtableCount) {
+            dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(ref < method->clazz->super->vtableCount);
+#endif
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method's class.
+         *
+         * We're using the current method's class' superclass, not the
+         * superclass of "this".  This is because we might be executing
+         * in a method inherited from a superclass, and we want to run
+         * in the method's class' superclass.
+         */
+        methodToCall = method->clazz->super->vtable[ref];
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowException("Ljava/lang/AbstractMethodError;",
+                "abstract method not implemented");
+            GOTO(exceptionThrown);
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+        LOGVV("+++ super-virtual[%d]=%s.%s\n",
+            ref, methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        GOTO(invokeMethod, methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+
+
+    /*
+     * General handling for return-void, return, and return-wide.  Put the
+     * return value in "retval" before jumping here.
+     */
+GOTO_TARGET(returnFromMethod)
+    {
+        StackSaveArea* saveArea;
+
+        /*
+         * We must do this BEFORE we pop the previous stack frame off, so
+         * that the GC can see the return value (if any) in the local vars.
+         *
+         * Since this is now an interpreter switch point, we must do it before
+         * we do anything at all.
+         */
+        PERIODIC_CHECKS(kInterpEntryReturn, 0);
+
+        ILOGV("> retval=0x%llx (leaving %s.%s %s)",
+            retval.j, method->clazz->descriptor, method->name,
+            method->signature);
+        //DUMP_REGS(method, fp);
+
+        saveArea = SAVEAREA_FROM_FP(fp);
+
+#ifdef EASY_GDB
+        debugSaveArea = saveArea;
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+        TRACE_METHOD_EXIT(self, method);
+#endif
+
+        /* back up to previous frame and see if we hit a break */
+        fp = saveArea->prevFrame;
+        assert(fp != NULL);
+        if (dvmIsBreakFrame(fp)) {
+            /* bail without popping the method frame from stack */
+            LOGVV("+++ returned into break frame\n");
+            GOTO_BAIL(false);
+        }
+
+        /* update thread FP, and reset local variables */
+        self->curFrame = fp;
+        method =
+#undef method       // ARRGH!
+            SAVEAREA_FROM_FP(fp)->method;
+#define method glue->method
+        //methodClass = method->clazz;
+        methodClassDex = method->clazz->pDvmDex;
+        pc = saveArea->savedPc;
+        ILOGD("> (return to %s.%s %s)", method->clazz->descriptor,
+            method->name, method->signature);
+
+        /* use FINISH on the caller's invoke instruction */
+        //u2 invokeInstr = INST_INST(FETCH(0));
+        if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+            invokeInstr <= OP_INVOKE_INTERFACE*/)
+        {
+            FINISH(3);
+        } else {
+            //LOGE("Unknown invoke instr %02x at %d\n",
+            //    invokeInstr, (int) (pc - method->insns));
+            assert(false);
+        }
+    }
+GOTO_TARGET_END
+
+
+    /*
+     * Jump here when the code throws an exception.
+     *
+     * By the time we get here, the Throwable has been created and the stack
+     * trace has been saved off.
+     */
+GOTO_TARGET(exceptionThrown)
+    {
+        Object* exception;
+        int catchRelPc;
+
+        /*
+         * Since this is now an interpreter switch point, we must do it before
+         * we do anything at all.
+         */
+        PERIODIC_CHECKS(kInterpEntryThrow, 0);
+
+        /*
+         * We save off the exception and clear the exception status.  While
+         * processing the exception we might need to load some Throwable
+         * classes, and we don't want class loader exceptions to get
+         * confused with this one.
+         */
+        assert(dvmCheckException(self));
+        exception = dvmGetException(self);
+        dvmAddTrackedAlloc(exception, self);
+        dvmClearException(self);
+
+        LOGV("Handling exception %s at %s:%d\n",
+            exception->clazz->descriptor, method->name,
+            dvmLineNumFromPC(method, pc - method->insns));
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+        /*
+         * Tell the debugger about it.
+         *
+         * TODO: if the exception was thrown by interpreted code, control
+         * fell through native, and then back to us, we will report the
+         * exception at the point of the throw and again here.  We can avoid
+         * this by not reporting exceptions when we jump here directly from
+         * the native call code above, but then we won't report exceptions
+         * that were thrown *from* the JNI code (as opposed to *through* it).
+         *
+         * The correct solution is probably to ignore from-native exceptions
+         * here, and have the JNI exception code do the reporting to the
+         * debugger.
+         */
+        if (gDvm.debuggerActive) {
+            void* catchFrame;
+            catchRelPc = dvmFindCatchBlock(self, pc - method->insns,
+                        exception, true, &catchFrame);
+            dvmDbgPostException(fp, pc - method->insns, catchFrame, catchRelPc,
+                exception);
+        }
+#endif
+
+        /*
+         * We need to unroll to the catch block or the nearest "break"
+         * frame.
+         *
+         * A break frame could indicate that we have reached an intermediate
+         * native call, or have gone off the top of the stack and the thread
+         * needs to exit.  Either way, we return from here, leaving the
+         * exception raised.
+         *
+         * If we do find a catch block, we want to transfer execution to
+         * that point.
+         */
+        catchRelPc = dvmFindCatchBlock(self, pc - method->insns,
+                    exception, false, (void*)&fp);
+
+        /*
+         * Restore the stack bounds after an overflow.  This isn't going to
+         * be correct in all circumstances, e.g. if JNI code devours the
+         * exception this won't happen until some other exception gets
+         * thrown.  If the code keeps pushing the stack bounds we'll end
+         * up aborting the VM.
+         */
+        if (self->stackOverflowed)
+            dvmCleanupStackOverflow(self);
+
+        if (catchRelPc < 0) {
+            /* falling through to JNI code or off the bottom of the stack */
+#if DVM_SHOW_EXCEPTION >= 2
+            LOGD("Exception %s from %s:%d not caught locally\n",
+                exception->clazz->descriptor, dvmGetMethodSourceFile(method),
+                dvmLineNumFromPC(method, pc - method->insns));
+#endif
+            dvmSetException(self, exception);
+            dvmReleaseTrackedAlloc(exception, self);
+            GOTO_BAIL(false);
+        }
+
+#if DVM_SHOW_EXCEPTION >= 3
+        {
+            const Method* catchMethod =
+#undef method
+                SAVEAREA_FROM_FP(fp)->method;
+#define method glue->method
+            LOGD("Exception %s thrown from %s:%d to %s:%d\n",
+                exception->clazz->descriptor, dvmGetMethodSourceFile(method),
+                dvmLineNumFromPC(method, pc - method->insns),
+                dvmGetMethodSourceFile(catchMethod),
+                dvmLineNumFromPC(catchMethod, catchRelPc));
+        }
+#endif
+
+        /*
+         * Adjust local variables to match self->curFrame and the
+         * updated PC.
+         */
+        //fp = (u4*) self->curFrame;
+        method =
+#undef method
+            SAVEAREA_FROM_FP(fp)->method;
+#define method glue->method
+        //methodClass = method->clazz;
+        methodClassDex = method->clazz->pDvmDex;
+        pc = method->insns + catchRelPc;
+        ILOGV("> pc <-- %s.%s %s", method->clazz->descriptor, method->name,
+            method->signature);
+        DUMP_REGS(method, fp, false);               // show all regs
+
+        /*
+         * Restore the exception if the handler wants it.
+         *
+         * The Dalvik spec mandates that, if an exception handler wants to
+         * do something with the exception, the first instruction executed
+         * must be "move-exception".  We can pass the exception along
+         * through the thread struct, and let the move-exception instruction
+         * clear it for us.
+         *
+         * If the handler doesn't call move-exception, we don't want to
+         * finish here with an exception still pending.
+         */
+        if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
+            dvmSetException(self, exception);
+
+        dvmReleaseTrackedAlloc(exception, self);
+        FINISH(0);
+    }
+GOTO_TARGET_END
+
+
+    /*
+     * General handling for invoke-{virtual,super,direct,static,interface},
+     * including "quick" variants.
+     *
+     * Set "methodToCall" to the Method we're calling, and "methodCallRange"
+     * depending on whether this is a "/range" instruction.
+     *
+     * For a range call:
+     *  "vsrc1" holds the argument count (8 bits)
+     *  "vdst" holds the first argument in the range
+     * For a non-range call:
+     *  "vsrc1" holds the argument count (4 bits) and the 5th argument index
+     *  "vdst" holds four 4-bit register indices
+     *
+     * The caller must EXPORT_PC before jumping here, because any method
+     * call can throw a stack overflow exception.
+     */
GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
    u2 count, u2 regs)
    {
        /* latch the incoming arguments into the interpreter's working vars */
        vsrc1 = count; vdst = regs; methodToCall = _methodToCall;  /* ADDED */

        //printf("range=%d call=%p count=%d regs=0x%04x\n",
        //    methodCallRange, methodToCall, count, regs);
        //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
        //    methodToCall->name, methodToCall->signature);

        u4* outs;
        int i;

        /*
         * Copy args.  This may corrupt vsrc1/vdst.
         */
        if (methodCallRange) {
            // could use memcpy or a "Duff's device"; most functions have
            // so few args it won't matter much
            assert(vsrc1 <= method->outsSize);
            assert(vsrc1 == methodToCall->insSize);
            outs = OUTS_FROM_FP(fp, vsrc1);
            for (i = 0; i < vsrc1; i++)
                outs[i] = GET_REGISTER(vdst+i);
        } else {
            /* non-range: arg count in the high nibble of vsrc1 */
            u4 count = vsrc1 >> 4;

            assert(count <= method->outsSize);
            assert(count == methodToCall->insSize);
            assert(count <= 5);

            outs = OUTS_FROM_FP(fp, count);
#if 0
            if (count == 5) {
                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
                count--;
            }
            for (i = 0; i < (int) count; i++) {
                outs[i] = GET_REGISTER(vdst & 0x0f);
                vdst >>= 4;
            }
#else
            // This version executes fewer instructions but is larger
            // overall.  Seems to be a teensy bit faster.
            assert((vdst >> 16) == 0);  // 16 bits -or- high 16 bits clear
            /* each case deliberately falls through to copy the lower args */
            switch (count) {
            case 5:
                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
            case 4:
                outs[3] = GET_REGISTER(vdst >> 12);
            case 3:
                outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
            case 2:
                outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
            case 1:
                outs[0] = GET_REGISTER(vdst & 0x0f);
            default:
                ;
            }
#endif
        }
    }

    /*
     * (This was originally a "goto" target; I've kept it separate from the
     * stuff above in case we want to refactor things again.)
     *
     * At this point, we have the arguments stored in the "outs" area of
     * the current method's stack frame, and the method to call in
     * "methodToCall".  Push a new stack frame.
     */
    {
        StackSaveArea* newSaveArea;
        u4* newFp;

        ILOGV("> %s%s.%s %s",
            dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
            methodToCall->clazz->descriptor, methodToCall->name,
            methodToCall->signature);

        /* new frame sits below the caller's save area (stack grows down) */
        newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
        newSaveArea = SAVEAREA_FROM_FP(newFp);

        /* verify that we have enough space */
        if (true) {
            u1* bottom;
            bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
            if (bottom < self->interpStackEnd) {
                /* stack overflow */
                LOGV("Stack overflow on method call (start=%p end=%p newBot=%p size=%d '%s')\n",
                    self->interpStackStart, self->interpStackEnd, bottom,
                    self->interpStackSize, methodToCall->name);
                dvmHandleStackOverflow(self);
                assert(dvmCheckException(self));
                GOTO(exceptionThrown);
            }
            //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p\n",
            //    fp, newFp, newSaveArea, bottom);
        }

#ifdef LOG_INSTR
        if (methodToCall->registersSize > methodToCall->insSize) {
            /*
             * This makes valgrind quiet when we print registers that
             * haven't been initialized.  Turn it off when the debug
             * messages are disabled -- we want valgrind to report any
             * used-before-initialized issues.
             */
            memset(newFp, 0xcc,
                (methodToCall->registersSize - methodToCall->insSize) * 4);
        }
#endif

#ifdef EASY_GDB
        newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
#endif
        newSaveArea->prevFrame = fp;
        newSaveArea->savedPc = pc;
        /*
         * "method" is macro-remapped (see the #define below); temporarily
         * undo that so we can store the callee in the save area's real
         * "method" field, then restore the remapping.
         */
#undef method
        newSaveArea->method = methodToCall;
#define method glue->method

        if (!dvmIsNativeMethod(methodToCall)) {
            /*
             * "Call" interpreted code.  Reposition the PC, update the
             * frame pointer and other local state, and continue.
             */
            method = methodToCall;
            methodClassDex = method->clazz->pDvmDex;
            pc = methodToCall->insns;
            fp = self->curFrame = newFp;
#ifdef EASY_GDB
            debugSaveArea = SAVEAREA_FROM_FP(newFp);
#endif
#if INTERP_TYPE == INTERP_DBG
            debugIsMethodEntry = true;              // profiling, debugging
#endif
            ILOGD("> pc <-- %s.%s %s", method->clazz->descriptor, method->name,
                method->signature);
            DUMP_REGS(method, fp, true);            // show input args
            FINISH(0);                              // jump to method start
        } else {
            /* set this up for JNI locals, even if not a JNI native */
            newSaveArea->xtra.localRefTop = self->jniLocalRefTable.nextEntry;

            self->curFrame = newFp;

            DUMP_REGS(methodToCall, newFp, true);   // show input args

#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
            if (gDvm.debuggerActive) {
                dvmDbgPostLocationEvent(methodToCall, -1,
                    dvmGetThisPtr(method, fp), DBG_METHOD_ENTRY);
            }
#endif
#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
            TRACE_METHOD_ENTER(self, methodToCall);
#endif

            ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
                methodToCall->name, methodToCall->signature);

            /*
             * Jump through native call bridge.  Because we leave no
             * space for locals on native calls, "newFp" points directly
             * to the method arguments.
             */
            (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);

#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
            if (gDvm.debuggerActive) {
                dvmDbgPostLocationEvent(methodToCall, -1,
                    dvmGetThisPtr(method, fp), DBG_METHOD_EXIT);
            }
#endif
#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
            TRACE_METHOD_EXIT(self, methodToCall);
#endif

            /* pop frame off */
            dvmPopJniLocals(self, newSaveArea);
            self->curFrame = fp;

            /*
             * If the native code threw an exception, or interpreted code
             * invoked by the native call threw one and nobody has cleared
             * it, jump to our local exception handling.
             */
            if (dvmCheckException(self)) {
                LOGV("Exception thrown by/below native code\n");
                GOTO(exceptionThrown);
            }

            ILOGD("> retval=0x%llx (leaving native)", retval.j);
            ILOGD("> (return from native %s.%s to %s.%s %s)",
                methodToCall->clazz->descriptor, methodToCall->name,
                method->clazz->descriptor, method->name,
                method->signature);

            //u2 invokeInstr = INST_INST(FETCH(0));
            if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
                invokeInstr <= OP_INVOKE_INTERFACE*/)
            {
                /* resume at the instruction after the 3-unit invoke */
                FINISH(3);
            } else {
                //LOGE("Unknown invoke instr %02x at %d\n",
                //    invokeInstr, (int) (pc - method->insns));
                assert(false);
            }
        }
    }
    assert(false);      // should not get here
GOTO_TARGET_END
+
+
+/* undefine "magic" name remapping */
+#undef retval
+#undef pc
+#undef fp
+#undef method
+#undef methodClassDex
+#undef self
+#undef debugTrackedRefStart
+
diff --git a/vm/mterp/rebuild.sh b/vm/mterp/rebuild.sh
new file mode 100755
index 0000000..79e37ac
--- /dev/null
+++ b/vm/mterp/rebuild.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+#
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Rebuild for all known targets.  Necessary until the stuff in "out" gets
+# generated as part of the build.
+#
set -e

# Build once per supported target architecture; stop at the first failure.
for arch in desktop armv5; do
    TARGET_ARCH_EXT=$arch make -f Makefile-mterp
done
diff --git a/vm/oo/AccessCheck.c b/vm/oo/AccessCheck.c
new file mode 100644
index 0000000..e3a6946
--- /dev/null
+++ b/vm/oo/AccessCheck.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Check access to fields and methods.
+ */
+#include "Dalvik.h"
+
/*
 * Count how many leading characters the two strings have in common.
 *
 * Walks both strings in lock-step and stops at the first mismatch or at
 * the end of "str1".  Returns the number of matching characters.
 */
static int strcmpCount(const char* str1, const char* str2)
{
    int n;

    for (n = 0; str1[n] != '\0' && str1[n] == str2[n]; n++)
        ;
    return n;
}
+
+/*
+ * Returns "true" if the two classes are in the same runtime package.
+ */
+bool dvmInSamePackage(const ClassObject* class1, const ClassObject* class2)
+{
+    /* quick test for intra-class access */
+    if (class1 == class2)
+        return true;
+
+    /* class loaders must match */
+    if (class1->classLoader != class2->classLoader)
+        return false;
+
+    /*
+     * Switch array classes to their element types.  Arrays receive the
+     * class loader of the underlying element type.  The point of doing
+     * this is to get the un-decorated class name, without all the
+     * "[[L...;" stuff.
+     */
+    if (dvmIsArrayClass(class1))
+        class1 = class1->elementClass;
+    if (dvmIsArrayClass(class2))
+        class2 = class2->elementClass;
+
+    /* check again */
+    if (class1 == class2)
+        return true;
+
+    /*
+     * We have two classes with different names.  Compare them and see
+     * if they match up through the final '/'.
+     *
+     *  Ljava/lang/Object; + Ljava/lang/Class;          --> true
+     *  LFoo;              + LBar;                      --> true
+     *  Ljava/lang/Object; + Ljava/io/File;             --> false
+     *  Ljava/lang/Object; + Ljava/lang/reflect/Method; --> false
+     */
+    int commonLen;
+
+    commonLen = strcmpCount(class1->descriptor, class2->descriptor);
+    if (strchr(class1->descriptor + commonLen, '/') != NULL ||
+        strchr(class2->descriptor + commonLen, '/') != NULL)
+    {
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Validate method/field access.
+ */
+static bool checkAccess(const ClassObject* accessFrom,
+    const ClassObject* accessTo, u4 accessFlags)
+{
+    /* quick accept for public access */
+    if (accessFlags & ACC_PUBLIC)
+        return true;
+
+    /* quick accept for access from same class */
+    if (accessFrom == accessTo)
+        return true;
+
+    /* quick reject for private access from another class */
+    if (accessFlags & ACC_PRIVATE)
+        return false;
+
+    /*
+     * Semi-quick test for protected access from a sub-class, which may or
+     * may not be in the same package.
+     */
+    if (accessFlags & ACC_PROTECTED)
+        if (dvmIsSubClass(accessFrom, accessTo))
+            return true;
+
+    /*
+     * Allow protected and private access from other classes in the same
+     * package.
+     */
+    return dvmInSamePackage(accessFrom, accessTo);
+}
+
+/*
+ * Determine whether the "accessFrom" class is allowed to get at "clazz".
+ *
+ * It's allowed if "clazz" is public or is in the same package.  (Only
+ * inner classes can be marked "private" or "protected", so we don't need
+ * to check for it here.)
+ */
+bool dvmCheckClassAccess(const ClassObject* accessFrom,
+    const ClassObject* clazz)
+{
+    if (dvmIsPublicClass(clazz))
+        return true;
+    return dvmInSamePackage(accessFrom, clazz);
+}
+
/*
 * Determine whether the "accessFrom" class is allowed to get at "method".
 *
 * Delegates to checkAccess() with the method's declaring class and
 * access flags.
 */
bool dvmCheckMethodAccess(const ClassObject* accessFrom, const Method* method)
{
    return checkAccess(accessFrom, method->clazz, method->accessFlags);
}
+
/*
 * Determine whether the "accessFrom" class is allowed to get at "field".
 *
 * Delegates to checkAccess() with the field's declaring class and
 * access flags.
 */
bool dvmCheckFieldAccess(const ClassObject* accessFrom, const Field* field)
{
    //LOGI("CHECK ACCESS from '%s' to field '%s' (in %s) flags=0x%x\n",
    //    accessFrom->descriptor, field->name,
    //    field->clazz->descriptor, field->accessFlags);
    return checkAccess(accessFrom, field->clazz, field->accessFlags);
}
+
diff --git a/vm/oo/AccessCheck.h b/vm/oo/AccessCheck.h
new file mode 100644
index 0000000..105c9e1
--- /dev/null
+++ b/vm/oo/AccessCheck.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Check access to fields and methods.
+ */
+#ifndef _DALVIK_OO_ACCESSCHECK
+#define _DALVIK_OO_ACCESSCHECK
+
+/*
+ * Determine whether the "accessFrom" class is allowed to get at "clazz".
+ */
+bool dvmCheckClassAccess(const ClassObject* accessFrom,
+    const ClassObject* clazz);
+
+/*
+ * Determine whether the "accessFrom" class is allowed to get at "method".
+ */
+bool dvmCheckMethodAccess(const ClassObject* accessFrom, const Method* method);
+
+/*
+ * Determine whether the "accessFrom" class is allowed to get at "field".
+ */
+bool dvmCheckFieldAccess(const ClassObject* accessFrom, const Field* field);
+
+/*
+ * Returns "true" if the two classes are in the same runtime package.
+ */
+bool dvmInSamePackage(const ClassObject* class1, const ClassObject* class2);
+
+#endif /*_DALVIK_OO_ACCESSCHECK*/
diff --git a/vm/oo/Array.c b/vm/oo/Array.c
new file mode 100644
index 0000000..cfe2456
--- /dev/null
+++ b/vm/oo/Array.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Array objects.
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+#include <stddef.h>
+
+#if WITH_HPROF && WITH_HPROF_STACK
+#include "hprof/Hprof.h"
+#endif
+
+static ClassObject* createArrayClass(const char* descriptor, Object* loader);
+static ClassObject* createPrimitiveClass(int idx);
+
+static const char gPrimLetter[] = PRIM_TYPE_TO_LETTER;
+
+/*
+ * Allocate space for a new array object.  This is the lowest-level array
+ * allocation function.
+ *
+ * Pass in the array class and the width of each element.
+ *
+ * On failure, returns NULL with an exception raised.
+ */
+ArrayObject* dvmAllocArray(ClassObject* arrayClass, size_t length,
+    size_t elemWidth, int allocFlags)
+{
+    ArrayObject* newArray;
+    size_t size;
+
+    assert(arrayClass->descriptor[0] == '[');
+
+    if (length > 0x0fffffff) {
+        /* too large and (length * elemWidth) will overflow 32 bits */
+        LOGE("Rejecting allocation of %u-element array\n", length);
+        dvmThrowBadAllocException("array size too large");
+        return NULL;
+    }
+
+    size = offsetof(ArrayObject, contents);
+    size += length * elemWidth;
+
+    /* Note that we assume that the Array class does not
+     * override finalize().
+     */
+    newArray = dvmMalloc(size, allocFlags);
+    if (newArray != NULL) {
+        DVM_OBJECT_INIT(&newArray->obj, arrayClass);
+        newArray->length = length;
+        LOGVV("AllocArray: %s [%d] (%d)\n",
+            arrayClass->descriptor, (int) length, (int) size);
+#if WITH_HPROF && WITH_HPROF_STACK
+        hprofFillInStackTrace(&newArray->obj);
+#endif
+        dvmTrackAllocation(arrayClass, size);
+    }
+    /* the caller must call dvmReleaseTrackedAlloc */
+    return newArray;
+}
+
+/*
+ * Create a new array, given an array class.  The class may represent an
+ * array of references or primitives.
+ */
+ArrayObject* dvmAllocArrayByClass(ClassObject* arrayClass,
+    size_t length, int allocFlags)
+{
+    const char* descriptor = arrayClass->descriptor;
+
+    assert(descriptor[0] == '[');       /* must be array class */
+    if (descriptor[1] != '[' && descriptor[1] != 'L') {
+        /* primitive array */
+        assert(descriptor[2] == '\0');
+        return dvmAllocPrimitiveArray(descriptor[1], length, allocFlags);
+    } else {
+        return dvmAllocArray(arrayClass, length, kObjectArrayRefWidth,
+            allocFlags);
+    }
+}
+
+/*
+ * Find the array class for "elemClassObj", which could itself be an
+ * array class.
+ */
+ClassObject* dvmFindArrayClassForElement(ClassObject* elemClassObj)
+{
+    ClassObject* arrayClass;
+
+    assert(elemClassObj != NULL);
+
+    if (elemClassObj->arrayClass != NULL) {
+        arrayClass = elemClassObj->arrayClass;
+        LOGVV("using cached '%s' class for '%s'\n",
+            arrayClass->descriptor, elemClassObj->descriptor);
+    } else {
+        /* Simply prepend "[" to the descriptor. */
+        int nameLen = strlen(elemClassObj->descriptor);
+        char className[nameLen + 2];
+
+        className[0] = '[';
+        memcpy(className+1, elemClassObj->descriptor, nameLen+1);
+        arrayClass = dvmFindArrayClass(className, elemClassObj->classLoader);
+        if (arrayClass != NULL)
+            elemClassObj->arrayClass = arrayClass;
+    }
+
+    return arrayClass;
+}
+
+/*
+ * Create a new array that holds references to members of the specified class.
+ *
+ * "elemClassObj" is the element type, and may itself be an array class.  It
+ * may not be a primitive class.
+ *
+ * "allocFlags" determines whether the new object will be added to the
+ * "tracked alloc" table.
+ *
+ * This is less efficient than dvmAllocArray(), but occasionally convenient.
+ */
+ArrayObject* dvmAllocObjectArray(ClassObject* elemClassObj, size_t length,
+    int allocFlags)
+{
+    ClassObject* arrayClass;
+    ArrayObject* newArray = NULL;
+
+    LOGVV("dvmAllocObjectArray: '%s' len=%d\n",
+        elemClassObj->descriptor, (int)length);
+
+    arrayClass = dvmFindArrayClassForElement(elemClassObj);
+    if (arrayClass != NULL) {
+        newArray = dvmAllocArray(arrayClass, length, kObjectArrayRefWidth,
+            allocFlags);
+    }
+
+    /* the caller must call dvmReleaseTrackedAlloc */
+    return newArray;
+}
+
/*
 * Create a new array that holds primitive types.
 *
 * "type" is the primitive type letter, e.g. 'I' for int or 'J' for long.
 * If the array class doesn't exist, it will be created.
 *
 * Returns NULL on failure (unknown type letter, missing array class, or
 * allocation failure).
 */
ArrayObject* dvmAllocPrimitiveArray(char type, size_t length, int allocFlags)
{
    ArrayObject* newArray;
    ClassObject** pTypeClass;
    int width;

    /* map the type letter to its cached class slot and element width */
    switch (type) {
    case 'I':
        pTypeClass = &gDvm.classArrayInt;
        width = 4;
        break;
    case 'C':
        pTypeClass = &gDvm.classArrayChar;
        width = 2;
        break;
    case 'B':
        pTypeClass = &gDvm.classArrayByte;
        width = 1;
        break;
    case 'Z':
        pTypeClass = &gDvm.classArrayBoolean;
        width = 1; /* special-case this? */
        break;
    case 'F':
        pTypeClass = &gDvm.classArrayFloat;
        width = 4;
        break;
    case 'D':
        pTypeClass = &gDvm.classArrayDouble;
        width = 8;
        break;
    case 'S':
        pTypeClass = &gDvm.classArrayShort;
        width = 2;
        break;
    case 'J':
        pTypeClass = &gDvm.classArrayLong;
        width = 8;
        break;
    default:
        LOGE("Unknown type '%c'\n", type);
        assert(false);
        return NULL;
    }

    /* first use of this type: look up the array class and cache it */
    if (*pTypeClass == NULL) {
        char typeClassName[3] = "[x";

        typeClassName[1] = type;

        *pTypeClass = dvmFindArrayClass(typeClassName, NULL);
        if (*pTypeClass == NULL) {
            LOGE("ERROR: failed to generate array class for '%s'\n",
                typeClassName);
            return NULL;
        }
    }

    newArray = dvmAllocArray(*pTypeClass, length, width, allocFlags);

    /* the caller must dvmReleaseTrackedAlloc if allocFlags==ALLOC_DEFAULT */
    return newArray;
}
+
/*
 * Recursively create an array with multiple dimensions.  Elements may be
 * Objects or primitive types.
 *
 * The dimension we're creating is in dimensions[0], so when we recurse
 * we advance the pointer.
 *
 * Returns NULL on failure; an exception is expected to be pending
 * (asserted below).
 */
ArrayObject* dvmAllocMultiArray(ClassObject* arrayClass, int curDim, 
    const int* dimensions)
{
    ArrayObject* newArray;
    const char* elemName = arrayClass->descriptor + 1; // Advance past one '['.

    LOGVV("dvmAllocMultiArray: class='%s' curDim=%d *dimensions=%d\n",
        arrayClass->descriptor, curDim, *dimensions);

    if (curDim == 0) {
        /* innermost dimension: allocate the actual leaf array */
        if (*elemName == 'L' || *elemName == '[') {
            LOGVV("  end: array class (obj) is '%s'\n",
                arrayClass->descriptor);
            newArray = dvmAllocArray(arrayClass, *dimensions,
                        kObjectArrayRefWidth, ALLOC_DEFAULT);
        } else {
            LOGVV("  end: array class (prim) is '%s'\n",
                arrayClass->descriptor);
            newArray = dvmAllocPrimitiveArray(
                    gPrimLetter[arrayClass->elementClass->primitiveType],
                    *dimensions, ALLOC_DEFAULT);
        }
    } else {
        ClassObject* subArrayClass;
        Object** contents;
        int i;

        /* if we have X[][], find X[] */
        subArrayClass = dvmFindArrayClass(elemName, arrayClass->classLoader);
        if (subArrayClass == NULL) {
            /* not enough '['s on the initial class? */
            assert(dvmCheckException(dvmThreadSelf()));
            return NULL;
        }
        assert(dvmIsArrayClass(subArrayClass));

        /* allocate the array that holds the sub-arrays */
        newArray = dvmAllocArray(arrayClass, *dimensions, kObjectArrayRefWidth,
                        ALLOC_DEFAULT);
        if (newArray == NULL) {
            assert(dvmCheckException(dvmThreadSelf()));
            return NULL;
        }

        /*
         * Create a new sub-array in every element of the array.
         */
        contents = (Object**) newArray->contents;
        for (i = 0; i < *dimensions; i++) {
            ArrayObject* newSubArray;

            newSubArray = dvmAllocMultiArray(subArrayClass, curDim-1,
                            dimensions+1);
            if (newSubArray == NULL) {
                /* drop the partially-filled outer array before bailing */
                dvmReleaseTrackedAlloc((Object*) newArray, NULL);
                assert(dvmCheckException(dvmThreadSelf()));
                return NULL;
            }

            /* store the reference in the parent, then release our hold */
            *contents++ = (Object*) newSubArray;
            dvmReleaseTrackedAlloc((Object*) newSubArray, NULL);
        }
    }

    /* caller must call dvmReleaseTrackedAlloc */
    return newArray;
}
+
+
+/*
+ * Find an array class, by name (e.g. "[I").
+ *
+ * If the array class doesn't exist, we generate it.
+ *
+ * If the element class doesn't exist, we return NULL (no exception raised).
+ */
+ClassObject* dvmFindArrayClass(const char* descriptor, Object* loader)
+{
+    ClassObject* clazz;
+
+    assert(descriptor[0] == '[');
+    //LOGV("dvmFindArrayClass: '%s' %p\n", descriptor, loader);
+
+    clazz = dvmLookupClass(descriptor, loader, false);
+    if (clazz == NULL) {
+        LOGV("Array class '%s' %p not found; creating\n", descriptor, loader);
+        clazz = createArrayClass(descriptor, loader);
+        if (clazz != NULL)
+            dvmAddInitiatingLoader(clazz, loader);
+    }
+
+    return clazz;
+}
+
+/*
+ * Create an array class (i.e. the class object for the array, not the
+ * array itself).  "descriptor" looks like "[C" or "[Ljava/lang/String;".
+ *
+ * If "descriptor" refers to an array of primitives, look up the
+ * primitive type's internally-generated class object.
+ *
+ * "loader" is the class loader of the class that's referring to us.  It's
+ * used to ensure that we're looking for the element type in the right
+ * context.  It does NOT become the class loader for the array class; that
+ * always comes from the base element class.
+ *
+ * Returns NULL with an exception raised on failure.
+ */
+static ClassObject* createArrayClass(const char* descriptor, Object* loader)
+{
+    ClassObject* newClass = NULL;
+    ClassObject* elementClass = NULL;
+    int arrayDim;
+    u4 extraFlags;
+
+    assert(descriptor[0] == '[');
+    assert(gDvm.classJavaLangClass != NULL);
+    assert(gDvm.classJavaLangObject != NULL);
+
+    /*
+     * Identify the underlying element class and the array dimension depth.
+     */
+    extraFlags = CLASS_ISARRAY;
+    if (descriptor[1] == '[') {
+        /* array of arrays; keep descriptor and grab stuff from parent */
+        ClassObject* outer;
+
+        outer = dvmFindClassNoInit(&descriptor[1], loader);
+        if (outer != NULL) {
+            /* want the base class, not "outer", in our elementClass */
+            elementClass = outer->elementClass;
+            arrayDim = outer->arrayDim + 1;
+            extraFlags |= CLASS_ISOBJECTARRAY;
+        } else {
+            assert(elementClass == NULL);     /* make sure we fail */
+        }
+    } else {
+        arrayDim = 1;
+        if (descriptor[1] == 'L') {
+            /* array of objects; strip off "[" and look up descriptor. */
+            const char* subDescriptor = &descriptor[1];
+            LOGVV("searching for element class '%s'\n", subDescriptor);
+            elementClass = dvmFindClassNoInit(subDescriptor, loader);
+            extraFlags |= CLASS_ISOBJECTARRAY;
+        } else {
+            /* array of a primitive type */
+            elementClass = dvmFindPrimitiveClass(descriptor[1]);
+        }
+    }
+
+    if (elementClass == NULL) {
+        /* failed */
+        assert(dvmCheckException(dvmThreadSelf()));
+        dvmFreeClassInnards(newClass);
+        dvmReleaseTrackedAlloc((Object*) newClass, NULL);
+        return NULL;
+    }
+
+    /*
+     * See if it's already loaded.  Array classes are always associated
+     * with the class loader of their underlying element type -- an array
+     * of Strings goes with the loader for java/lang/String -- so we need
+     * to look for it there.  (The caller should have checked for the
+     * existence of the class before calling here, but they did so with
+     * *their* class loader, not the element class' loader.)
+     *
+     * If we find it, the caller adds "loader" to the class' initiating
+     * loader list, which should prevent us from going through this again.
+     *
+     * This call is unnecessary if "loader" and "elementClass->classLoader"
+     * are the same, because our caller (dvmFindArrayClass) just did the
+     * lookup.  (Even if we get this wrong we still have correct behavior,
+     * because we effectively do this lookup again when we add the new
+     * class to the hash table -- necessary because of possible races with
+     * other threads.)
+     */
+    if (loader != elementClass->classLoader) {
+        LOGVV("--- checking for '%s' in %p vs. elem %p\n",
+            descriptor, loader, elementClass->classLoader);
+        newClass = dvmLookupClass(descriptor, elementClass->classLoader, false);
+        if (newClass != NULL) {
+            LOGV("--- we already have %s in %p, don't need in %p\n",
+                descriptor, elementClass->classLoader, loader);
+            return newClass;
+        }
+    }
+
+
+    /*
+     * Fill out the fields in the ClassObject.
+     *
+     * It is possible to execute some methods against arrays, because all
+     * arrays are instances of Object, so we need to set up a vtable.  We
+     * can just point at the one in Object.
+     *
+     * Array classes are simple enough that we don't need to do a full
+     * link step.
+     */
+    newClass = (ClassObject*) dvmMalloc(sizeof(*newClass), ALLOC_DEFAULT);
+    if (newClass == NULL)
+        return NULL;
+    DVM_OBJECT_INIT(&newClass->obj, gDvm.unlinkedJavaLangClass);
+    newClass->descriptorAlloc = strdup(descriptor);
+    newClass->descriptor = newClass->descriptorAlloc;
+    newClass->super = gDvm.classJavaLangObject;
+    newClass->vtableCount = gDvm.classJavaLangObject->vtableCount;
+    newClass->vtable = gDvm.classJavaLangObject->vtable;
+    newClass->primitiveType = PRIM_NOT;
+    newClass->elementClass = elementClass;
+    newClass->classLoader = elementClass->classLoader;
+    newClass->arrayDim = arrayDim;
+    newClass->status = CLASS_INITIALIZED;
+#if WITH_HPROF && WITH_HPROF_STACK
+    newClass->hprofSerialNumber = 0;
+    hprofFillInStackTrace(newClass);
+#endif
+
+    /* don't need to set newClass->objectSize */
+
+    /*
+     * All arrays have java/lang/Cloneable and java/io/Serializable as
+     * interfaces.  We need to set that up here, so that stuff like
+     * "instanceof" works right.
+     *
+     * Note: The GC could run during the call to dvmFindSystemClassNoInit(),
+     * so we need to make sure the class object is GC-valid while we're in
+     * there.  Do this by clearing the interface list so the GC will just
+     * think that the entries are null.
+     *
+     * TODO?
+     * We may want to cache these two classes to avoid the lookup, though
+     * it's not vital -- we only do it when creating an array class, not
+     * every time we create an array.  Better yet, create a single, global
+     * copy of "interfaces" and "iftable" somewhere near the start and
+     * just point to those (and remember not to free them for arrays).
+     */
+    newClass->interfaceCount = 2;
+    newClass->interfaces = (ClassObject**)dvmLinearAlloc(newClass->classLoader,
+                                sizeof(ClassObject*) * 2);
+    memset(newClass->interfaces, 0, sizeof(ClassObject*) * 2);
+    newClass->interfaces[0] =
+        dvmFindSystemClassNoInit("Ljava/lang/Cloneable;");
+    newClass->interfaces[1] =
+        dvmFindSystemClassNoInit("Ljava/io/Serializable;");
+    dvmLinearReadOnly(newClass->classLoader, newClass->interfaces);
+    if (newClass->interfaces[0] == NULL || newClass->interfaces[1] == NULL) {
+        LOGE("Unable to create array class '%s': missing interfaces\n",
+            descriptor);
+        dvmFreeClassInnards(newClass);
+        dvmThrowException("Ljava/lang/InternalError;", "missing array ifaces");
+        dvmReleaseTrackedAlloc((Object*) newClass, NULL);
+        return NULL;
+    }
+    /*
+     * We assume that Cloneable/Serializable don't have superinterfaces --
+     * normally we'd have to crawl up and explicitly list all of the
+     * supers as well.  These interfaces don't have any methods, so we
+     * don't have to worry about the ifviPool either.
+     */
+    newClass->iftableCount = 2;
+    newClass->iftable = (InterfaceEntry*) dvmLinearAlloc(newClass->classLoader,
+                                sizeof(InterfaceEntry) * 2);
+    memset(newClass->iftable, 0, sizeof(InterfaceEntry) * 2);
+    newClass->iftable[0].clazz = newClass->interfaces[0];
+    newClass->iftable[1].clazz = newClass->interfaces[1];
+    dvmLinearReadOnly(newClass->classLoader, newClass->iftable);
+
+    /*
+     * Inherit access flags from the element.  Arrays can't be used as a
+     * superclass or interface, so we want to add "final" and remove
+     * "interface".
+     *
+     * Don't inherit any non-standard flags (e.g., CLASS_FINALIZABLE)
+     * from elementClass.  We assume that the array class does not
+     * override finalize().
+     */
+    newClass->accessFlags = ((newClass->elementClass->accessFlags &
+                             ~ACC_INTERFACE) | ACC_FINAL) & JAVA_FLAGS_MASK;
+
+    /* Set the flags we determined above.
+     * This must happen after accessFlags is set.
+     */
+    SET_CLASS_FLAG(newClass, extraFlags);
+
+    if (!dvmAddClassToHash(newClass)) {
+        /*
+         * Another thread must have loaded the class after we
+         * started but before we finished.  Discard what we've
+         * done and leave some hints for the GC.
+         */
+        LOGI("WOW: somebody generated %s simultaneously\n",
+            newClass->descriptor);
+
+        /* Clean up the class before letting the
+         * GC get its hands on it.
+         */
+        assert(newClass->obj.clazz == gDvm.unlinkedJavaLangClass);
+        dvmFreeClassInnards(newClass);
+
+        /* Let the GC free the class.
+         */
+        dvmReleaseTrackedAlloc((Object*) newClass, NULL);
+
+        /* Grab the winning class.
+         */
+        newClass = dvmLookupClass(descriptor, elementClass->classLoader, false);
+        assert(newClass != NULL);
+        return newClass;
+    }
+
+    /* make it available to the GC */
+    newClass->obj.clazz = gDvm.classJavaLangClass;
+    dvmReleaseTrackedAlloc((Object*) newClass, NULL);
+
+    LOGV("Created array class '%s' %p (access=0x%04x.%04x)\n",
+        descriptor, newClass->classLoader,
+        newClass->accessFlags >> 16,
+        newClass->accessFlags & JAVA_FLAGS_MASK);
+
+    return newClass;
+}
+
+/*
+ * Get a class we generated for the primitive types.
+ *
+ * These correspond to e.g. Integer.TYPE, and are used as the element
+ * class in arrays of primitives.
+ *
+ * "type" should be 'I', 'J', 'Z', etc.
+ */
+ClassObject* dvmFindPrimitiveClass(char type)
+{
+    int idx;
+
+    /* map the type descriptor character to a PrimitiveType table index */
+    switch (type) {
+    case 'Z':
+        idx = PRIM_BOOLEAN;
+        break;
+    case 'C':
+        idx = PRIM_CHAR;
+        break;
+    case 'F':
+        idx = PRIM_FLOAT;
+        break;
+    case 'D':
+        idx = PRIM_DOUBLE;
+        break;
+    case 'B':
+        idx = PRIM_BYTE;
+        break;
+    case 'S':
+        idx = PRIM_SHORT;
+        break;
+    case 'I':
+        idx = PRIM_INT;
+        break;
+    case 'J':
+        idx = PRIM_LONG;
+        break;
+    case 'V':
+        idx = PRIM_VOID;
+        break;
+    default:
+        /* unknown descriptor char: loud failure in debug, NULL in release */
+        LOGE("Unknown primitive type '%c'\n", type);
+        assert(false);
+        return NULL;
+    }
+
+    /*
+     * Create the primitive class if it hasn't already been, and add it
+     * to the table.
+     *
+     * NOTE(review): the CAS casts ClassObject** to int* and the pointer to
+     * int, which assumes 32-bit pointers -- TODO confirm for other targets.
+     * NOTE(review): if createPrimitiveClass() returns NULL (OOM) this CAS
+     * publishes/returns NULL -- verify callers tolerate that.
+     */
+    if (gDvm.primitiveClass[idx] == NULL) {
+        ClassObject* primClass = createPrimitiveClass(idx);
+        dvmReleaseTrackedAlloc((Object*) primClass, NULL);
+
+        if (!ATOMIC_CMP_SWAP((int*) &gDvm.primitiveClass[idx],
+            0, (int) primClass))
+        {
+            /*
+             * Looks like somebody beat us to it.  Free up the one we
+             * just created and use the other one.
+             */
+            dvmFreeClassInnards(primClass);
+        }
+    }
+
+    return gDvm.primitiveClass[idx];
+}
+
+/*
+ * Synthesize a primitive class.
+ *
+ * The spec for java.lang.Class.isPrimitive describes the names to
+ * be used for these classes.
+ *
+ * Just creates the class and returns it (does not add it to the class list).
+ */
+static ClassObject* createPrimitiveClass(int idx)
+{
+    /* descriptor strings, indexed by PrimitiveType (order must match) */
+    static const char* kClassDescriptors[PRIM_MAX] = {
+        "Z", "C", "F", "D", "B", "S", "I", "J", "V"
+    };
+    ClassObject* newClass;
+
+    assert(gDvm.classJavaLangClass != NULL);
+    assert(idx >= 0 && idx < PRIM_MAX);
+
+    /*
+     * Allocate and fill out a skeletal ClassObject.
+     *
+     * Primitive classes deliberately have no superclass -- not even
+     * java/lang/Object -- which matters for "instanceof" checks.  We
+     * also assume they do not override finalize().
+     */
+    newClass = (ClassObject*) dvmMalloc(sizeof(*newClass), ALLOC_DEFAULT);
+    if (newClass == NULL)
+        return NULL;
+
+    DVM_OBJECT_INIT(&newClass->obj, gDvm.classJavaLangClass);
+    newClass->descriptor = kClassDescriptors[idx];
+    newClass->descriptorAlloc = NULL;       /* descriptor is a static string */
+    newClass->accessFlags = ACC_PUBLIC | ACC_FINAL | ACC_ABSTRACT;
+    newClass->primitiveType = idx;
+    newClass->status = CLASS_INITIALIZED;   /* nothing to link or initialize */
+#if WITH_HPROF && WITH_HPROF_STACK
+    newClass->hprofSerialNumber = 0;
+    hprofFillInStackTrace(newClass);
+#endif
+
+    /* newClass->objectSize is intentionally left unset */
+
+    LOGVV("Created primitive class '%s'\n", kClassDescriptors[idx]);
+
+    return newClass;
+}
+
+/*
+ * Copy the entire contents of one array of objects to another.  If the copy
+ * is impossible because of a type clash, we fail and return "false".
+ */
+bool dvmCopyObjectArray(ArrayObject* dstArray, const ArrayObject* srcArray,
+    ClassObject* dstElemClass)
+{
+    /*
+     * Copy every reference in "srcArray" to "dstArray", verifying that
+     * each non-null element is assignable to "dstElemClass".  Returns
+     * false (with dst partially written) on the first type clash.
+     */
+    Object** src = (Object**)srcArray->contents;
+    Object** dst = (Object**)dstArray->contents;
+    u4 count = dstArray->length;
+
+    assert(srcArray->length == dstArray->length);
+    assert(dstArray->obj.clazz->elementClass == dstElemClass ||
+        (dstArray->obj.clazz->elementClass == dstElemClass->elementClass &&
+         dstArray->obj.clazz->arrayDim == dstElemClass->arrayDim+1));
+
+    while (count--) {
+        Object* obj = *src++;
+
+        /*
+         * Fix: a null reference is assignable to any reference array
+         * type, so only type-check non-null elements.  The previous
+         * code dereferenced (*src)->clazz unconditionally and would
+         * crash on a source array containing null.
+         */
+        if (obj != NULL && !dvmInstanceof(obj->clazz, dstElemClass)) {
+            LOGW("dvmCopyObjectArray: can't store %s in %s\n",
+                obj->clazz->descriptor, dstElemClass->descriptor);
+            return false;
+        }
+        *dst++ = obj;
+    }
+
+    return true;
+}
+
+/*
+ * Add all primitive classes to the root set of objects.
+ * TODO: do these belong to the root class loader?
+ */
+void dvmGcScanPrimitiveClasses()
+{
+    int idx;
+
+    /* unused slots are NULL; dvmMarkObject is expected to tolerate that
+     * (the original noted "may be NULL" here) */
+    for (idx = 0; idx < PRIM_MAX; idx++)
+        dvmMarkObject((Object *)gDvm.primitiveClass[idx]);
+}
+
diff --git a/vm/oo/Array.h b/vm/oo/Array.h
new file mode 100644
index 0000000..868e48a
--- /dev/null
+++ b/vm/oo/Array.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Array handling.
+ */
+#ifndef _DALVIK_OO_ARRAY
+#define _DALVIK_OO_ARRAY
+
+/* width of an object reference, for arrays of objects */
+#define kObjectArrayRefWidth    sizeof(Object*)
+
+/*
+ * Find a matching array class.  If it doesn't exist, create it.
+ *
+ * "descriptor" looks like "[I".
+ *
+ * "loader" should be the defining class loader for the elements held
+ * in the array.
+ */
+ClassObject* dvmFindArrayClass(const char* descriptor, Object* loader);
+
+/*
+ * Find the array class for the specified class.  If "elemClassObj" is the
+ * class "Foo", this returns the class object for "[Foo".
+ */
+ClassObject* dvmFindArrayClassForElement(ClassObject* elemClassObj);
+
+/*
+ * Allocate space for a new array object.
+ *
+ * "allocFlags" determines whether the new object will be added to the
+ * "tracked alloc" table.
+ *
+ * Returns NULL with an exception raised if allocation fails.
+ */
+ArrayObject* dvmAllocArray(ClassObject* arrayClass, size_t length,
+    size_t elemWidth, int allocFlags);
+
+/*
+ * Create a new array, given an array class.  The class may represent an
+ * array of references or primitives.
+ *
+ * Returns NULL with an exception raised if allocation fails.
+ */
+ArrayObject* dvmAllocArrayByClass(ClassObject* arrayClass,
+    size_t length, int allocFlags);
+
+/*
+ * Create a new array that holds references to members of the specified class.
+ *
+ * "elemClassObj" is the element type, and may itself be an array class.  It
+ * may not be a primitive class.
+ *
+ * "allocFlags" determines whether the new object will be added to the
+ * "tracked alloc" table.
+ *
+ * This is less efficient than dvmAllocArray(), but occasionally convenient.
+ *
+ * Returns NULL with an exception raised if allocation fails.
+ */
+ArrayObject* dvmAllocObjectArray(ClassObject* elemClassObj, size_t length,
+    int allocFlags);
+
+/*
+ * Allocate an array whose members are primitives (bools, ints, etc.).
+ *
+ * "type" should be 'I', 'J', 'Z', etc.
+ *
+ * The new object will be added to the "tracked alloc" table.
+ *
+ * Returns NULL with an exception raised if allocation fails.
+ */
+ArrayObject* dvmAllocPrimitiveArray(char type, size_t length, int allocFlags);
+
+/*
+ * Allocate an array with multiple dimensions.  Elements may be Objects or
+ * primitive types.
+ *
+ * The base object will be added to the "tracked alloc" table.
+ *
+ * Returns NULL with an exception raised if allocation fails.
+ */
+ArrayObject* dvmAllocMultiArray(ClassObject* arrayClass, int curDim,
+    const int* dimensions);
+
+/*
+ * Find the synthesized object for the primitive class, generating it
+ * if this is the first reference.
+ */
+ClassObject* dvmFindPrimitiveClass(char type);
+
+/*
+ * Verify that the object is actually an array.
+ *
+ * Does not verify that the object is actually a non-NULL object.
+ */
+INLINE bool dvmIsArray(const ArrayObject* arrayObj)
+{
+    /* array types are exactly those whose class descriptor starts with '[' */
+    return ((Object*)arrayObj)->clazz->descriptor[0] == '[';
+}
+
+/*
+ * Verify that the class is an array class.
+ *
+ * TODO: there may be some performance advantage to setting a flag in
+ * the accessFlags field instead of chasing into the name string.
+ */
+INLINE bool dvmIsArrayClass(const ClassObject* clazz)
+{
+    /* a leading '[' in the descriptor identifies an array class */
+    return clazz->descriptor[0] == '[';
+}
+
+/*
+ * Copy the entire contents of one array of objects to another.  If the copy
+ * is impossible because of a type clash, we fail and return "false".
+ *
+ * "dstElemClass" is the type of element that "dstArray" holds.
+ */
+bool dvmCopyObjectArray(ArrayObject* dstArray, const ArrayObject* srcArray,
+    ClassObject* dstElemClass);
+
+#endif /*_DALVIK_OO_ARRAY*/
diff --git a/vm/oo/Class.c b/vm/oo/Class.c
new file mode 100644
index 0000000..757ea1b
--- /dev/null
+++ b/vm/oo/Class.c
@@ -0,0 +1,4353 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Class loading, including bootstrap class loader, linking, and
+ * initialization.
+ */
+
+#define LOG_CLASS_LOADING 0
+
+#include "Dalvik.h"
+#include "libdex/DexClass.h"
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <sys/stat.h>
+
+#if LOG_CLASS_LOADING
+#include <unistd.h>
+#include <pthread.h>
+#include <cutils/process_name.h>
+#include <sys/types.h>
+#endif
+
+/*
+Notes on Linking and Verification
+
+The basic way to retrieve a class is to load it, make sure its superclass
+and interfaces are available, prepare its fields, and return it.  This gets
+a little more complicated when multiple threads can be trying to retrieve
+the class simultaneously, requiring that we use the class object's monitor
+to keep things orderly.
+
+The linking (preparing, resolving) of a class can cause us to recursively
+load superclasses and interfaces.  Barring circular references (e.g. two
+classes that are superclasses of each other), this will complete without
+the loader attempting to access the partially-linked class.
+
+With verification, the situation is different.  If we try to verify
+every class as we load it, we quickly run into trouble.  Even the lowly
+java.lang.Object requires CloneNotSupportedException; follow the list
+of referenced classes and you can head down quite a trail.  The trail
+eventually leads back to Object, which is officially not fully-formed yet.
+
+The VM spec (specifically, v2 5.4.1) notes that classes pulled in during
+verification do not need to be prepared or verified.  This means that we
+are allowed to have loaded but unverified classes.  It further notes that
+the class must be verified before it is initialized, which allows us to
+defer verification for all classes until class init.  You can't execute
+code or access fields in an uninitialized class, so this is safe.
+
+It also allows a more peaceful coexistence between verified and
+unverifiable code.  If class A refers to B, and B has a method that
+refers to a bogus class C, should we allow class A to be verified?
+If A only exercises parts of B that don't use class C, then there is
+nothing wrong with running code in A.  We can fully verify both A and B,
+and allow execution to continue until B causes initialization of C.  The
+VerifyError is thrown close to the point of use.
+
+This gets a little weird with java.lang.Class, which is the only class
+that can be instantiated before it is initialized.  We have to force
+initialization right after the class is created, because by definition we
+have instances of it on the heap, and somebody might get a class object and
+start making virtual calls on it.  We can end up going recursive during
+verification of java.lang.Class, but we avoid that by checking to see if
+verification is already in progress before we try to initialize it.
+*/
+
+/*
+Notes on class loaders and interaction with optimization / verification
+
+In what follows, "pre-verification" and "optimization" are the steps
+performed by the dexopt command, which attempts to verify and optimize
+classes as part of unpacking jar files and storing the DEX data in the
+dalvik-cache directory.  These steps are performed by loading the DEX
+files directly, without any assistance from ClassLoader instances.
+
+When we pre-verify and optimize a class in a DEX file, we make some
+assumptions about where the class loader will go to look for classes.
+If we can't guarantee those assumptions, e.g. because a class ("AppClass")
+references something not defined in the bootstrap jars or the AppClass jar,
+we can't pre-verify or optimize the class.
+
+The VM doesn't define the behavior of user-defined class loaders.
+For example, suppose application class AppClass, loaded by UserLoader,
+has a method that creates a java.lang.String.  The first time
+AppClass.stringyMethod tries to do something with java.lang.String, it
+asks UserLoader to find it.  UserLoader is expected to defer to its parent
+loader, but isn't required to.  UserLoader might provide a replacement
+for String.
+
+We can run into trouble if we pre-verify AppClass with the assumption that
+java.lang.String will come from core.jar, and don't verify this assumption
+at runtime.  There are two places that an alternate implementation of
+java.lang.String can come from: the AppClass jar, or from some other jar
+that UserLoader knows about.  (Someday UserLoader will be able to generate
+some bytecode and call DefineClass, but not yet.)
+
+To handle the first situation, the pre-verifier will explicitly check for
+conflicts between the class being optimized/verified and the bootstrap
+classes.  If an app jar contains a class that has the same package and
+class name as a class in a bootstrap jar, the verification resolver refuses
+to find either, which will block pre-verification and optimization on
+classes that reference ambiguity.  The VM will postpone verification of
+the app class until first load.
+
+For the second situation, we need to ensure that all references from a
+pre-verified class are satisfied by the class' jar or earlier bootstrap
+jars.  In concrete terms: when resolving a reference to NewClass,
+which was caused by a reference in class AppClass, we check to see if
+AppClass was pre-verified.  If so, we require that NewClass comes out
+of either the AppClass jar or one of the jars in the bootstrap path.
+(We may not control the class loaders, but we do manage the DEX files.
+We can verify that it's either (loader==null && dexFile==a_boot_dex)
+or (loader==UserLoader && dexFile==AppClass.dexFile).  Classes from
+DefineClass can't be pre-verified, so this doesn't apply.)
+
+This should ensure that you can't "fake out" the pre-verifier by creating
+a user-defined class loader that replaces system classes.  It should
+also ensure that you can write such a loader and have it work in the
+expected fashion; all you lose is some performance due to "just-in-time
+verification" and the lack of DEX optimizations.
+
+There is a "back door" of sorts in the class resolution check, due to
+the fact that the "class ref" entries are shared between the bytecode
+and meta-data references (e.g. annotations and exception handler lists).
+The class references in annotations have no bearing on class verification,
+so when a class does an annotation query that causes a class reference
+index to be resolved, we don't want to fail just because the calling
+class was pre-verified and the resolved class is in some random DEX file.
+The successful resolution adds the class to the "resolved classes" table,
+so when optimized bytecode references it we don't repeat the resolve-time
+check.  We can avoid this by not updating the "resolved classes" table
+when the class reference doesn't come out of something that has been
+checked by the verifier, but that has a nonzero performance impact.
+Since the ultimate goal of this test is to catch an unusual situation
+(user-defined class loaders redefining core classes), the added caution
+may not be worth the performance hit.
+*/
+
+static ClassPathEntry* processClassPath(const char* pathStr, bool isBootstrap);
+static void freeCpeArray(ClassPathEntry* cpe);
+
+static ClassObject* findClassFromLoaderNoInit(
+    const char* descriptor, Object* loader);
+static ClassObject* findClassNoInit(const char* descriptor, Object* loader,\
+    DvmDex* pDvmDex);
+static ClassObject* loadClassFromDex(DvmDex* pDvmDex,
+    const DexClassDef* pClassDef, Object* loader);
+static void loadMethodFromDex(ClassObject* clazz, const DexMethod* pDexMethod,
+    Method* meth);
+static int computeJniArgInfo(const DexProto* proto);
+static void loadSFieldFromDex(ClassObject* clazz,
+    const DexField* pDexSField, StaticField* sfield);
+static void loadIFieldFromDex(ClassObject* clazz,
+    const DexField* pDexIField, InstField* field);
+static void freeMethodInnards(Method* meth);
+static bool createVtable(ClassObject* clazz);
+static bool createIftable(ClassObject* clazz);
+static bool insertMethodStubs(ClassObject* clazz);
+static bool computeFieldOffsets(ClassObject* clazz);
+static void throwEarlierClassFailure(ClassObject* clazz);
+
+#if LOG_CLASS_LOADING
+/*
+ * Logs information about a class loading with given timestamp.
+ */
+static void logClassLoadWithTime(char type, ClassObject* clazz, u8 time) {
+    pid_t ppid = getppid();
+    pid_t pid = getpid();
+    /* NOTE(review): truncating pthread_self() to unsigned int assumes the
+     * handle fits in 32 bits -- fine on Android/bionic, confirm elsewhere */
+    unsigned int tid = (unsigned int) pthread_self();
+
+    /* one colon-separated record: tag, lineage, loader ptr, class, time.
+     * NOTE(review): classLoader is printed via an (int) cast with %d --
+     * another 32-bit-pointer assumption */
+    LOG(LOG_INFO, "PRELOAD", "%c%d:%d:%d:%s:%d:%s:%lld\n", type, ppid, pid, tid,
+        get_process_name(), (int) clazz->classLoader, clazz->descriptor,
+        time);
+}
+
+/*
+ * Logs information about a class loading.
+ */
+static void logClassLoad(char type, ClassObject* clazz) {
+    /* convenience wrapper: stamp the event with current thread CPU time */
+    logClassLoadWithTime(type, clazz, dvmGetThreadCpuTimeNsec());
+}
+#endif
+
+/* 
+ * Some LinearAlloc unit tests.
+ */
+static void linearAllocTests()
+{
+    char* fiddle;
+    int try = 1;    /* selects which near-boundary size to exercise below */
+
+    /*
+     * The 3200-N sizes appear tuned to land near an internal LinearAlloc
+     * boundary (the deltas look like per-alloc header probing) -- TODO
+     * confirm against LinearAlloc internals.  Only the case selected by
+     * "try" runs.
+     */
+    switch (try) {
+    case 0:
+        fiddle = dvmLinearAlloc(NULL, 3200-28);
+        dvmLinearReadOnly(NULL, fiddle);
+        break;
+    case 1:
+        fiddle = dvmLinearAlloc(NULL, 3200-24);
+        dvmLinearReadOnly(NULL, fiddle);
+        break;
+    case 2:
+        fiddle = dvmLinearAlloc(NULL, 3200-20);
+        dvmLinearReadOnly(NULL, fiddle);
+        break;
+    case 3:
+        fiddle = dvmLinearAlloc(NULL, 3200-16);
+        dvmLinearReadOnly(NULL, fiddle);
+        break;
+    case 4:
+        fiddle = dvmLinearAlloc(NULL, 3200-12);
+        dvmLinearReadOnly(NULL, fiddle);
+        break;
+    }
+
+    /* a couple of mid-size allocations */
+    fiddle = dvmLinearAlloc(NULL, 896);
+    dvmLinearReadOnly(NULL, fiddle);
+    fiddle = dvmLinearAlloc(NULL, 20);      // watch addr of this alloc
+    dvmLinearReadOnly(NULL, fiddle);
+
+    /* minimum-size alloc, then a 4KB alloc written at both ends */
+    fiddle = dvmLinearAlloc(NULL, 1);
+    fiddle[0] = 'q';
+    dvmLinearReadOnly(NULL, fiddle);
+    fiddle = dvmLinearAlloc(NULL, 4096);
+    fiddle[0] = 'x';
+    fiddle[4095] = 'y';
+    dvmLinearReadOnly(NULL, fiddle);
+    dvmLinearFree(NULL, fiddle);
+
+    /* zero-length alloc, then grow and shrink it with realloc */
+    fiddle = dvmLinearAlloc(NULL, 0);
+    dvmLinearReadOnly(NULL, fiddle);
+    fiddle = dvmLinearRealloc(NULL, fiddle, 12);
+    fiddle[11] = 'z';
+    dvmLinearReadOnly(NULL, fiddle);
+    fiddle = dvmLinearRealloc(NULL, fiddle, 5);
+    dvmLinearReadOnly(NULL, fiddle);
+
+    /* multi-page alloc written at both ends */
+    fiddle = dvmLinearAlloc(NULL, 17001);
+    fiddle[0] = 'x';
+    fiddle[17000] = 'y';
+    dvmLinearReadOnly(NULL, fiddle);
+
+    char* str = dvmLinearStrdup(NULL, "This is a test!");
+    LOGI("GOT: '%s'\n", str);
+
+    /* dump allocator state, then release the string */
+    dvmLinearAllocDump(NULL);
+    dvmLinearFree(NULL, str);
+}
+
+/*
+ * Initialize the bootstrap class loader.
+ *
+ * Call this after the bootclasspath string has been finalized.
+ */
+bool dvmClassStartup(void)
+{
+    ClassObject* unlinkedClass;
+
+    /* make this a requirement -- don't currently support dirs in path */
+    if (strcmp(gDvm.bootClassPathStr, ".") == 0) {
+        LOGE("ERROR: must specify non-'.' bootclasspath\n");
+        return false;
+    }
+
+    /* table of loaded classes; the free func tears down class innards
+     * when the table is destroyed.
+     * NOTE(review): dvmHashTableCreate() result is not NULL-checked here */
+    gDvm.loadedClasses =
+        dvmHashTableCreate(256, (HashFreeFunc) dvmFreeClassInnards);
+
+    /* linear allocator for class metadata owned by the bootstrap loader */
+    gDvm.pBootLoaderAlloc = dvmLinearAllocCreate(NULL);
+    if (gDvm.pBootLoaderAlloc == NULL)
+        return false;
+
+    /* flip to true to run the LinearAlloc self-tests and exit */
+    if (false) {
+        linearAllocTests();
+        exit(0);
+    }
+
+
+    /* This placeholder class is used while a ClassObject is
+     * loading/linking so those not in the know can still say
+     * "obj->clazz->...".
+     */
+    unlinkedClass = &gDvm.unlinkedJavaLangClassObject;
+
+    memset(unlinkedClass, 0, sizeof(*unlinkedClass));
+
+    /* Set obj->clazz to NULL so anyone who gets too interested
+     * in the fake class will crash.
+     */
+    DVM_OBJECT_INIT(&unlinkedClass->obj, NULL);
+    unlinkedClass->descriptor = "!unlinkedClass";
+
+    gDvm.unlinkedJavaLangClass = unlinkedClass;
+
+    /*
+     * Process the bootstrap class path.  This means opening the specified
+     * DEX or Jar files and possibly running them through the optimizer.
+     */
+    assert(gDvm.bootClassPath == NULL);
+    processClassPath(gDvm.bootClassPathStr, true);
+
+    /* processClassPath() publishes its result in gDvm.bootClassPath;
+     * NULL here means no usable entries, which is fatal at startup */
+    if (gDvm.bootClassPath == NULL)
+        return false;
+
+    return true;
+}
+
+/*
+ * Clean up.
+ */
+void dvmClassShutdown(void)
+{
+    int idx;
+
+    /* drop the class hash table; its free func tears down each class */
+    dvmHashTableFree(gDvm.loadedClasses);
+    gDvm.loadedClasses = NULL;
+
+    /* release the primitive classes synthesized for arrays (slots may
+     * be NULL -- presumably dvmFreeClassInnards tolerates that) */
+    for (idx = 0; idx < PRIM_MAX; idx++)
+        dvmFreeClassInnards(gDvm.primitiveClass[idx]);
+
+    /* this closes DEX files, JAR files, etc. */
+    freeCpeArray(gDvm.bootClassPath);
+    gDvm.bootClassPath = NULL;
+
+    dvmLinearAllocDestroy(NULL);
+}
+
+
+/*
+ * ===========================================================================
+ *      Bootstrap class loader
+ * ===========================================================================
+ */
+
+/*
+ * Dump the contents of a ClassPathEntry array.
+ */
+static void dumpClassPath(const ClassPathEntry* cpe)
+{
+    int idx;
+
+    /* walk entries until the kCpeLastEntry end marker */
+    for (idx = 0; cpe->kind != kCpeLastEntry; idx++, cpe++) {
+        const char* kindStr;
+
+        switch (cpe->kind) {
+        case kCpeDir:
+            kindStr = "dir";
+            break;
+        case kCpeJar:
+            kindStr = "jar";
+            break;
+        case kCpeDex:
+            kindStr = "dex";
+            break;
+        default:
+            kindStr = "???";
+            break;
+        }
+
+        LOGI("  %2d: type=%s %s %p\n", idx, kindStr, cpe->fileName, cpe->ptr);
+        if (CALC_CACHE_STATS && cpe->kind == kCpeJar) {
+            /* jar entries carry a DvmDex; dump its interface-cache stats */
+            DvmDex* pDvmDex = dvmGetJarFileDex((JarFile*) cpe->ptr);
+            dvmDumpAtomicCacheStats(pDvmDex->pInterfaceCache);
+        }
+    }
+}
+
+/*
+ * Dump the contents of the bootstrap class path.
+ */
+void dvmDumpBootClassPath(void)
+{
+    /* gDvm.bootClassPath was populated by processClassPath() at startup */
+    dumpClassPath(gDvm.bootClassPath);
+}
+
+/*
+ * Returns "true" if the class path contains the specified path.
+ */
+bool dvmClassPathContains(const ClassPathEntry* cpe, const char* path)
+{
+    /* scan entries up to the end marker, comparing file names exactly */
+    for (; cpe->kind != kCpeLastEntry; cpe++) {
+        if (strcmp(cpe->fileName, path) == 0)
+            return true;
+    }
+
+    return false;
+}
+
+/*
+ * Free an array of ClassPathEntry structs.
+ *
+ * We release the contents of each entry, then free the array itself.
+ */
+static void freeCpeArray(ClassPathEntry* cpe)
+{
+    ClassPathEntry* walk;
+
+    if (cpe == NULL)
+        return;
+
+    /* release what each entry owns, then the array itself */
+    for (walk = cpe; walk->kind != kCpeLastEntry; walk++) {
+        switch (walk->kind) {
+        case kCpeJar:
+            /* free JarFile */
+            dvmJarFileFree((JarFile*) walk->ptr);
+            break;
+        case kCpeDex:
+            /* free RawDexFile */
+            dvmRawDexFileFree((RawDexFile*) walk->ptr);
+            break;
+        default:
+            /* e.g. kCpeDir -- nothing was opened for it */
+            assert(walk->ptr == NULL);
+            break;
+        }
+
+        free(walk->fileName);
+    }
+
+    free(cpe);
+}
+
+/*
+ * Prepare a ClassPathEntry struct, which at this point only has a valid
+ * filename.  We need to figure out what kind of file it is, and for
+ * everything other than directories we need to open it up and see
+ * what's inside.
+ */
+static bool prepareCpe(ClassPathEntry* cpe, bool isBootstrap)
+{
+    JarFile* pJarFile = NULL;
+    RawDexFile* pRawDexFile = NULL;
+    struct stat sb;
+
+    if (stat(cpe->fileName, &sb) < 0) {
+        LOGW("Unable to stat classpath element '%s'\n", cpe->fileName);
+        return false;
+    }
+
+    if (S_ISDIR(sb.st_mode)) {
+        /*
+         * The directory will usually have .class files in subdirectories,
+         * which may be a few levels down.  A recursive scan with cached
+         * results would avoid hitting the filesystem on misses, but the
+         * measurable benefit is doubtful (most of our stuff will be in
+         * DEX or JAR), so we just record the kind and move on.
+         */
+        cpe->kind = kCpeDir;
+        assert(cpe->ptr == NULL);
+        return true;
+    }
+
+    /* not a directory: try it as a Jar first... */
+    if (dvmJarFileOpen(cpe->fileName, &pJarFile, isBootstrap) == 0) {
+        cpe->kind = kCpeJar;
+        cpe->ptr = pJarFile;
+        return true;
+    }
+
+    /* ...then as a raw DEX file.
+     * TODO: do we still want to support "raw" DEX files in the classpath? */
+    if (dvmRawDexFileOpen(cpe->fileName, &pRawDexFile, isBootstrap) == 0) {
+        cpe->kind = kCpeDex;
+        cpe->ptr = pRawDexFile;
+        return true;
+    }
+
+    return false;
+}
+
+/*
+ * Convert a colon-separated list of directories, Zip files, and DEX files
+ * into an array of ClassPathEntry structs.
+ *
+ * If we're unable to load a bootstrap class path entry, we fail.  This
+ * is necessary to preserve the dependencies implied by optimized DEX files
+ * (e.g. if the same class appears in multiple places).
+ *
+ * During normal startup we fail if there are no entries, because we won't
+ * get very far without the basic language support classes, but if we're
+ * optimizing a DEX file we allow it.
+ */
+static ClassPathEntry* processClassPath(const char* pathStr, bool isBootstrap)
+{
+    ClassPathEntry* cpe = NULL;
+    char* mangle;
+    char* cp;
+    const char* end;
+    int idx, count;
+
+    assert(pathStr != NULL);
+
+    /* private mutable copy; the ':' separators are replaced with NULs below */
+    mangle = strdup(pathStr);
+
+    /*
+     * Run through and essentially strtok() the string.  Get a count of
+     * the #of elements while we're at it.
+     *
+     * If the path was constructed strangely (e.g. ":foo::bar:") this will
+     * over-allocate, which isn't ideal but is mostly harmless.
+     */
+    count = 1;
+    for (cp = mangle; *cp != '\0'; cp++) {
+        if (*cp == ':') {   /* separates two entries */
+            count++;
+            *cp = '\0';
+        }
+    }
+    end = cp;
+
+    /*
+     * Allocate storage.  We over-alloc by one so we can set an "end" marker.
+     * NOTE(review): calloc() and strdup() results are not NULL-checked in
+     * this function.
+     */
+    cpe = (ClassPathEntry*) calloc(count+1, sizeof(ClassPathEntry));
+
+    /*
+     * Set the global pointer so the DEX file dependency stuff can find it.
+     */
+    gDvm.bootClassPath = cpe;
+
+    /*
+     * Go through a second time, pulling stuff out.
+     */
+    cp = mangle;
+    idx = 0;
+    while (cp < end) {
+        if (*cp == '\0') {
+            /* leading, trailing, or doubled ':'; ignore it */
+        } else {
+            ClassPathEntry tmp;
+            tmp.kind = kCpeUnknown;
+            tmp.fileName = strdup(cp);
+            tmp.ptr = NULL;
+
+            /* drop an end marker here so DEX loader can walk unfinished list */
+            cpe[idx].kind = kCpeLastEntry;
+            cpe[idx].fileName = NULL;
+            cpe[idx].ptr = NULL;
+
+            if (!prepareCpe(&tmp, isBootstrap)) {
+                LOGD("Failed on '%s' (boot=%d)\n", tmp.fileName, isBootstrap);
+                /* drop from list and continue on */
+                free(tmp.fileName);
+
+                if (isBootstrap || gDvm.optimizing) {
+                    /* if boot path entry or we're optimizing, this is fatal.
+                     * NOTE(review): fileName/ptr of earlier successful
+                     * entries are not released here -- possible leak on
+                     * this error path */
+                    free(cpe);
+                    cpe = NULL;
+                    goto bail;
+                }
+            } else {
+                /* copy over, pointers and all */
+                if (tmp.fileName[0] != '/')
+                    LOGW("Non-absolute bootclasspath entry '%s'\n",
+                        tmp.fileName);
+                cpe[idx] = tmp;
+                idx++;
+            }
+        }
+
+        /* advance past this NUL-terminated token */
+        cp += strlen(cp) +1;
+    }
+    assert(idx <= count);
+    if (idx == 0 && !gDvm.optimizing) {
+        LOGE("ERROR: no valid entries found in bootclasspath '%s'\n", pathStr);
+        free(cpe);
+        cpe = NULL;
+        goto bail;
+    }
+
+    LOGVV("  (filled %d of %d slots)\n", idx, count);
+
+    /* put end marker in over-alloc slot */
+    cpe[idx].kind = kCpeLastEntry;
+    cpe[idx].fileName = NULL;
+    cpe[idx].ptr = NULL;
+
+    //dumpClassPath(cpe);
+
+bail:
+    free(mangle);
+    /* publish the final (possibly NULL) result for dependency checks */
+    gDvm.bootClassPath = cpe;
+    return cpe;
+}
+
+/*
+ * Search the DEX files we loaded from the bootstrap class path for a DEX
+ * file that has the class with the matching descriptor.
+ *
+ * Returns the matching DEX file and DexClassDef entry if found, otherwise
+ * returns NULL.
+ */
static DvmDex* searchBootPathForClass(const char* descriptor,
    const DexClassDef** ppClassDef)
{
    const ClassPathEntry* cpe = gDvm.bootClassPath;
    const DexClassDef* pFoundDef = NULL;
    DvmDex* pFoundFile = NULL;

    LOGVV("+++ class '%s' not yet loaded, scanning bootclasspath...\n",
        descriptor);

    /* walk the entries in order; the first match wins */
    while (cpe->kind != kCpeLastEntry) {
        //LOGV("+++  checking '%s' (%d)\n", cpe->fileName, cpe->kind);

        switch (cpe->kind) {
        case kCpeDir:
            /* plain directories hold resources, not loadable classes */
            LOGW("Directory entries ('%s') not supported in bootclasspath\n",
                cpe->fileName);
            break;
        case kCpeJar:
            {
                JarFile* pJarFile = (JarFile*) cpe->ptr;
                const DexClassDef* pClassDef;
                DvmDex* pDvmDex;

                pDvmDex = dvmGetJarFileDex(pJarFile);
                pClassDef = dexFindClass(pDvmDex->pDexFile, descriptor);
                if (pClassDef != NULL) {
                    /* found */
                    pFoundDef = pClassDef;
                    pFoundFile = pDvmDex;
                    goto found;
                }
            }
            break;
        case kCpeDex:
            {
                RawDexFile* pRawDexFile = (RawDexFile*) cpe->ptr;
                const DexClassDef* pClassDef;
                DvmDex* pDvmDex;

                pDvmDex = dvmGetRawDexFileDex(pRawDexFile);
                pClassDef = dexFindClass(pDvmDex->pDexFile, descriptor);
                if (pClassDef != NULL) {
                    /* found */
                    pFoundDef = pClassDef;
                    pFoundFile = pDvmDex;
                    goto found;
                }
            }
            break;
        default:
            LOGE("Unknown kind %d\n", cpe->kind);
            assert(false);
            break;
        }

        cpe++;
    }

    /*
     * Special handling during verification + optimization.
     *
     * The DEX optimizer needs to load classes from the DEX file it's working
     * on.  Rather than trying to insert it into the bootstrap class path
     * or synthesizing a class loader to manage it, we just make it available
     * here.  It logically comes after all existing entries in the bootstrap
     * class path.
     */
    if (gDvm.bootClassPathOptExtra != NULL) {
        const DexClassDef* pClassDef;

        pClassDef =
            dexFindClass(gDvm.bootClassPathOptExtra->pDexFile, descriptor);
        if (pClassDef != NULL) {
            /* found; control falls through to the "found" label below */
            pFoundDef = pClassDef;
            pFoundFile = gDvm.bootClassPathOptExtra;
        }
    }

found:
    /* out-param is always set: NULL (with NULL return) when not found */
    *ppClassDef = pFoundDef;
    return pFoundFile;
}
+
+/*
+ * Set the "extra" DEX, which becomes a de facto member of the bootstrap
+ * class set.
+ */
void dvmSetBootPathExtraDex(DvmDex* pDvmDex)
{
    /*
     * Stash the DEX so searchBootPathForClass() will consult it after
     * all regular bootclasspath entries.  Pass NULL to clear it.
     */
    gDvm.bootClassPathOptExtra = pDvmDex;
}
+
+
+/*
+ * Return the #of entries in the bootstrap class path.
+ *
+ * (Used for ClassLoader.getResources().)
+ */
+int dvmGetBootPathSize(void)
+{
+    const ClassPathEntry* cpe = gDvm.bootClassPath;
+
+    while (cpe->kind != kCpeLastEntry)
+        cpe++;
+
+    return cpe - gDvm.bootClassPath;
+}
+
+/*
+ * Find a resource with the specified name in entry N of the boot class path.
+ *
+ * We return a newly-allocated String of one of these forms:
+ *   file://path/name
+ *   jar:file://path!/name
+ * Where "path" is the bootstrap class path entry and "name" is the string
+ * passed into this method.  "path" needs to be an absolute path (starting
+ * with '/'); if it's not we'd need to "absolutify" it as part of forming
+ * the URL string.
+ */
+StringObject* dvmGetBootPathResource(const char* name, int idx)
+{
+    const int kUrlOverhead = 13;        // worst case for Jar URL
+    const ClassPathEntry* cpe = gDvm.bootClassPath;
+    StringObject* urlObj = NULL;
+
+    LOGV("+++ searching for resource '%s' in %d(%s)\n",
+        name, idx, cpe[idx].fileName);
+
+    /* we could use direct array index, but I don't entirely trust "idx" */
+    while (idx-- && cpe->kind != kCpeLastEntry)
+        cpe++;
+    if (cpe->kind == kCpeLastEntry) {
+        assert(false);
+        return NULL;
+    }
+
+    char urlBuf[strlen(name) + strlen(cpe->fileName) + kUrlOverhead +1];
+
+    switch (cpe->kind) {
+    case kCpeDir:
+        sprintf(urlBuf, "file://%s/%s", cpe->fileName, name);
+        if (access(urlBuf+7, F_OK) != 0)
+            goto bail;
+        break;
+    case kCpeJar:
+        {
+            JarFile* pJarFile = (JarFile*) cpe->ptr;
+            if (dexZipFindEntry(&pJarFile->archive, name) == NULL)
+                goto bail;
+            sprintf(urlBuf, "jar:file://%s!/%s", cpe->fileName, name);
+        }
+        break;
+    case kCpeDex:
+        LOGV("No resources in DEX files\n");
+        goto bail;
+    default:
+        assert(false);
+        goto bail;
+    }
+
+    LOGV("+++ using URL='%s'\n", urlBuf);
+    urlObj = dvmCreateStringFromCstr(urlBuf, ALLOC_DEFAULT);
+
+bail:
+    return urlObj;
+}
+
+
+/*
+ * ===========================================================================
+ *      Class list management
+ * ===========================================================================
+ */
+
/* search for these criteria in the Class hash table */
typedef struct ClassMatchCriteria {
    const char* descriptor;     /* class descriptor, e.g. "Ljava/lang/String;" */
    Object*     loader;         /* class loader; NULL means bootstrap loader */
} ClassMatchCriteria;

/* growth increment for a class' initiating-loader list */
#define kInitLoaderInc  4       /* must be power of 2 */
+
+/*
+ * Determine if "loader" appears in clazz' initiating loader list.
+ *
+ * The class hash table lock must be held when calling here, since
+ * it's also used when updating a class' initiating loader list.
+ */
+bool dvmLoaderInInitiatingList(const ClassObject* clazz, const Object* loader)
+{
+    /*
+     * The bootstrap class loader can't be just an initiating loader for
+     * anything (it's always the defining loader if the class is visible
+     * to it).  We don't put defining loaders in the initiating list.
+     */
+    if (loader == NULL)
+        return false;
+
+    /*
+     * Scan the list for a match.  The list is expected to be short.
+     */
+    int i;
+    for (i = clazz->initiatingLoaderCount-1; i >= 0; --i) {
+        if (clazz->initiatingLoaders[i] == loader) {
+            //LOGI("+++ found initiating match %p in %s\n",
+            //    loader, clazz->descriptor);
+            return true;
+        }
+    }
+    return false;
+}
+
+/*
+ * Add "loader" to clazz's initiating loader set, unless it's the defining
+ * class loader.
+ *
+ * In the common case this will be a short list, so we don't need to do
+ * anything too fancy here.
+ *
+ * This locks gDvm.loadedClasses for synchronization, so don't hold it
+ * when calling here.
+ */
void dvmAddInitiatingLoader(ClassObject* clazz, Object* loader)
{
    /* the defining loader is never recorded as an initiating loader */
    if (loader != clazz->classLoader) {
        assert(loader != NULL);

        LOGVV("Adding %p to '%s' init list\n", loader, clazz->descriptor);
        dvmHashTableLock(gDvm.loadedClasses);

        /*
         * Make sure nobody snuck in.  The penalty for adding twice is
         * pretty minor, and probably outweighs the O(n^2) hit for
         * checking before every add, so we may not want to do this.
         *
         * NOTE: the "false &&" deliberately compiles this check out;
         * duplicate entries are tolerated (see dvmLoaderInInitiatingList,
         * which returns on the first match).
         */
        if (false && dvmLoaderInInitiatingList(clazz, loader)) {
            LOGW("WOW: simultaneous add of initiating class loader\n");
            goto bail_unlock;
        }

        /*
         * The list never shrinks, so we just keep a count of the
         * number of elements in it, and reallocate the buffer when
         * we run off the end.
         *
         * The pointer is initially NULL, so we *do* want to call realloc
         * when count==0.
         */
        /* count is a multiple of kInitLoaderInc exactly when the list is full */
        if ((clazz->initiatingLoaderCount & (kInitLoaderInc-1)) == 0) {
            Object** newList;

            newList = (Object**) realloc(clazz->initiatingLoaders,
                        (clazz->initiatingLoaderCount + kInitLoaderInc)
                         * sizeof(Object*));
            if (newList == NULL) {
                /* this is mainly a cache, so it's not the EotW */
                /* on OOM we silently drop the entry; old list stays valid */
                assert(false);
                goto bail_unlock;
            }
            clazz->initiatingLoaders = newList;

            //LOGI("Expanded init list to %d (%s)\n",
            //    clazz->initiatingLoaderCount+kInitLoaderInc,
            //    clazz->descriptor);
        }

        clazz->initiatingLoaders[clazz->initiatingLoaderCount++] = loader;

bail_unlock:
        dvmHashTableUnlock(gDvm.loadedClasses);
    }
}
+
+/*
+ * (This is a dvmHashTableLookup callback.)
+ *
+ * Entries in the class hash table are stored as { descriptor, d-loader }
+ * tuples.  If the hashed class descriptor matches the requested descriptor,
+ * and the hashed defining class loader matches the requested class
+ * loader, we're good.  If only the descriptor matches, we check to see if the
+ * loader is in the hashed class' initiating loader list.  If so, we
+ * can return "true" immediately and skip some of the loadClass melodrama.
+ *
+ * The caller must lock the hash table before calling here.
+ *
+ * Returns 0 if a matching entry is found, nonzero otherwise.
+ */
+static int hashcmpClassByCrit(const void* vclazz, const void* vcrit)
+{
+    const ClassObject* clazz = (const ClassObject*) vclazz;
+    const ClassMatchCriteria* pCrit = (const ClassMatchCriteria*) vcrit;
+    bool match;
+
+    match = (strcmp(clazz->descriptor, pCrit->descriptor) == 0 &&
+             (clazz->classLoader == pCrit->loader ||
+              (pCrit->loader != NULL &&
+               dvmLoaderInInitiatingList(clazz, pCrit->loader)) ));
+    //if (match)
+    //    LOGI("+++ %s %p matches existing %s %p\n",
+    //        pCrit->descriptor, pCrit->loader,
+    //        clazz->descriptor, clazz->classLoader);
+    return !match;
+}
+
+/*
+ * Like hashcmpClassByCrit, but passing in a fully-formed ClassObject
+ * instead of a ClassMatchCriteria.
+ */
+static int hashcmpClassByClass(const void* vclazz, const void* vaddclazz)
+{
+    const ClassObject* clazz = (const ClassObject*) vclazz;
+    const ClassObject* addClazz = (const ClassObject*) vaddclazz;
+    bool match;
+
+    match = (strcmp(clazz->descriptor, addClazz->descriptor) == 0 &&
+             (clazz->classLoader == addClazz->classLoader ||
+              (addClazz->classLoader != NULL &&
+               dvmLoaderInInitiatingList(clazz, addClazz->classLoader)) ));
+    return !match;
+}
+
+/*
+ * Search through the hash table to find an entry with a matching descriptor
+ * and an initiating class loader that matches "loader".
+ *
+ * The table entries are hashed on descriptor only, because they're unique
+ * on *defining* class loader, not *initiating* class loader.  This isn't
+ * great, because it guarantees we will have to probe when multiple
+ * class loaders are used.
+ *
+ * Note this does NOT try to load a class; it just finds a class that
+ * has already been loaded.
+ *
+ * If "unprepOkay" is set, this will return classes that have been added
+ * to the hash table but are not yet fully loaded and linked.  Otherwise,
+ * such classes are ignored.  (The only place that should set "unprepOkay"
+ * is findClassNoInit(), which will wait for the prep to finish.)
+ *
+ * Returns NULL if not found.
+ */
+ClassObject* dvmLookupClass(const char* descriptor, Object* loader,
+    bool unprepOkay)
+{
+    ClassMatchCriteria crit;
+    void* found;
+    u4 hash;
+
+    crit.descriptor = descriptor;
+    crit.loader = loader;
+    hash = dvmComputeUtf8Hash(descriptor);
+
+    LOGVV("threadid=%d: dvmLookupClass searching for '%s' %p\n",
+        dvmThreadSelf()->threadId, descriptor, loader);
+
+    dvmHashTableLock(gDvm.loadedClasses);
+    found = dvmHashTableLookup(gDvm.loadedClasses, hash, &crit,
+                hashcmpClassByCrit, false);
+    dvmHashTableUnlock(gDvm.loadedClasses);
+
+    /*
+     * The class has been added to the hash table but isn't ready for use.
+     * We're going to act like we didn't see it, so that the caller will
+     * go through the full "find class" path, which includes locking the
+     * object and waiting until it's ready.  We could do that lock/wait
+     * here, but this is an extremely rare case, and it's simpler to have
+     * the wait-for-class code centralized.
+     */
+    if (found != NULL && !unprepOkay && !dvmIsClassLinked(found)) {
+        LOGD("Ignoring not-yet-ready %s, using slow path\n",
+            ((ClassObject*)found)->descriptor);
+        found = NULL;
+    }
+
+    return (ClassObject*) found;
+}
+
+/*
+ * Add a new class to the hash table.
+ *
+ * The class is considered "new" if it doesn't match on both the class
+ * descriptor and the defining class loader.
+ *
+ * TODO: we should probably have separate hash tables for each
+ * ClassLoader. This could speed up dvmLookupClass and
+ * other common operations. It does imply a VM-visible data structure
+ * for each ClassLoader object with loaded classes, which we don't
+ * have yet.
+ */
+bool dvmAddClassToHash(ClassObject* clazz)
+{
+    void* found;
+    u4 hash;
+
+    hash = dvmComputeUtf8Hash(clazz->descriptor);
+
+    dvmHashTableLock(gDvm.loadedClasses);
+    found = dvmHashTableLookup(gDvm.loadedClasses, hash, clazz,
+                hashcmpClassByClass, true);
+    dvmHashTableUnlock(gDvm.loadedClasses);
+
+    LOGV("+++ dvmAddClassToHash '%s' %p (isnew=%d) --> %p\n",
+        clazz->descriptor, clazz->classLoader,
+        (found == (void*) clazz), clazz);
+
+    //dvmCheckClassTablePerf();
+
+    /* can happen if two threads load the same class simultaneously */
+    return (found == (void*) clazz);
+}
+
#if 0
/* NOTE: diagnostic helpers below are compiled out; flip "#if 0" to enable
 * (also re-enable the dvmCheckClassTablePerf() call in dvmAddClassToHash). */
/*
 * Compute hash value for a class.
 */
u4 hashcalcClass(const void* item)
{
    return dvmComputeUtf8Hash(((const ClassObject*) item)->descriptor);
}

/*
 * Check the performance of the "loadedClasses" hash table.
 */
void dvmCheckClassTablePerf(void)
{
    dvmHashTableLock(gDvm.loadedClasses);
    dvmHashTableProbeCount(gDvm.loadedClasses, hashcalcClass,
        hashcmpClassByClass);
    dvmHashTableUnlock(gDvm.loadedClasses);
}
#endif
+
+/*
+ * Remove a class object from the hash table.
+ */
+static void removeClassFromHash(ClassObject* clazz)
+{
+    LOGV("+++ removeClassFromHash '%s'\n", clazz->descriptor);
+
+    u4 hash = dvmComputeUtf8Hash(clazz->descriptor);
+
+    dvmHashTableLock(gDvm.loadedClasses);
+    if (!dvmHashTableRemove(gDvm.loadedClasses, hash, clazz))
+        LOGW("Hash table remove failed on class '%s'\n", clazz->descriptor);
+    dvmHashTableUnlock(gDvm.loadedClasses);
+}
+
+
+/*
+ * ===========================================================================
+ *      Class creation
+ * ===========================================================================
+ */
+
+/*
+ * Find the named class (by descriptor), using the specified
+ * initiating ClassLoader.
+ *
+ * The class will be loaded and initialized if it has not already been.
+ * If necessary, the superclass will be loaded.
+ * 
+ * If the class can't be found, returns NULL with an appropriate exception
+ * raised.
+ */
+ClassObject* dvmFindClass(const char* descriptor, Object* loader)
+{
+    ClassObject* clazz;
+
+    clazz = dvmFindClassNoInit(descriptor, loader);
+    if (clazz != NULL && clazz->status < CLASS_INITIALIZED) {
+        /* initialize class */
+        if (!dvmInitClass(clazz)) {
+            /* init failed; leave it in the list, marked as bad */
+            assert(dvmCheckException(dvmThreadSelf()));
+            assert(clazz->status == CLASS_ERROR);
+            return NULL;
+        }
+    }
+
+    return clazz;
+}
+
+/*
+ * Find the named class (by descriptor), using the specified
+ * initiating ClassLoader.
+ *
+ * The class will be loaded if it has not already been, as will its
+ * superclass.  It will not be initialized.
+ *
+ * If the class can't be found, returns NULL with an appropriate exception
+ * raised.
+ */
+ClassObject* dvmFindClassNoInit(const char* descriptor,
+        Object* loader)
+{
+    assert(descriptor != NULL);
+    //assert(loader != NULL);
+
+    LOGVV("FindClassNoInit '%s' %p\n", descriptor, loader);
+
+    if (*descriptor == '[') {
+        /*
+         * Array class.  Find in table, generate if not found.
+         */
+        return dvmFindArrayClass(descriptor, loader);
+    } else {
+        /*
+         * Regular class.  Find in table, load if not found.
+         */
+        if (loader != NULL) {
+            return findClassFromLoaderNoInit(descriptor, loader);
+        } else {
+            return dvmFindSystemClassNoInit(descriptor);
+        }
+    }
+}
+
+/*
+ * Load the named class (by descriptor) from the specified class
+ * loader.  This calls out to let the ClassLoader object do its thing.
+ *
+ * Returns with NULL and an exception raised on error.
+ */
static ClassObject* findClassFromLoaderNoInit(const char* descriptor,
    Object* loader)
{
    //LOGI("##### findClassFromLoaderNoInit (%s,%p)\n",
    //        descriptor, loader);

    Thread* self = dvmThreadSelf();
    ClassObject* clazz;

    assert(loader != NULL);

    /*
     * Do we already have it?
     *
     * The class loader code does the "is it already loaded" check as
     * well.  However, this call is much faster than calling through
     * interpreted code.  Doing this does mean that in the common case
     * (365 out of 420 calls booting the sim) we're doing the
     * lookup-by-descriptor twice.  It appears this is still a win, so
     * I'm keeping it in.
     */
    clazz = dvmLookupClass(descriptor, loader, false);
    if (clazz != NULL) {
        LOGVV("Already loaded: %s %p\n", descriptor, loader);
        return clazz;
    } else {
        LOGVV("Not already loaded: %s %p\n", descriptor, loader);
    }

    char* dotName = NULL;
    StringObject* nameObj = NULL;
    Object* excep;
    Method* loadClass;

    /* convert "Landroid/debug/Stuff;" to "android.debug.Stuff" */
    dotName = dvmDescriptorToDot(descriptor);
    if (dotName == NULL) {
        dvmThrowException("Ljava/lang/OutOfMemoryError;", NULL);
        goto bail;
    }
    /* build the java.lang.String argument for loadClass() */
    nameObj = dvmCreateStringFromCstr(dotName, ALLOC_DEFAULT);
    if (nameObj == NULL) {
        assert(dvmCheckException(self));
        goto bail;
    }

    // TODO: cache the vtable offset
    loadClass = dvmFindVirtualMethodHierByDescriptor(loader->clazz, "loadClass",
                 "(Ljava/lang/String;)Ljava/lang/Class;");
    if (loadClass == NULL) {
        /*
         * NOTE(review): this path returns NULL (clazz is NULL here) without
         * obviously raising an exception, which appears to violate the
         * "NULL with exception raised" contract -- confirm whether
         * dvmFindVirtualMethodHierByDescriptor throws on failure.
         */
        LOGW("Couldn't find loadClass in ClassLoader\n");
        goto bail;
    }

#ifdef WITH_PROFILER
    dvmMethodTraceClassPrepBegin();
#endif

    /*
     * Invoke loadClass().  This will probably result in a couple of
     * exceptions being thrown, because the ClassLoader.loadClass()
     * implementation eventually calls VMClassLoader.loadClass to see if
     * the bootstrap class loader can find it before doing its own load.
     */
    LOGVV("--- Invoking loadClass(%s, %p)\n", dotName, loader);
    JValue result;
    dvmCallMethod(self, loadClass, loader, &result, nameObj);
    clazz = (ClassObject*) result.l;

#ifdef WITH_PROFILER
    dvmMethodTraceClassPrepEnd();
#endif

    excep = dvmGetException(self);
    if (excep != NULL) {
#if DVM_SHOW_EXCEPTION >= 2
        LOGD("NOTE: loadClass '%s' %p threw exception %s\n",
            dotName, loader, excep->clazz->descriptor);
#endif
        /* keep the original exception alive while we clear and re-throw
         * it as the cause of a NoClassDefFoundError */
        dvmAddTrackedAlloc(excep, self);
        dvmClearException(self);
        dvmThrowChainedExceptionWithClassMessage(
            "Ljava/lang/NoClassDefFoundError;", descriptor, excep);
        dvmReleaseTrackedAlloc(excep, self);
        clazz = NULL;
        goto bail;
    } else {
        /* no exception implies loadClass() returned a class */
        assert(clazz != NULL);
    }

    /* record "loader" as an initiating loader for the returned class */
    dvmAddInitiatingLoader(clazz, loader);

    LOGVV("--- Successfully loaded %s %p (thisldr=%p clazz=%p)\n",
        descriptor, clazz->classLoader, loader, clazz);

bail:
    /* safe when nameObj is NULL; dotName likewise free(NULL)-safe */
    dvmReleaseTrackedAlloc((Object*)nameObj, NULL);
    free(dotName);
    return clazz;
}
+
+/*
+ * Load the named class (by descriptor) from the specified DEX file.
+ * Used by class loaders to instantiate a class object from a
+ * VM-managed DEX.
+ */
ClassObject* dvmDefineClass(DvmDex* pDvmDex, const char* descriptor,
    Object* classLoader)
{
    assert(pDvmDex != NULL);

    /* thin wrapper: load + link (no <clinit>) from the given DEX */
    return findClassNoInit(descriptor, classLoader, pDvmDex);
}
+
+
+/*
+ * Find the named class (by descriptor), scanning through the
+ * bootclasspath if it hasn't already been loaded.
+ *
+ * "descriptor" looks like "Landroid/debug/Stuff;".
+ *
+ * Uses NULL as the defining class loader.
+ */
+ClassObject* dvmFindSystemClass(const char* descriptor)
+{
+    ClassObject* clazz;
+
+    clazz = dvmFindSystemClassNoInit(descriptor);
+    if (clazz != NULL && clazz->status < CLASS_INITIALIZED) {
+        /* initialize class */
+        if (!dvmInitClass(clazz)) {
+            /* init failed; leave it in the list, marked as bad */
+            assert(dvmCheckException(dvmThreadSelf()));
+            assert(clazz->status == CLASS_ERROR);
+            return NULL;
+        }
+    }
+
+    return clazz;
+}
+
+/*
+ * Find the named class (by descriptor), searching for it in the
+ * bootclasspath.
+ *
+ * On failure, this returns NULL with an exception raised.
+ */
ClassObject* dvmFindSystemClassNoInit(const char* descriptor)
{
    /* NULL loader + NULL DEX = search the bootstrap class path */
    return findClassNoInit(descriptor, NULL, NULL);
}
+
+/*
+ * Find the named class (by descriptor). If it's not already loaded,
+ * we load it and link it, but don't execute <clinit>. (The VM has
+ * specific limitations on which events can cause initialization.)
+ *
+ * If "pDexFile" is NULL, we will search the bootclasspath for an entry.
+ *
+ * On failure, this returns NULL with an exception raised.
+ *
+ * TODO: we need to return an indication of whether we loaded the class or
+ * used an existing definition.  If somebody deliberately tries to load a
+ * class twice in the same class loader, they should get a LinkageError,
+ * but inadvertent simultaneous class references should "just work".
+ */
+static ClassObject* findClassNoInit(const char* descriptor, Object* loader,
+    DvmDex* pDvmDex)
+{
+    Thread* self = dvmThreadSelf();
+    ClassObject* clazz;
+#ifdef WITH_PROFILER
+    bool profilerNotified = false;
+#endif
+
+    if (loader != NULL) {
+        LOGVV("#### findClassNoInit(%s,%p,%p)\n", descriptor, loader,
+            pDvmDex->pDexFile);
+    }
+
+    /*
+     * We don't expect an exception to be raised at this point.  The
+     * exception handling code is good about managing this.  This *can*
+     * happen if a JNI lookup fails and the JNI code doesn't do any
+     * error checking before doing another class lookup, so we may just
+     * want to clear this and restore it on exit.  If we don't, some kinds
+     * of failures can't be detected without rearranging other stuff.
+     *
+     * Most often when we hit this situation it means that something is
+     * broken in the VM or in JNI code, so I'm keeping it in place (and
+     * making it an informative abort rather than an assert).
+     */
+    if (dvmCheckException(self)) {
+        LOGE("Class lookup %s attemped while exception %s pending\n",
+            descriptor, dvmGetException(self)->clazz->descriptor);
+        dvmDumpAllThreads(false);
+        dvmAbort();
+    }
+
+    clazz = dvmLookupClass(descriptor, loader, true);
+    if (clazz == NULL) {
+        const DexClassDef* pClassDef;
+
+#ifdef WITH_PROFILER
+        dvmMethodTraceClassPrepBegin();
+        profilerNotified = true;
+#endif
+
+#if LOG_CLASS_LOADING
+        u8 startTime = dvmGetThreadCpuTimeNsec();
+#endif
+
+        if (pDvmDex == NULL) {
+            assert(loader == NULL);     /* shouldn't be here otherwise */
+            pDvmDex = searchBootPathForClass(descriptor, &pClassDef);
+        } else {
+            pClassDef = dexFindClass(pDvmDex->pDexFile, descriptor);
+        }
+
+        if (pDvmDex == NULL || pClassDef == NULL) {
+            dvmThrowExceptionWithClassMessage(
+                "Ljava/lang/NoClassDefFoundError;", descriptor);
+            goto bail;
+        }
+
+        /* found a match, try to load it */
+        clazz = loadClassFromDex(pDvmDex, pClassDef, loader);
+        if (dvmCheckException(self)) {
+            /* class was found but had issues */
+            dvmReleaseTrackedAlloc((Object*) clazz, NULL);
+            goto bail;
+        }
+
+        /* 
+         * Lock the class while we link it so other threads must wait for us
+         * to finish.  Set the "initThreadId" so we can identify recursive
+         * invocation.
+         */
+        dvmLockObject(self, (Object*) clazz);
+        clazz->initThreadId = self->threadId;
+
+        /*
+         * Add to hash table so lookups succeed.
+         *
+         * [Are circular references possible when linking a class?]
+         */
+        assert(clazz->classLoader == loader);
+        if (!dvmAddClassToHash(clazz)) {
+            /*
+             * Another thread must have loaded the class after we
+             * started but before we finished.  Discard what we've
+             * done and leave some hints for the GC.
+             *
+             * (Yes, this happens.)
+             */
+            //LOGW("WOW: somebody loaded %s simultaneously\n", descriptor);
+            clazz->initThreadId = 0;
+            dvmUnlockObject(self, (Object*) clazz);
+
+            /* Let the GC free the class.
+             */
+            assert(clazz->obj.clazz == gDvm.unlinkedJavaLangClass);
+            dvmReleaseTrackedAlloc((Object*) clazz, NULL);
+
+            /* Grab the winning class.
+             */
+            clazz = dvmLookupClass(descriptor, loader, true);
+            assert(clazz != NULL);
+            goto got_class;
+        }
+        dvmReleaseTrackedAlloc((Object*) clazz, NULL);
+
+#if LOG_CLASS_LOADING
+        logClassLoadWithTime('>', clazz, startTime);
+#endif
+        /*
+         * Prepare and resolve.
+         */
+        if (!dvmLinkClass(clazz, false)) {
+            assert(dvmCheckException(self));
+
+            /* Make note of the error and clean up the class.
+             */
+            removeClassFromHash(clazz);
+            clazz->status = CLASS_ERROR;
+            dvmFreeClassInnards(clazz);
+
+            /* Let any waiters know.
+             */
+            clazz->initThreadId = 0;
+            dvmObjectNotifyAll(self, (Object*) clazz);
+            dvmUnlockObject(self, (Object*) clazz);
+
+            clazz = NULL;
+            if (gDvm.optimizing) {
+                /* happens with "external" libs */
+                LOGV("Link of class '%s' failed\n", descriptor);
+            } else {
+                LOGW("Link of class '%s' failed\n", descriptor);
+            }
+#if LOG_CLASS_LOADING
+            logClassLoad('<', clazz);
+#endif
+            goto bail;
+        }
+        dvmObjectNotifyAll(self, (Object*) clazz);
+        dvmUnlockObject(self, (Object*) clazz);
+
+        /*
+         * Add class stats to global counters.
+         *
+         * TODO: these should probably be atomic ops.
+         */
+        gDvm.numLoadedClasses++;
+        gDvm.numDeclaredMethods +=
+            clazz->virtualMethodCount + clazz->directMethodCount;
+        gDvm.numDeclaredInstFields += clazz->ifieldCount;
+        gDvm.numDeclaredStaticFields += clazz->sfieldCount;
+
+        /*
+         * Cache pointers to basic classes.  We want to use these in
+         * various places, and it's easiest to initialize them on first
+         * use rather than trying to force them to initialize (startup
+         * ordering makes it weird).
+         */
+        if (gDvm.classJavaLangObject == NULL &&
+            strcmp(descriptor, "Ljava/lang/Object;") == 0)
+        {
+            /* It should be impossible to get here with anything
+             * but the bootclasspath loader.
+             */
+            assert(loader == NULL);
+            gDvm.classJavaLangObject = clazz;
+        }
+
+#if LOG_CLASS_LOADING
+        logClassLoad('<', clazz);
+#endif
+
+    } else {
+got_class:
+        if (!dvmIsClassLinked(clazz) && clazz->status != CLASS_ERROR) {
+            /*
+             * We can race with other threads for class linking.  We should
+             * never get here recursively; doing so indicates that two
+             * classes have circular dependencies.
+             *
+             * One exception: we force discovery of java.lang.Class in
+             * dvmLinkClass(), and Class has Object as its superclass.  So
+             * if the first thing we ever load is Object, we will init
+             * Object->Class->Object.  The easiest way to avoid this is to
+             * ensure that Object is never the first thing we look up, so
+             * we get Foo->Class->Object instead.
+             */
+            dvmLockObject(self, (Object*) clazz);
+            if (!dvmIsClassLinked(clazz) &&
+                clazz->initThreadId == self->threadId)
+            {
+                LOGW("Recursive link on class %s\n", clazz->descriptor);
+                dvmUnlockObject(self, (Object*) clazz);
+                dvmThrowExceptionWithClassMessage(
+                    "Ljava/lang/ClassCircularityError;", clazz->descriptor);
+                clazz = NULL;
+                goto bail;
+            }
+            //LOGI("WAITING  for '%s' (owner=%d)\n",
+            //    clazz->descriptor, clazz->initThreadId);
+            while (!dvmIsClassLinked(clazz) && clazz->status != CLASS_ERROR) {
+                dvmObjectWait(self, (Object*) clazz, 0, 0, false);
+            }
+            dvmUnlockObject(self, (Object*) clazz);
+        }
+        if (clazz->status == CLASS_ERROR) {
+            /*
+             * Somebody else tried to load this and failed.  We need to raise
+             * an exception and report failure.
+             */
+            throwEarlierClassFailure(clazz);
+            clazz = NULL;
+            goto bail;
+        }
+    }
+
+    /* check some invariants */
+    assert(dvmIsClassLinked(clazz));
+    assert(gDvm.classJavaLangClass != NULL);
+    assert(clazz->obj.clazz == gDvm.classJavaLangClass);
+    if (clazz != gDvm.classJavaLangObject) {
+        assert(clazz->super != NULL);
+    }
+    if (!dvmIsInterfaceClass(clazz)) {
+        //LOGI("class=%s vtableCount=%d, virtualMeth=%d\n",
+        //    clazz->descriptor, clazz->vtableCount,
+        //    clazz->virtualMethodCount);
+        assert(clazz->vtableCount >= clazz->virtualMethodCount);
+    }
+
+    /*
+     * Normally class objects are initialized before we instantiate them,
+     * but we can't do that with java.lang.Class (chicken, meet egg).  We
+     * do it explicitly here.
+     *
+     * The verifier could call here to find Class while verifying Class,
+     * so we need to check for CLASS_VERIFYING as well as !initialized.
+     */
+    if (clazz == gDvm.classJavaLangClass && !dvmIsClassInitialized(clazz) &&
+        !(clazz->status == CLASS_VERIFYING))
+    {
+        LOGV("+++ explicitly initializing %s\n", clazz->descriptor);
+        dvmInitClass(clazz);
+    }
+
+bail:
+#ifdef WITH_PROFILER
+    if (profilerNotified)
+        dvmMethodTraceClassPrepEnd();
+#endif
+    assert(clazz != NULL || dvmCheckException(self));
+    return clazz;
+}
+
+/*
+ * Helper for loadClassFromDex, which takes a DexClassDataHeader and
+ * encoded data pointer in addition to the other arguments.
+ */
+static ClassObject* loadClassFromDex0(DvmDex* pDvmDex,
+    const DexClassDef* pClassDef, const DexClassDataHeader* pHeader,
+    const u1* pEncodedData, Object* classLoader)
+{
+    ClassObject* newClass = NULL;
+    const DexFile* pDexFile;
+    const char* descriptor;
+    int i;
+
+    pDexFile = pDvmDex->pDexFile;
+    descriptor = dexGetClassDescriptor(pDexFile, pClassDef);
+
+    /*
+     * Make sure the aren't any "bonus" flags set, since we use them for
+     * runtime state.
+     */
+    if ((pClassDef->accessFlags & ~EXPECTED_FILE_FLAGS) != 0) {
+        LOGW("Invalid file flags in class %s: %04x\n",
+            descriptor, pClassDef->accessFlags);
+        return NULL;
+    }
+
+    /*
+     * Allocate storage for the class object on the GC heap, so that other
+     * objects can have references to it.  We bypass the usual mechanism
+     * (allocObject), because we don't have all the bits and pieces yet.
+     *
+     * Note that we assume that java.lang.Class does not override
+     * finalize().
+     */
+    newClass = (ClassObject*) dvmMalloc(sizeof(*newClass), ALLOC_DEFAULT);
+    if (newClass == NULL)
+        return NULL;
+
+    /* Until the class is loaded and linked, use a placeholder
+     * obj->clazz value as a hint to the GC.  We don't want
+     * the GC trying to scan the object while it's full of Idx
+     * values.  Also, the real java.lang.Class may not exist
+     * yet.
+     */
+    DVM_OBJECT_INIT(&newClass->obj, gDvm.unlinkedJavaLangClass);
+
+    newClass->descriptor = descriptor;
+    assert(newClass->descriptorAlloc == NULL);
+    newClass->accessFlags = pClassDef->accessFlags;
+    newClass->classLoader = classLoader;
+    newClass->pDvmDex = pDvmDex;
+    newClass->primitiveType = PRIM_NOT;
+
+    /*
+     * Stuff the superclass index into the object pointer field.  The linker
+     * pulls it out and replaces it with a resolved ClassObject pointer.
+     * I'm doing it this way (rather than having a dedicated superclassIdx
+     * field) to save a few bytes of overhead per class.
+     *
+     * newClass->super is not traversed or freed by dvmFreeClassInnards, so
+     * this is safe.
+     */
+    assert(sizeof(u4) == sizeof(ClassObject*));
+    newClass->super = (ClassObject*) pClassDef->superclassIdx;
+
+    /*
+     * Stuff class reference indices into the pointer fields.
+     *
+     * The elements of newClass->interfaces are not traversed or freed by
+     * dvmFreeClassInnards, so this is GC-safe.
+     */
+    const DexTypeList* pInterfacesList;
+    pInterfacesList = dexGetInterfacesList(pDexFile, pClassDef);
+    if (pInterfacesList != NULL) {
+        newClass->interfaceCount = pInterfacesList->size;
+        newClass->interfaces = (ClassObject**) dvmLinearAlloc(classLoader,
+                newClass->interfaceCount * sizeof(ClassObject*));
+
+        for (i = 0; i < newClass->interfaceCount; i++) {
+            const DexTypeItem* pType = dexGetTypeItem(pInterfacesList, i);
+            newClass->interfaces[i] = (ClassObject*)(u4) pType->typeIdx;
+        }
+        dvmLinearReadOnly(classLoader, newClass->interfaces);
+    }
+
+    /* load field definitions */
+
+    /*
+     * TODO: consider over-allocating the class object and appending the
+     * static field info onto the end.  It's fixed-size and known at alloc
+     * time.  This would save a couple of native heap allocations, but it
+     * would also make heap compaction more difficult because we pass Field
+     * pointers around internally.
+     */
+
+    if (pHeader->staticFieldsSize != 0) {
+        /* static fields stay on system heap; field data isn't "write once" */
+        int count = (int) pHeader->staticFieldsSize;
+        u4 lastIndex = 0;
+        DexField field;
+
+        newClass->sfieldCount = count;
+        newClass->sfields =
+            (StaticField*) calloc(count, sizeof(StaticField));
+        for (i = 0; i < count; i++) {
+            dexReadClassDataField(&pEncodedData, &field, &lastIndex);
+            loadSFieldFromDex(newClass, &field, &newClass->sfields[i]);
+        }
+    }
+    
+    if (pHeader->instanceFieldsSize != 0) {
+        int count = (int) pHeader->instanceFieldsSize;
+        u4 lastIndex = 0;
+        DexField field;
+
+        newClass->ifieldCount = count;
+        newClass->ifields = (InstField*) dvmLinearAlloc(classLoader,
+                count * sizeof(InstField));
+        for (i = 0; i < count; i++) {
+            dexReadClassDataField(&pEncodedData, &field, &lastIndex);
+            loadIFieldFromDex(newClass, &field, &newClass->ifields[i]);
+        }
+        dvmLinearReadOnly(classLoader, newClass->ifields);
+    }
+
+    /* load method definitions */
+
+    if (pHeader->directMethodsSize != 0) {
+        int count = (int) pHeader->directMethodsSize;
+        u4 lastIndex = 0;
+        DexMethod method;
+        
+        newClass->directMethodCount = count;
+        newClass->directMethods = (Method*) dvmLinearAlloc(classLoader,
+                count * sizeof(Method));
+        for (i = 0; i < count; i++) {
+            dexReadClassDataMethod(&pEncodedData, &method, &lastIndex);
+            loadMethodFromDex(newClass, &method, &newClass->directMethods[i]);
+        }
+        dvmLinearReadOnly(classLoader, newClass->directMethods);
+    }
+    
+    if (pHeader->virtualMethodsSize != 0) {
+        int count = (int) pHeader->virtualMethodsSize;
+        u4 lastIndex = 0;
+        DexMethod method;
+
+        newClass->virtualMethodCount = count;
+        newClass->virtualMethods = (Method*) dvmLinearAlloc(classLoader,
+                count * sizeof(Method));
+        for (i = 0; i < count; i++) {
+            dexReadClassDataMethod(&pEncodedData, &method, &lastIndex);
+            loadMethodFromDex(newClass, &method, &newClass->virtualMethods[i]);
+        }
+        dvmLinearReadOnly(classLoader, newClass->virtualMethods);
+    }
+
+    newClass->sourceFile = dexGetSourceFile(pDexFile, pClassDef);
+    newClass->status = CLASS_LOADED;
+
+    /* caller must call dvmReleaseTrackedAlloc */
+    return newClass;
+}
+
+/*
+ * Try to load the indicated class from the specified DEX file.
+ *
+ * This is effectively loadClass()+defineClass() for a DexClassDef.  The
+ * loading was largely done when we crunched through the DEX.
+ *
+ * Returns NULL on failure.  If we locate the class but encounter an error
+ * while processing it, an appropriate exception is thrown.
+ */
+static ClassObject* loadClassFromDex(DvmDex* pDvmDex,
+    const DexClassDef* pClassDef, Object* classLoader)
+{
+    ClassObject* result;
+    DexClassDataHeader header;
+    const u1* pEncodedData;
+    const DexFile* pDexFile;
+
+    assert((pDvmDex != NULL) && (pClassDef != NULL));
+    pDexFile = pDvmDex->pDexFile;
+
+    if (gDvm.verboseClass) {
+        LOGV("CLASS: loading '%s'...\n",
+            dexGetClassDescriptor(pDexFile, pClassDef));
+    }
+
+    pEncodedData = dexGetClassData(pDexFile, pClassDef);
+
+    if (pEncodedData != NULL) {
+        dexReadClassDataHeader(&pEncodedData, &header);
+    } else {
+        // Provide an all-zeroes header for the rest of the loading.
+        memset(&header, 0, sizeof(header));
+    }
+    
+    result = loadClassFromDex0(pDvmDex, pClassDef, &header, pEncodedData,
+            classLoader);
+
+    if (gDvm.verboseClass && (result != NULL)) {
+        LOGI("[Loaded %s from DEX %p (cl=%p)]\n",
+            result->descriptor, pDvmDex, classLoader);
+    }
+    
+    return result;
+}    
+
+/*
+ * Free anything in a ClassObject that was allocated on the system heap.
+ *
+ * The ClassObject itself is allocated on the GC heap, so we leave it for
+ * the garbage collector.
+ *
+ * NOTE: this may be called with a partially-constructed object.
+ * NOTE: there is no particular ordering imposed, so don't go poking at
+ * superclasses.
+ */
+void dvmFreeClassInnards(ClassObject* clazz)
+{
+    void *tp;
+    int i;
+
+    if (clazz == NULL)
+        return;
+
+    assert(clazz->obj.clazz == gDvm.classJavaLangClass ||
+           clazz->obj.clazz == gDvm.unlinkedJavaLangClass);
+
+    /* Guarantee that dvmFreeClassInnards can be called on a given
+     * class multiple times by clearing things out as we free them.
+     * We don't make any attempt at real atomicity here; higher
+     * levels need to make sure that no two threads can free the
+     * same ClassObject at the same time.
+     *
+     * TODO: maybe just make it so the GC will never free the
+     * innards of an already-freed class.
+     *
+     * TODO: this #define isn't MT-safe -- the compiler could rearrange it.
+     */
+#define NULL_AND_FREE(p) \
+    do { \
+        if ((p) != NULL) { \
+            tp = (p); \
+            (p) = NULL; \
+            free(tp); \
+        } \
+    } while (0)
+#define NULL_AND_LINEAR_FREE(p) \
+    do { \
+        if ((p) != NULL) { \
+            tp = (p); \
+            (p) = NULL; \
+            dvmLinearFree(clazz->classLoader, tp); \
+        } \
+    } while (0)
+
+    /* arrays just point at Object's vtable; don't free vtable in this case.
+     * dvmIsArrayClass() checks clazz->descriptor, so we have to do this check
+     * before freeing the name.
+     */
+    clazz->vtableCount = -1;
+    if (dvmIsArrayClass(clazz)) {
+        clazz->vtable = NULL;
+    } else {
+        NULL_AND_LINEAR_FREE(clazz->vtable);
+    }
+
+    clazz->descriptor = NULL;
+    NULL_AND_FREE(clazz->descriptorAlloc);
+
+    if (clazz->directMethods != NULL) {
+        Method *directMethods = clazz->directMethods;
+        int directMethodCount = clazz->directMethodCount;
+        clazz->directMethods = NULL;
+        clazz->directMethodCount = -1;
+        for (i = 0; i < directMethodCount; i++) {
+            freeMethodInnards(&directMethods[i]);
+        }
+        dvmLinearFree(clazz->classLoader, directMethods);
+    }
+    if (clazz->virtualMethods != NULL) {
+        Method *virtualMethods = clazz->virtualMethods;
+        int virtualMethodCount = clazz->virtualMethodCount;
+        clazz->virtualMethodCount = -1;
+        clazz->virtualMethods = NULL;
+        for (i = 0; i < virtualMethodCount; i++) {
+            freeMethodInnards(&virtualMethods[i]);
+        }
+        dvmLinearFree(clazz->classLoader, virtualMethods);
+    }
+
+    clazz->initiatingLoaderCount = -1;
+    NULL_AND_FREE(clazz->initiatingLoaders);
+
+    clazz->interfaceCount = -1;
+    NULL_AND_LINEAR_FREE(clazz->interfaces);
+
+    clazz->iftableCount = -1;
+    NULL_AND_LINEAR_FREE(clazz->iftable);
+
+    clazz->ifviPoolCount = -1;
+    NULL_AND_LINEAR_FREE(clazz->ifviPool);
+
+    clazz->sfieldCount = -1;
+    NULL_AND_FREE(clazz->sfields);
+
+    clazz->ifieldCount = -1;
+    NULL_AND_LINEAR_FREE(clazz->ifields);
+
+#undef NULL_AND_FREE
+#undef NULL_AND_LINEAR_FREE
+}
+
+/*
+ * Free anything in a Method that was allocated on the system heap.
+ */
+static void freeMethodInnards(Method* meth)
+{
+#if 0
+    free(meth->exceptions);
+    free(meth->lines);
+    free(meth->locals);
+#else
+    UNUSED_PARAMETER(meth);
+#endif
+}
+
+/*
+ * Clone a Method, making new copies of anything that will be freed up
+ * by freeMethodInnards().
+ */
+static void cloneMethod(Method* dst, const Method* src)
+{
+    memcpy(dst, src, sizeof(Method));
+#if 0
+    /* for current usage, these are never set, so no need to implement copy */
+    assert(dst->exceptions == NULL);
+    assert(dst->lines == NULL);
+    assert(dst->locals == NULL);
+#endif
+}
+
+/*
+ * Pull the interesting pieces out of a DexMethod.
+ *
+ * The DEX file isn't going anywhere, so we don't need to make copies of
+ * the code area.
+ */
+static void loadMethodFromDex(ClassObject* clazz, const DexMethod* pDexMethod,
+    Method* meth)
+{
+    DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+    const DexMethodId* pMethodId;
+    const DexCode* pDexCode;
+
+    pMethodId = dexGetMethodId(pDexFile, pDexMethod->methodIdx);
+
+    meth->name = dexStringById(pDexFile, pMethodId->nameIdx);
+    dexProtoSetFromMethodId(&meth->prototype, pDexFile, pMethodId);
+    meth->shorty = dexProtoGetShorty(&meth->prototype);
+    meth->accessFlags = pDexMethod->accessFlags;
+    meth->clazz = clazz;
+    meth->jniArgInfo = 0;
+
+    if (dvmCompareNameDescriptorAndMethod("finalize", "()V", meth) == 0) {
+        SET_CLASS_FLAG(clazz, CLASS_ISFINALIZABLE);
+    }
+
+    pDexCode = dexGetCode(pDexFile, pDexMethod);
+    if (pDexCode != NULL) {
+        /* integer constants, copy over for faster access */
+        meth->registersSize = pDexCode->registersSize;
+        meth->insSize = pDexCode->insSize;
+        meth->outsSize = pDexCode->outsSize;
+
+        /* pointer to code area */
+        meth->insns = pDexCode->insns;
+    } else {
+        /*
+         * We don't have a DexCode block, but we still want to know how
+         * much space is needed for the arguments (so we don't have to
+         * compute it later).  We also take this opportunity to compute
+         * JNI argument info.
+         *
+         * We do this for abstract methods as well, because we want to
+         * be able to substitute our exception-throwing "stub" in.
+         */
+        int argsSize = dvmComputeMethodArgsSize(meth);
+        if (!dvmIsStaticMethod(meth))
+            argsSize++;
+        meth->registersSize = meth->insSize = argsSize;
+        assert(meth->outsSize == 0);
+        assert(meth->insns == NULL);
+
+        if (dvmIsNativeMethod(meth)) {
+            meth->nativeFunc = dvmResolveNativeMethod;
+            meth->jniArgInfo = computeJniArgInfo(&meth->prototype);
+        }
+    }
+}
+
+/*
+ * jniArgInfo (32-bit int) layout:
+ *   SRRRLLLL FFFFFFFF FFFFFFFF FFFFFFFF
+ *   
+ *   S - if set, do things the hard way (scan the signature)
+ *   R - return-type enumeration
+ *   L - number of double-words of storage required on stack (0-30 words)
+ *   F - pad flag -- if set, write a pad word to the stack before copying
+ *       the next 32 bits
+ *   
+ * With this arrangement we can push up to 24 words of arguments without
+ * having to scan the signature.  Only works for ABIs that don't require
+ * special handling of floating-point args (e.g. ARM can make use of it,
+ * PPC can't).
+ *
+ * The return-type bits are always set, even if we have too many args to
+ * set the L/F bits.  This allows us to avoid scanning through the signature
+ * for the return type on all platforms.
+ */
+static int computeJniArgInfo(const DexProto* proto)
+{
+    const char* sig = dexProtoGetShorty(proto);
+    int returnType, padFlags, jniArgInfo;
+    char sigByte;
+    int stackOffset, padMask;
+
+    stackOffset = padFlags = 0;
+    padMask = 0x00000001;
+
+    /* The first shorty character is the return type. */
+    switch (*(sig++)) {
+    case 'V':
+        returnType = DALVIK_JNI_RETURN_VOID;
+        break;
+    case 'F':
+        returnType = DALVIK_JNI_RETURN_FLOAT;
+        break;
+    case 'D':
+        returnType = DALVIK_JNI_RETURN_DOUBLE;
+        break;
+    case 'J':
+        returnType = DALVIK_JNI_RETURN_S8;
+        break;
+    default:
+        returnType = DALVIK_JNI_RETURN_S4;
+        break;
+    }
+    
+    while (true) {
+        sigByte = *(sig++);
+
+        if (sigByte == '\0')
+            break;
+
+        if (sigByte == 'D' || sigByte == 'J') {
+            if ((stackOffset & 1) != 0) {
+                padFlags |= padMask;
+                stackOffset++;
+                padMask <<= 1;
+            }
+            stackOffset += 2;
+            padMask <<= 2;
+        } else {
+            stackOffset++;
+            padMask <<= 1;
+        }
+    }
+
+    jniArgInfo = returnType << DALVIK_JNI_RETURN_SHIFT;
+
+    if (stackOffset > DALVIK_JNI_COUNT_SHIFT) {
+        /* too big for "fast" version */
+        jniArgInfo |= DALVIK_JNI_NO_ARG_INFO;
+    } else {
+        assert((padFlags & (0xffffffff << DALVIK_JNI_COUNT_SHIFT)) == 0);
+        stackOffset -= 2;           // r2/r3 holds first two items
+        if (stackOffset < 0)
+            stackOffset = 0;
+        jniArgInfo |= ((stackOffset+1) / 2) << DALVIK_JNI_COUNT_SHIFT;
+        jniArgInfo |= padFlags;
+    }
+
+    return jniArgInfo;
+}
+
+/*
+ * Load information about a static field.
+ *
+ * This also "prepares" static fields by initializing them
+ * to their "standard default values".
+ */
+static void loadSFieldFromDex(ClassObject* clazz,
+    const DexField* pDexSField, StaticField* sfield)
+{
+    DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+    const DexFieldId* pFieldId;
+
+    pFieldId = dexGetFieldId(pDexFile, pDexSField->fieldIdx);
+
+    sfield->field.clazz = clazz;
+    sfield->field.name = dexStringById(pDexFile, pFieldId->nameIdx);
+    sfield->field.signature = dexStringByTypeIdx(pDexFile, pFieldId->typeIdx);
+    sfield->field.accessFlags = pDexSField->accessFlags;
+
+    /* Static object field values are set to "standard default values"
+     * (null or 0) until the class is initialized.  We delay loading
+     * constant values from the class until that time.
+     */
+    //sfield->value.j = 0;
+    assert(sfield->value.j == 0LL);     // cleared earlier with calloc
+
+#ifdef PROFILE_FIELD_ACCESS
+    sfield->field.gets = sfield->field.puts = 0;
+#endif
+}
+
+/*
+ * Load information about an instance field.
+ */
+static void loadIFieldFromDex(ClassObject* clazz,
+    const DexField* pDexIField, InstField* ifield)
+{
+    DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+    const DexFieldId* pFieldId;
+
+    pFieldId = dexGetFieldId(pDexFile, pDexIField->fieldIdx);
+
+    ifield->field.clazz = clazz;
+    ifield->field.name = dexStringById(pDexFile, pFieldId->nameIdx);
+    ifield->field.signature = dexStringByTypeIdx(pDexFile, pFieldId->typeIdx);
+    ifield->field.accessFlags = pDexIField->accessFlags;
+#ifndef NDEBUG
+    assert(ifield->byteOffset == 0);    // cleared earlier with calloc
+    ifield->byteOffset = -1;    // make it obvious if we fail to set later
+#endif
+
+#ifdef PROFILE_FIELD_ACCESS
+    ifield->field.gets = ifield->field.puts = 0;
+#endif
+}
+
+/*
+ * Cache java.lang.ref.Reference fields and methods.
+ */
+static bool precacheReferenceOffsets(ClassObject *clazz)
+{
+    Method *meth;
+    int i;
+
+    /* We trick the GC object scanner by not counting
+     * java.lang.ref.Reference.referent as an object
+     * field.  It will get explicitly scanned as part
+     * of the reference-walking process.
+     *
+     * Find the object field named "referent" and put it
+     * just after the list of object reference fields.
+     */
+    dvmLinearReadWrite(clazz->classLoader, clazz->ifields);
+    for (i = 0; i < clazz->ifieldRefCount; i++) {
+        InstField *pField = &clazz->ifields[i];
+        if (strcmp(pField->field.name, "referent") == 0) {
+            int targetIndex;
+
+            /* Swap this field with the last object field.
+             */
+            targetIndex = clazz->ifieldRefCount - 1;
+            if (i != targetIndex) {
+                InstField *swapField = &clazz->ifields[targetIndex];
+                InstField tmpField;
+                int tmpByteOffset;
+
+                /* It's not currently strictly necessary
+                 * for the fields to be in byteOffset order,
+                 * but it's more predictable that way.
+                 */
+                tmpByteOffset = swapField->byteOffset;
+                swapField->byteOffset = pField->byteOffset;
+                pField->byteOffset = tmpByteOffset;
+
+                tmpField = *swapField;
+                *swapField = *pField;
+                *pField = tmpField;
+            }
+
+            /* One fewer object field (wink wink).
+             */
+            clazz->ifieldRefCount--;
+            i--;        /* don't trip "didn't find it" test if field was last */
+            break;
+        }
+    }
+    dvmLinearReadOnly(clazz->classLoader, clazz->ifields);
+    if (i == clazz->ifieldRefCount) {
+        LOGE("Unable to reorder 'referent' in %s\n", clazz->descriptor);
+        return false;
+    }
+
+    /* Cache pretty much everything about Reference so that
+     * we don't need to call interpreted code when clearing/enqueueing
+     * references.  This is fragile, so we'll be paranoid.
+     */
+    gDvm.classJavaLangRefReference = clazz;
+
+    gDvm.offJavaLangRefReference_referent =
+        dvmFindFieldOffset(gDvm.classJavaLangRefReference,
+                "referent", "Ljava/lang/Object;");
+    assert(gDvm.offJavaLangRefReference_referent >= 0);
+
+    gDvm.offJavaLangRefReference_queue =
+        dvmFindFieldOffset(gDvm.classJavaLangRefReference,
+                "queue", "Ljava/lang/ref/ReferenceQueue;");
+    assert(gDvm.offJavaLangRefReference_queue >= 0);
+
+    gDvm.offJavaLangRefReference_queueNext =
+        dvmFindFieldOffset(gDvm.classJavaLangRefReference,
+                "queueNext", "Ljava/lang/ref/Reference;");
+    assert(gDvm.offJavaLangRefReference_queueNext >= 0);
+
+    gDvm.offJavaLangRefReference_vmData =
+        dvmFindFieldOffset(gDvm.classJavaLangRefReference,
+                "vmData", "I");
+    assert(gDvm.offJavaLangRefReference_vmData >= 0);
+
+#if FANCY_REFERENCE_SUBCLASS
+    meth = dvmFindVirtualMethodByDescriptor(clazz, "clear", "()V");
+    assert(meth != NULL);
+    gDvm.voffJavaLangRefReference_clear = meth->methodIndex;
+
+    meth = dvmFindVirtualMethodByDescriptor(clazz, "enqueue", "()Z");
+    assert(meth != NULL);
+    gDvm.voffJavaLangRefReference_enqueue = meth->methodIndex;
+#else
+    /* enqueueInternal() is private and thus a direct method. */
+    meth = dvmFindDirectMethodByDescriptor(clazz, "enqueueInternal", "()Z");
+    assert(meth != NULL);
+    gDvm.methJavaLangRefReference_enqueueInternal = meth;
+#endif
+
+    return true;
+}
+
+
+/*
+ * Link (prepare and resolve).  Verification is deferred until later.
+ *
+ * This converts symbolic references into pointers.  It's independent of
+ * the source file format.
+ *
+ * If "classesResolved" is false, we assume that superclassIdx and
+ * interfaces[] are holding class reference indices rather than pointers.
+ * The class references will be resolved during link.  (This is done when
+ * loading from DEX to avoid having to create additional storage to pass
+ * the indices around.)
+ *
+ * Returns "false" with an exception pending on failure.
+ */
+bool dvmLinkClass(ClassObject* clazz, bool classesResolved)
+{
+    u4 superclassIdx = 0;
+    bool okay = false;
+    bool resolve_okay;
+    int numInterfacesResolved = 0;
+    int i;
+
+    if (gDvm.verboseClass)
+        LOGV("CLASS: linking '%s'...\n", clazz->descriptor);
+
+    /* "Resolve" the class.
+     *
+     * At this point, clazz's reference fields contain Dex
+     * file indices instead of direct object references.
+     * We need to translate those indices into real references,
+     * while making sure that the GC doesn't sweep any of
+     * the referenced objects.
+     *
+     * The GC will avoid scanning this object as long as
+     * clazz->obj.clazz is gDvm.unlinkedJavaLangClass.
+     * Once clazz is ready, we'll replace clazz->obj.clazz
+     * with gDvm.classJavaLangClass to let the GC know
+     * to look at it.
+     */
+    assert(clazz->obj.clazz == gDvm.unlinkedJavaLangClass);
+
+    /* It's important that we take care of java.lang.Class
+     * first.  If we were to do this after looking up the
+     * superclass (below), Class wouldn't be ready when
+     * java.lang.Object needed it.
+     *
+     * Note that we don't set clazz->obj.clazz yet.
+     */
+    if (gDvm.classJavaLangClass == NULL) {
+        if (clazz->classLoader == NULL &&
+            strcmp(clazz->descriptor, "Ljava/lang/Class;") == 0)
+        {
+            gDvm.classJavaLangClass = clazz;
+        } else {
+            gDvm.classJavaLangClass =
+                dvmFindSystemClassNoInit("Ljava/lang/Class;");
+            if (gDvm.classJavaLangClass == NULL) {
+                /* should have thrown one */
+                assert(dvmCheckException(dvmThreadSelf()));
+                goto bail;
+            }
+        }
+    }
+    assert(gDvm.classJavaLangClass != NULL);
+
+    /*
+     * Resolve all Dex indices so we can hand the ClassObject
+     * over to the GC.  If we fail at any point, we need to remove
+     * any tracked references to avoid leaking memory.
+     */
+
+    /*
+     * All classes have a direct superclass, except for java/lang/Object.
+     */
+    if (!classesResolved) {
+        superclassIdx = (u4) clazz->super;          /* unpack temp store */
+        clazz->super = NULL;
+    }
+    if (strcmp(clazz->descriptor, "Ljava/lang/Object;") == 0) {
+        assert(!classesResolved);
+        if (superclassIdx != kDexNoIndex) {
+            /* TODO: is this invariant true for all java/lang/Objects,
+             * regardless of the class loader?  For now, assume it is.
+             */
+            dvmThrowException("Ljava/lang/ClassFormatError;",
+                "java.lang.Object has a superclass");
+            goto bail;
+        }
+
+        /* Don't finalize objects whose classes use the
+         * default (empty) Object.finalize().
+         */
+        CLEAR_CLASS_FLAG(clazz, CLASS_ISFINALIZABLE);
+    } else {
+        if (!classesResolved) {
+            if (superclassIdx == kDexNoIndex) {
+                dvmThrowException("Ljava/lang/LinkageError;",
+                    "no superclass defined");
+                goto bail;
+            }
+            clazz->super = dvmResolveClass(clazz, superclassIdx, false);
+            if (clazz->super == NULL) {
+                assert(dvmCheckException(dvmThreadSelf()));
+                if (gDvm.optimizing) {
+                    /* happens with "external" libs */
+                    LOGV("Unable to resolve superclass of %s (%d)\n",
+                        clazz->descriptor, superclassIdx);
+                } else {
+                    LOGW("Unable to resolve superclass of %s (%d)\n",
+                        clazz->descriptor, superclassIdx);
+                }
+                goto bail;
+            }
+        }
+        /* verify */
+        if (dvmIsFinalClass(clazz->super)) {
+            LOGW("Superclass of '%s' is final '%s'\n",
+                clazz->descriptor, clazz->super->descriptor);
+            dvmThrowException("Ljava/lang/IncompatibleClassChangeError;",
+                "superclass is final");
+            goto bail;
+        } else if (dvmIsInterfaceClass(clazz->super)) {
+            LOGW("Superclass of '%s' is interface '%s'\n",
+                clazz->descriptor, clazz->super->descriptor);
+            dvmThrowException("Ljava/lang/IncompatibleClassChangeError;",
+                "superclass is an interface");
+            goto bail;
+        } else if (!dvmCheckClassAccess(clazz, clazz->super)) {
+            LOGW("Superclass of '%s' (%s) is not accessible\n",
+                clazz->descriptor, clazz->super->descriptor);
+            dvmThrowException("Ljava/lang/IllegalAccessError;",
+                "superclass not accessible");
+            goto bail;
+        }
+
+        /* Don't let the GC reclaim the superclass.
+         * TODO: shouldn't be needed; remove when things stabilize
+         */
+        dvmAddTrackedAlloc((Object *)clazz->super, NULL);
+
+        /* Inherit finalizability from the superclass.  If this
+         * class also overrides finalize(), its CLASS_ISFINALIZABLE
+         * bit will already be set.
+         */
+        if (IS_CLASS_FLAG_SET(clazz->super, CLASS_ISFINALIZABLE)) {
+            SET_CLASS_FLAG(clazz, CLASS_ISFINALIZABLE);
+        }
+
+        /* See if this class descends from java.lang.Reference
+         * and set the class flags appropriately.
+         */
+        if (IS_CLASS_FLAG_SET(clazz->super, CLASS_ISREFERENCE)) {
+            u4 superRefFlags;
+
+            /* We've already determined the reference type of this
+             * inheritance chain.  Inherit reference-ness from the superclass.
+             */
+            superRefFlags = GET_CLASS_FLAG_GROUP(clazz->super,
+                    CLASS_ISREFERENCE |
+                    CLASS_ISWEAKREFERENCE |
+                    CLASS_ISPHANTOMREFERENCE);
+            SET_CLASS_FLAG(clazz, superRefFlags);
+        } else if (clazz->classLoader == NULL &&
+                clazz->super->classLoader == NULL &&
+                strcmp(clazz->super->descriptor,
+                       "Ljava/lang/ref/Reference;") == 0)
+        {
+            u4 refFlags;
+
+            /* This class extends Reference, which means it should
+             * be one of the magic Soft/Weak/PhantomReference classes.
+             */
+            refFlags = CLASS_ISREFERENCE;
+            if (strcmp(clazz->descriptor,
+                       "Ljava/lang/ref/SoftReference;") == 0)
+            {
+                /* Only CLASS_ISREFERENCE is set for soft references.
+                 */
+            } else if (strcmp(clazz->descriptor,
+                       "Ljava/lang/ref/WeakReference;") == 0)
+            {
+                refFlags |= CLASS_ISWEAKREFERENCE;
+            } else if (strcmp(clazz->descriptor,
+                       "Ljava/lang/ref/PhantomReference;") == 0)
+            {
+                refFlags |= CLASS_ISPHANTOMREFERENCE;
+            } else {
+                /* No-one else is allowed to inherit directly
+                 * from Reference.
+                 */
+//xxx is this the right exception?  better than an assertion.
+                dvmThrowException("Ljava/lang/LinkageError;",
+                    "illegal inheritance from Reference");
+                goto bail;
+            }
+
+            /* The class should not have any reference bits set yet.
+             */
+            assert(GET_CLASS_FLAG_GROUP(clazz,
+                    CLASS_ISREFERENCE |
+                    CLASS_ISWEAKREFERENCE |
+                    CLASS_ISPHANTOMREFERENCE) == 0);
+
+            SET_CLASS_FLAG(clazz, refFlags);
+        }
+    }
+
+    if (!classesResolved && clazz->interfaceCount > 0) {
+        /*
+         * Resolve the interfaces implemented directly by this class.  We
+         * stuffed the class index into the interface pointer slot.
+         */
+        dvmLinearReadWrite(clazz->classLoader, clazz->interfaces);
+        for (i = 0; i < clazz->interfaceCount; i++) {
+            u4 interfaceIdx;
+
+            interfaceIdx = (u4) clazz->interfaces[i];   /* unpack temp store */
+            assert(interfaceIdx != kDexNoIndex);
+
+            clazz->interfaces[i] = dvmResolveClass(clazz, interfaceIdx, false);
+            if (clazz->interfaces[i] == NULL) {
+                const DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+
+                assert(dvmCheckException(dvmThreadSelf()));
+                dvmLinearReadOnly(clazz->classLoader, clazz->interfaces);
+
+                const char* classDescriptor;
+                classDescriptor = dexStringByTypeIdx(pDexFile, interfaceIdx);
+                if (gDvm.optimizing) {
+                    /* happens with "external" libs */
+                    LOGV("Failed resolving %s interface %d '%s'\n",
+                        clazz->descriptor, interfaceIdx, classDescriptor);
+                } else {
+                    LOGI("Failed resolving %s interface %d '%s'\n",
+                        clazz->descriptor, interfaceIdx, classDescriptor);
+                }
+                goto bail_during_resolve;
+            }
+
+            /* are we allowed to implement this interface? */
+            if (!dvmCheckClassAccess(clazz, clazz->interfaces[i])) {
+                dvmLinearReadOnly(clazz->classLoader, clazz->interfaces);
+                LOGW("Interface '%s' is not accessible to '%s'\n",
+                    clazz->interfaces[i]->descriptor, clazz->descriptor);
+                dvmThrowException("Ljava/lang/IllegalAccessError;",
+                    "interface not accessible");
+                goto bail_during_resolve;
+            }
+
+            /* Don't let the GC reclaim the interface class.
+             * TODO: shouldn't be needed; remove when things stabilize
+             */
+            dvmAddTrackedAlloc((Object *)clazz->interfaces[i], NULL);
+            numInterfacesResolved++;
+
+            LOGVV("+++  found interface '%s'\n",
+                clazz->interfaces[i]->descriptor);
+        }
+        dvmLinearReadOnly(clazz->classLoader, clazz->interfaces);
+    }
+
+    /*
+     * The ClassObject is now in a GC-able state.  We let the GC
+     * realize this by punching in the real class type, which is
+     * always java.lang.Class.
+     *
+     * After this line, clazz will be fair game for the GC.
+     * Every field that the GC will look at must now be valid:
+     * - clazz->super
+     * - class->classLoader
+     * - clazz->sfields
+     * - clazz->interfaces
+     */
+    clazz->obj.clazz = gDvm.classJavaLangClass;
+
+    if (false) {
+bail_during_resolve:
+        resolve_okay = false;
+    } else {
+        resolve_okay = true;
+    }
+
+    /*
+     * Now that the GC can scan the ClassObject, we can let
+     * go of the explicit references we were holding onto.
+     *
+     * Either that or we failed, in which case we need to
+     * release the references so we don't leak memory.
+     */
+    if (clazz->super != NULL) {
+        dvmReleaseTrackedAlloc((Object *)clazz->super, NULL);
+    }
+    for (i = 0; i < numInterfacesResolved; i++) {
+        dvmReleaseTrackedAlloc((Object *)clazz->interfaces[i], NULL);
+    }
+
+    if (!resolve_okay) {
+        //LOGW("resolve_okay is false\n");
+        goto bail;
+    }
+
+    /*
+     * Populate vtable.
+     */
+    if (dvmIsInterfaceClass(clazz)) {
+        /* no vtable; just set the method indices */
+        int count = clazz->virtualMethodCount;
+
+        if (count != (u2) count) {
+            LOGE("Too many methods (%d) in interface '%s'\n", count,
+                 clazz->descriptor);
+            goto bail;
+        }
+
+        dvmLinearReadWrite(clazz->classLoader, clazz->virtualMethods);
+
+        for (i = 0; i < count; i++)
+            clazz->virtualMethods[i].methodIndex = (u2) i;
+
+        dvmLinearReadOnly(clazz->classLoader, clazz->virtualMethods);
+    } else {
+        if (!createVtable(clazz)) {
+            LOGW("failed creating vtable\n");
+            goto bail;
+        }
+    }
+
+    /*
+     * Populate interface method tables.  Can alter the vtable.
+     */
+    if (!createIftable(clazz))
+        goto bail;
+
+    /*
+     * Insert special-purpose "stub" method implementations.
+     */
+    if (!insertMethodStubs(clazz))
+        goto bail;
+
+    /*
+     * Compute instance field offsets and, hence, the size of the object.
+     */
+    if (!computeFieldOffsets(clazz))
+        goto bail;
+
+    /*
+     * Cache fields and methods from java/lang/ref/Reference and
+     * java/lang/Class.  This has to happen after computeFieldOffsets().
+     */
+    if (clazz->classLoader == NULL) {
+        if (strcmp(clazz->descriptor, "Ljava/lang/ref/Reference;") == 0) {
+            if (!precacheReferenceOffsets(clazz)) {
+                LOGE("failed pre-caching Reference offsets\n");
+                dvmThrowException("Ljava/lang/InternalError;", NULL);
+                goto bail;
+            }
+        } else if (clazz == gDvm.classJavaLangClass) {
+            gDvm.offJavaLangClass_pd = dvmFindFieldOffset(clazz, "pd",
+                "Ljava/security/ProtectionDomain;");
+            if (gDvm.offJavaLangClass_pd <= 0) {
+                LOGE("ERROR: unable to find 'pd' field in Class\n");
+                dvmAbort();     /* we're not going to get much farther */
+                //goto bail;
+            }
+        }
+    }
+
+    /*
+     * Done!
+     */
+    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISPREVERIFIED))
+        clazz->status = CLASS_VERIFIED;
+    else
+        clazz->status = CLASS_RESOLVED;
+    okay = true;
+    if (gDvm.verboseClass)
+        LOGV("CLASS: linked '%s'\n", clazz->descriptor);
+
+    /*
+     * We send CLASS_PREPARE events to the debugger from here.  The
+     * definition of "preparation" is creating the static fields for a
+     * class and initializing them to the standard default values, but not
+     * executing any code (that comes later, during "initialization").
+     *
+     * We did the static prep in loadSFieldFromDex() while loading the class.
+     *
+     * The class has been prepared and resolved but possibly not yet verified
+     * at this point.
+     */
+    if (gDvm.debuggerActive) {
+        dvmDbgPostClassPrepare(clazz);
+    }
+
+bail:
+    if (!okay) {
+        clazz->status = CLASS_ERROR;
+        if (!dvmCheckException(dvmThreadSelf())) {
+            dvmThrowException("Ljava/lang/VirtualMachineError;", NULL);
+        }
+    }
+    return okay;
+}
+
+/*
+ * Create the virtual method table.
+ *
+ * The top part of the table is a copy of the table from our superclass,
+ * with our local methods overriding theirs.  The bottom part of the table
+ * has any new methods we defined.
+ */
+static bool createVtable(ClassObject* clazz)
+{
+    bool result = false;
+    int maxCount;
+    int i;
+
+    if (clazz->super != NULL) {
+        //LOGI("SUPER METHODS %d %s->%s\n", clazz->super->vtableCount,
+        //    clazz->descriptor, clazz->super->descriptor);
+    }
+
+    /* the virtual methods we define, plus the superclass vtable size */
+    maxCount = clazz->virtualMethodCount;
+    if (clazz->super != NULL) {
+        maxCount += clazz->super->vtableCount;
+    } else {
+        /* TODO: is this invariant true for all java/lang/Objects,
+         * regardless of the class loader?  For now, assume it is.
+         */
+        assert(strcmp(clazz->descriptor, "Ljava/lang/Object;") == 0);
+    }
+    //LOGD("+++ max vmethods for '%s' is %d\n", clazz->descriptor, maxCount);
+
+    /*
+     * Over-allocate the table, then realloc it down if necessary.  So
+     * long as we don't allocate anything in between we won't cause
+     * fragmentation, and reducing the size should be unlikely to cause
+     * a buffer copy.
+     */
+    dvmLinearReadWrite(clazz->classLoader, clazz->virtualMethods);
+    clazz->vtable = (Method**) dvmLinearAlloc(clazz->classLoader,
+                        sizeof(Method*) * maxCount);
+    if (clazz->vtable == NULL)
+        goto bail;
+
+    if (clazz->super != NULL) {
+        int actualCount;
+
+        memcpy(clazz->vtable, clazz->super->vtable,
+            sizeof(*(clazz->vtable)) * clazz->super->vtableCount);
+        actualCount = clazz->super->vtableCount;
+
+        /*
+         * See if any of our virtual methods override the superclass.
+         */
+        for (i = 0; i < clazz->virtualMethodCount; i++) {
+            Method* localMeth = &clazz->virtualMethods[i];
+            int si;
+
+            for (si = 0; si < clazz->super->vtableCount; si++) {
+                Method* superMeth = clazz->vtable[si];
+
+                if (dvmCompareMethodNamesAndProtos(localMeth, superMeth) == 0)
+                {
+                    /* verify */
+                    if (dvmIsFinalMethod(superMeth)) {
+                        LOGW("Method %s.%s overrides final %s.%s\n",
+                            localMeth->clazz->descriptor, localMeth->name,
+                            superMeth->clazz->descriptor, superMeth->name);
+                        goto bail;
+                    }
+                    clazz->vtable[si] = localMeth;
+                    localMeth->methodIndex = (u2) si;
+                    //LOGV("+++   override %s.%s (slot %d)\n",
+                    //    clazz->descriptor, localMeth->name, si);
+                    break;
+                }
+            }
+
+            if (si == clazz->super->vtableCount) {
+                /* not an override, add to end */
+                clazz->vtable[actualCount] = localMeth;
+                localMeth->methodIndex = (u2) actualCount;
+                actualCount++;
+
+                //LOGV("+++   add method %s.%s\n",
+                //    clazz->descriptor, localMeth->name);
+            }
+        }
+
+        if (actualCount != (u2) actualCount) {
+            LOGE("Too many methods (%d) in class '%s'\n", actualCount,
+                 clazz->descriptor);
+            goto bail;
+        }
+        
+        assert(actualCount <= maxCount);
+
+        if (actualCount < maxCount) {
+            assert(clazz->vtable != NULL);
+            dvmLinearReadOnly(clazz->classLoader, clazz->vtable);
+            clazz->vtable = dvmLinearRealloc(clazz->classLoader, clazz->vtable,
+                sizeof(*(clazz->vtable)) * actualCount);
+            if (clazz->vtable == NULL) {
+                LOGE("vtable realloc failed\n");
+                goto bail;
+            } else {
+                LOGVV("+++  reduced vtable from %d to %d\n",
+                    maxCount, actualCount);
+            }
+        }
+
+        clazz->vtableCount = actualCount;
+    } else {
+        /* java/lang/Object case */
+        int count = clazz->virtualMethodCount;
+        if (count != (u2) count) {
+            LOGE("Too many methods (%d) in base class '%s'\n", count,
+                 clazz->descriptor);
+            goto bail;
+        }
+
+        for (i = 0; i < count; i++) {
+            clazz->vtable[i] = &clazz->virtualMethods[i];
+            clazz->virtualMethods[i].methodIndex = (u2) i;
+        }
+        clazz->vtableCount = clazz->virtualMethodCount;
+    }
+
+    result = true;
+
+bail:
+    dvmLinearReadOnly(clazz->classLoader, clazz->vtable);
+    dvmLinearReadOnly(clazz->classLoader, clazz->virtualMethods);
+    return result;
+}
+
+/*
+ * Create and populate "iftable".
+ *
+ * The set of interfaces we support is the combination of the interfaces
+ * we implement directly and those implemented by our superclass.  Each
+ * interface can have one or more "superinterfaces", which we must also
+ * support.  For speed we flatten the tree out.
+ *
+ * We might be able to speed this up when there are lots of interfaces
+ * by merge-sorting the class pointers and binary-searching when removing
+ * duplicates.  We could also drop the duplicate removal -- it's only
+ * there to reduce the memory footprint.
+ *
+ * Because of "Miranda methods", this may reallocate clazz->virtualMethods.
+ *
+ * Returns "true" on success.
+ */
+static bool createIftable(ClassObject* clazz)
+{
+    bool result = false;
+    bool zapIftable = false;
+    bool zapVtable = false;
+    bool zapIfvipool = false;
+    int ifCount, superIfCount, idx;
+    int i;
+
+    if (clazz->super != NULL)
+        superIfCount = clazz->super->iftableCount;
+    else
+        superIfCount = 0;
+
+    ifCount = superIfCount;
+    ifCount += clazz->interfaceCount;
+    for (i = 0; i < clazz->interfaceCount; i++)
+        ifCount += clazz->interfaces[i]->iftableCount;
+
+    LOGVV("INTF: class '%s' direct w/supra=%d super=%d total=%d\n",
+        clazz->descriptor, ifCount - superIfCount, superIfCount, ifCount);
+
+    if (ifCount == 0) {
+        assert(clazz->iftableCount == 0);
+        assert(clazz->iftable == NULL);
+        result = true;
+        goto bail;
+    }
+
+    /*
+     * Create a table with enough space for all interfaces, and copy the
+     * superclass' table in.
+     */
+    clazz->iftable = (InterfaceEntry*) dvmLinearAlloc(clazz->classLoader,
+                        sizeof(InterfaceEntry) * ifCount);
+    zapIftable = true;
+    memset(clazz->iftable, 0x00, sizeof(InterfaceEntry) * ifCount);
+    if (superIfCount != 0) {
+        memcpy(clazz->iftable, clazz->super->iftable,
+            sizeof(InterfaceEntry) * superIfCount);
+    }
+
+    /*
+     * Create a flattened interface hierarchy of our immediate interfaces.
+     */
+    idx = superIfCount;
+
+    for (i = 0; i < clazz->interfaceCount; i++) {
+        ClassObject* interf;
+        int j;
+
+        interf = clazz->interfaces[i];
+        assert(interf != NULL);
+
+        /* make sure this is still an interface class */
+        if (!dvmIsInterfaceClass(interf)) {
+            LOGW("Class '%s' implements non-interface '%s'\n",
+                clazz->descriptor, interf->descriptor);
+            dvmThrowExceptionWithClassMessage(
+                "Ljava/lang/IncompatibleClassChangeError;",
+                clazz->descriptor);
+            goto bail;
+        }
+
+        /* add entry for this interface */
+        clazz->iftable[idx++].clazz = interf;
+
+        /* add entries for the interface's superinterfaces */
+        for (j = 0; j < interf->iftableCount; j++) {
+            clazz->iftable[idx++].clazz = interf->iftable[j].clazz;
+        }
+    }
+
+    assert(idx == ifCount);
+
+    /*
+     * Remove anything redundant from our recent additions.  Note we have
+     * to traverse the recent adds when looking for duplicates, because
+     * it's possible the recent additions are self-redundant.  This
+     * reduces the memory footprint of classes with lots of inherited
+     * interfaces.
+     *
+     * (I don't know if this will cause problems later on when we're trying
+     * to find a static field.  It looks like the proper search order is
+     * (1) current class, (2) interfaces implemented by current class,
+     * (3) repeat with superclass.  A field implemented by an interface
+     * and by a superclass might come out wrong if the superclass also
+     * implements the interface.  The javac compiler will reject the
+     * situation as ambiguous, so the concern is somewhat artificial.)
+     *
+     * UPDATE: this makes ReferenceType.Interfaces difficult to implement,
+     * because it wants to return just the interfaces declared to be
+     * implemented directly by the class.  I'm excluding this code for now.
+     */
+    if (false) {
+    for (i = superIfCount; i < ifCount; i++) {
+        int j;
+
+        for (j = 0; j < ifCount; j++) {
+            if (i == j)
+                continue;
+            if (clazz->iftable[i].clazz == clazz->iftable[j].clazz) {
+                LOGVV("INTF: redundant interface %s in %s\n",
+                    clazz->iftable[i].clazz->descriptor,
+                    clazz->descriptor);
+
+                if (i != ifCount-1)
+                    memmove(&clazz->iftable[i], &clazz->iftable[i+1],
+                        (ifCount - i -1) * sizeof(InterfaceEntry));
+                ifCount--;
+                i--;        // adjust for i++ above
+                break;
+            }
+        }
+    }
+    LOGVV("INTF: class '%s' nodupes=%d\n", clazz->descriptor, ifCount);
+    }   // if (false)
+
+    clazz->iftableCount = ifCount;
+
+    /*
+     * If we're an interface, we don't need the vtable pointers, so
+     * we're done.  If this class doesn't implement an interface that our
+     * superclass doesn't have, then we again have nothing to do.
+     */
+    if (dvmIsInterfaceClass(clazz) || superIfCount == ifCount) {
+        //dvmDumpClass(clazz, kDumpClassFullDetail);
+        result = true;
+        goto bail;
+    }
+
+    /*
+     * When we're handling invokeinterface, we probably have an object
+     * whose type is an interface class rather than a concrete class.  We
+     * need to convert the method reference into a vtable index.  So, for
+     * every entry in "iftable", we create a list of vtable indices.
+     *
+     * Because our vtable encompasses the superclass vtable, we can use
+     * the vtable indices from our superclass for all of the interfaces
+     * that weren't directly implemented by us.
+     *
+     * Each entry in "iftable" has a pointer to the start of its set of
+     * vtable offsets.  The iftable entries in the superclass point to
+     * storage allocated in the superclass, and the iftable entries added
+     * for this class point to storage allocated in this class.  "iftable"
+     * is flat for fast access in a class and all of its subclasses, but
+     * "ifviPool" is only created for the topmost implementor.
+     */
+    int poolSize = 0;
+    for (i = superIfCount; i < ifCount; i++) {
+        /*
+         * Note it's valid for an interface to have no methods (e.g.
+         * java/io/Serializable).
+         */
+        LOGVV("INTF: pool: %d from %s\n",
+            clazz->iftable[i].clazz->virtualMethodCount,
+            clazz->iftable[i].clazz->descriptor);
+        poolSize += clazz->iftable[i].clazz->virtualMethodCount;
+    }
+
+    if (poolSize == 0) {
+        LOGVV("INTF: didn't find any new interfaces with methods\n");
+        result = true;
+        goto bail;
+    }
+
+    clazz->ifviPoolCount = poolSize;
+    clazz->ifviPool = (int*) dvmLinearAlloc(clazz->classLoader,
+                        poolSize * sizeof(int*));
+    zapIfvipool = true;
+
+    /*
+     * Fill in the vtable offsets for the interfaces that weren't part of
+     * our superclass.
+     */
+    int poolOffset = 0;
+    Method** mirandaList = NULL;
+    int mirandaCount = 0, mirandaAlloc = 0;
+
+    for (i = superIfCount; i < ifCount; i++) {
+        ClassObject* interface;
+        int methIdx;
+
+        clazz->iftable[i].methodIndexArray = clazz->ifviPool + poolOffset;
+        interface = clazz->iftable[i].clazz;
+        poolOffset += interface->virtualMethodCount;    // end here
+
+        /*
+         * For each method listed in the interface's method list, find the
+         * matching method in our class's method list.  We want to favor the
+         * subclass over the superclass, which just requires walking
+         * back from the end of the vtable.  (This only matters if the
+         * superclass defines a private method and this class redefines
+         * it -- otherwise it would use the same vtable slot.  In Dalvik
+         * those don't end up in the virtual method table, so it shouldn't
+         * matter which direction we go.  We walk it backward anyway.)
+         *
+         *
+         * Suppose we have the following arrangement:
+         *   public interface MyInterface
+         *     public boolean inInterface();
+         *   public abstract class MirandaAbstract implements MirandaInterface
+         *     //public abstract boolean inInterface(); // not declared!
+         *     public boolean inAbstract() { stuff }    // in vtable
+         *   public class MirandClass extends MirandaAbstract
+         *     public boolean inInterface() { stuff }
+         *     public boolean inAbstract() { stuff }    // in vtable
+         *
+         * The javac compiler happily compiles MirandaAbstract even though
+         * it doesn't declare all methods from its interface.  When we try
+         * to set up a vtable for MirandaAbstract, we find that we don't
+         * have an slot for inInterface.  To prevent this, we synthesize
+         * abstract method declarations in MirandaAbstract.
+         *
+         * We have to expand vtable and update some things that point at it,
+         * so we accumulate the method list and do it all at once below.
+         */
+        for (methIdx = 0; methIdx < interface->virtualMethodCount; methIdx++) {
+            Method* imeth = &interface->virtualMethods[methIdx];
+            int j;
+
+            IF_LOGVV() {
+                char* desc = dexProtoCopyMethodDescriptor(&imeth->prototype);
+                LOGVV("INTF:  matching '%s' '%s'\n", imeth->name, desc);
+                free(desc);
+            }
+
+            for (j = clazz->vtableCount-1; j >= 0; j--) {
+                if (dvmCompareMethodNamesAndProtos(imeth, clazz->vtable[j])
+                    == 0)
+                {
+                    LOGVV("INTF:   matched at %d\n", j);
+                    clazz->iftable[i].methodIndexArray[methIdx] = j;
+                    break;
+                }
+            }
+            if (j < 0) {
+                IF_LOGV() {
+                    char* desc =
+                        dexProtoCopyMethodDescriptor(&imeth->prototype);
+                    LOGV("No match for '%s' '%s' in '%s' (creating miranda)\n",
+                            imeth->name, desc, clazz->descriptor);
+                    free(desc);
+                }
+                //dvmThrowException("Ljava/lang/RuntimeException;", "Miranda!");
+                //return false;
+
+                if (mirandaCount == mirandaAlloc) {
+                    mirandaAlloc += 8;
+                    if (mirandaList == NULL) {
+                        mirandaList = dvmLinearAlloc(clazz->classLoader,
+                                        mirandaAlloc * sizeof(Method*));
+                    } else {
+                        dvmLinearReadOnly(clazz->classLoader, mirandaList);
+                        mirandaList = dvmLinearRealloc(clazz->classLoader,
+                                mirandaList, mirandaAlloc * sizeof(Method*));
+                    }
+                    assert(mirandaList != NULL);    // mem failed + we leaked
+                }
+
+                /*
+                 * These may be redundant (e.g. method with same name and
+                 * signature declared in two interfaces implemented by the
+                 * same abstract class).  We can squeeze the duplicates
+                 * out here.
+                 */
+                int mir;
+                for (mir = 0; mir < mirandaCount; mir++) {
+                    if (dvmCompareMethodNamesAndProtos(
+                            mirandaList[mir], imeth) == 0)
+                    {
+                        IF_LOGVV() {
+                            char* desc = dexProtoCopyMethodDescriptor(
+                                    &imeth->prototype);
+                            LOGVV("MIRANDA dupe: %s and %s %s%s\n",
+                                mirandaList[mir]->clazz->descriptor,
+                                imeth->clazz->descriptor,
+                                imeth->name, desc);
+                            free(desc);
+                        }
+                        break;
+                    }
+                }
+
+                /* point the iftable at a phantom slot index */
+                clazz->iftable[i].methodIndexArray[methIdx] =
+                    clazz->vtableCount + mir;
+                LOGVV("MIRANDA: %s points at slot %d\n",
+                    imeth->name, clazz->vtableCount + mir);
+
+                /* if non-duplicate among Mirandas, add to Miranda list */
+                if (mir == mirandaCount) {
+                    //LOGV("MIRANDA: holding '%s' in slot %d\n",
+                    //    imeth->name, mir);
+                    mirandaList[mirandaCount++] = imeth;
+                }
+            }
+        }
+    }
+
+    if (mirandaCount != 0) {
+        Method* newVirtualMethods;
+        Method* meth;
+        int oldMethodCount, oldVtableCount;
+
+        for (i = 0; i < mirandaCount; i++) {
+            LOGVV("MIRANDA %d: %s.%s\n", i,
+                mirandaList[i]->clazz->descriptor, mirandaList[i]->name);
+        }
+
+        /*
+         * We found methods in one or more interfaces for which we do not
+         * have vtable entries.  We have to expand our virtualMethods
+         * table (which might be empty) to hold some new entries.
+         */
+        if (clazz->virtualMethods == NULL) {
+            newVirtualMethods = (Method*) dvmLinearAlloc(clazz->classLoader,
+                sizeof(Method) * (clazz->virtualMethodCount + mirandaCount));
+        } else {
+            //dvmLinearReadOnly(clazz->classLoader, clazz->virtualMethods);
+            newVirtualMethods = (Method*) dvmLinearRealloc(clazz->classLoader,
+                clazz->virtualMethods,
+                sizeof(Method) * (clazz->virtualMethodCount + mirandaCount));
+        }
+        if (newVirtualMethods != clazz->virtualMethods) {
+            /*
+             * Table was moved in memory.  We have to run through the
+             * vtable and fix the pointers.  The vtable entries might be
+             * pointing at superclasses, so we flip it around: run through
+             * all locally-defined virtual methods, and fix their entries
+             * in the vtable.  (This would get really messy if sub-classes
+             * had already been loaded.)
+             *
+             * Reminder: clazz->virtualMethods and clazz->virtualMethodCount
+             * hold the virtual methods declared by this class.  The
+             * method's methodIndex is the vtable index, and is the same
+             * for all sub-classes (and all super classes in which it is
+             * defined).  We're messing with these because the Miranda
+             * stuff makes it look like the class actually has an abstract
+             * method declaration in it.
+             */
+            LOGVV("MIRANDA fixing vtable pointers\n");
+            dvmLinearReadWrite(clazz->classLoader, clazz->vtable);
+            Method* meth = newVirtualMethods;
+            for (i = 0; i < clazz->virtualMethodCount; i++, meth++)
+                clazz->vtable[meth->methodIndex] = meth;
+            dvmLinearReadOnly(clazz->classLoader, clazz->vtable);
+        }
+
+        oldMethodCount = clazz->virtualMethodCount;
+        clazz->virtualMethods = newVirtualMethods;
+        clazz->virtualMethodCount += mirandaCount;
+
+        dvmLinearReadOnly(clazz->classLoader, clazz->virtualMethods);
+
+        /*
+         * We also have to expand the vtable.
+         */
+        assert(clazz->vtable != NULL);
+        clazz->vtable = (Method**) dvmLinearRealloc(clazz->classLoader,
+                        clazz->vtable,
+                        sizeof(Method*) * (clazz->vtableCount + mirandaCount));
+        if (clazz->vtable == NULL) {
+            assert(false);
+            goto bail;
+        }
+        zapVtable = true;
+
+        oldVtableCount = clazz->vtableCount;
+        clazz->vtableCount += mirandaCount;
+
+        /*
+         * Now we need to create the fake methods.  We clone the abstract
+         * method definition from the interface and then replace a few
+         * things.
+         */
+        meth = clazz->virtualMethods + oldMethodCount;
+        for (i = 0; i < mirandaCount; i++, meth++) {
+            dvmLinearReadWrite(clazz->classLoader, clazz->virtualMethods);
+            cloneMethod(meth, mirandaList[i]);
+            meth->clazz = clazz;
+            meth->accessFlags |= ACC_MIRANDA;
+            meth->methodIndex = (u2) (oldVtableCount + i);
+            dvmLinearReadOnly(clazz->classLoader, clazz->virtualMethods);
+
+            /* point the new vtable entry at the new method */
+            clazz->vtable[oldVtableCount + i] = meth;
+        }
+
+        dvmLinearReadOnly(clazz->classLoader, mirandaList);
+        dvmLinearFree(clazz->classLoader, mirandaList);
+
+    }
+
+    /*
+     * TODO?
+     * Sort the interfaces by number of declared methods.  All we really
+     * want is to get the interfaces with zero methods at the end of the
+     * list, so that when we walk through the list during invoke-interface
+     * we don't examine interfaces that can't possibly be useful.
+     *
+     * The set will usually be small, so a simple insertion sort works.
+     *
+     * We have to be careful not to change the order of two interfaces
+     * that define the same method.  (Not a problem if we only move the
+     * zero-method interfaces to the end.)
+     *
+     * PROBLEM:
+     * If we do this, we will no longer be able to identify super vs.
+     * current class interfaces by comparing clazz->super->iftableCount.  This
+     * breaks anything that only wants to find interfaces declared directly
+     * by the class (dvmFindStaticFieldHier, ReferenceType.Interfaces,
+     * dvmDbgOutputAllInterfaces, etc).  Need to provide a workaround.
+     *
+     * We can sort just the interfaces implemented directly by this class,
+     * but that doesn't seem like it would provide much of an advantage.  I'm
+     * not sure this is worthwhile.
+     *
+     * (This has been made largely obsolete by the interface cache mechanism.)
+     */
+
+    //dvmDumpClass(clazz);
+
+    result = true;
+
+bail:
+    if (zapIftable)
+        dvmLinearReadOnly(clazz->classLoader, clazz->iftable);
+    if (zapVtable)
+        dvmLinearReadOnly(clazz->classLoader, clazz->vtable);
+    if (zapIfvipool)
+        dvmLinearReadOnly(clazz->classLoader, clazz->ifviPool);
+    return result;
+}
+
+
+/*
+ * Provide "stub" implementations for methods without them.
+ *
+ * Currently we provide an implementation for all abstract methods that
+ * throws an AbstractMethodError exception.  This allows us to avoid an
+ * explicit check for abstract methods in every virtual call.
+ *
+ * NOTE: for Miranda methods, the method declaration is a clone of what
+ * was found in the interface class.  That copy may already have had the
+ * function pointer filled in, so don't be surprised if it's not NULL.
+ *
+ * NOTE: this sets the "native" flag, giving us an "abstract native" method,
+ * which is nonsensical.  Need to make sure that this doesn't escape the
+ * VM.  We can either mask it out in reflection calls, or copy "native"
+ * into the high 16 bits of accessFlags and check that internally.
+ */
+static bool insertMethodStubs(ClassObject* clazz)
+{
+    dvmLinearReadWrite(clazz->classLoader, clazz->virtualMethods);
+
+    Method* meth;
+    int i;
+
+    meth = clazz->virtualMethods;
+    for (i = 0; i < clazz->virtualMethodCount; i++, meth++) {
+        if (dvmIsAbstractMethod(meth)) {
+            assert(meth->insns == NULL);
+            assert(meth->nativeFunc == NULL ||
+                meth->nativeFunc == (DalvikBridgeFunc)dvmAbstractMethodStub);
+
+            meth->accessFlags |= ACC_NATIVE;
+            meth->nativeFunc = (DalvikBridgeFunc) dvmAbstractMethodStub;
+        }
+    }
+
+    dvmLinearReadOnly(clazz->classLoader, clazz->virtualMethods);
+    return true;
+}
+
+
+/*
+ * Swap two instance fields.
+ */
+static inline void swapField(InstField* pOne, InstField* pTwo)
+{
+    InstField swap;
+
+    LOGVV("  --- swap '%s' and '%s'\n", pOne->field.name, pTwo->field.name);
+    swap = *pOne;
+    *pOne = *pTwo;
+    *pTwo = swap;
+}
+
+/*
+ * Assign instance fields to u4 slots.
+ *
+ * The top portion of the instance field area is occupied by the superclass
+ * fields, the bottom by the fields for this class.
+ *
+ * "long" and "double" fields occupy two adjacent slots.  On some
+ * architectures, 64-bit quantities must be 64-bit aligned, so we need to
+ * arrange fields (or introduce padding) to ensure this.  We assume the
+ * fields of the topmost superclass (i.e. Object) are 64-bit aligned, so
+ * we can just ensure that the offset is "even".  To avoid wasting space,
+ * we want to move non-reference 32-bit fields into gaps rather than
+ * creating pad words.
+ *
+ * In the worst case we will waste 4 bytes, but because objects are
+ * allocated on >= 64-bit boundaries, those bytes may well be wasted anyway
+ * (assuming this is the most-derived class).
+ *
+ * Pad words are not represented in the field table, so the field table
+ * itself does not change size.
+ *
+ * The number of field slots determines the size of the object, so we
+ * set that here too.
+ *
+ * This function feels a little more complicated than I'd like, but it
+ * has the property of moving the smallest possible set of fields, which
+ * should reduce the time required to load a class.
+ *
+ * NOTE: reference fields *must* come first, or precacheReferenceOffsets()
+ * will break.
+ */
static bool computeFieldOffsets(ClassObject* clazz)
{
    int fieldOffset;
    int i, j;

    /* Open the instance-field table for writing while we rearrange it. */
    dvmLinearReadWrite(clazz->classLoader, clazz->ifields);

    /* Our fields begin where the superclass's instance data ends; for the
     * root (Object has no super) they start at DataObject's payload. */
    if (clazz->super != NULL)
        fieldOffset = clazz->super->objectSize;
    else
        fieldOffset = offsetof(DataObject, instanceData);

    LOGVV("--- computeFieldOffsets '%s'\n", clazz->descriptor);

    //LOGI("OFFSETS fieldCount=%d\n", clazz->ifieldCount);
    //LOGI("dataobj, instance: %d\n", offsetof(DataObject, instanceData));
    //LOGI("classobj, access: %d\n", offsetof(ClassObject, accessFlags));
    //LOGI("super=%p, fieldOffset=%d\n", clazz->super, fieldOffset);

    /*
     * Start by moving all reference fields to the front.
     *
     * "i" scans forward from the start, "j" scans backward from the end;
     * whenever a non-reference is found at "i" we pull the next reference
     * (if any) forward from the tail.
     */
    clazz->ifieldRefCount = 0;
    j = clazz->ifieldCount - 1;
    for (i = 0; i < clazz->ifieldCount; i++) {
        InstField* pField = &clazz->ifields[i];
        char c = pField->field.signature[0];

        if (c != '[' && c != 'L') {
            /* This isn't a reference field; see if any reference fields
             * follow this one.  If so, we'll move it to this position.
             * (quicksort-style partitioning)
             */
            while (j > i) {
                InstField* refField = &clazz->ifields[j--];
                char rc = refField->field.signature[0];

                if (rc == '[' || rc == 'L') {
                    /* Here's a reference field that follows at least one
                     * non-reference field.  Swap it with the current field.
                     * (When this returns, "pField" points to the reference
                     * field, and "refField" points to the non-ref field.)
                     */
                    swapField(pField, refField);

                    /* Fix the signature.
                     */
                    c = rc;

                    clazz->ifieldRefCount++;
                    break;
                }
            }
            /* We may or may not have swapped a field.
             */
        } else {
            /* This is a reference field.
             */
            clazz->ifieldRefCount++;
        }

        /*
         * If we've hit the end of the reference fields, break.
         */
        if (c != '[' && c != 'L')
            break;

        /* Assign this reference field its slot; references are one word. */
        pField->byteOffset = fieldOffset;
        fieldOffset += sizeof(u4);
        LOGVV("  --- offset1 '%s'=%d\n", pField->field.name,pField->byteOffset);
    }

    /*
     * Now we want to pack all of the double-wide fields together.  If we're
     * not aligned, though, we want to shuffle one 32-bit field into place.
     * If we can't find one, we'll have to pad it.
     */
    if (i != clazz->ifieldCount && (fieldOffset & 0x04) != 0) {
        LOGVV("  +++ not aligned\n");

        InstField* pField = &clazz->ifields[i];
        char c = pField->field.signature[0];

        if (c != 'J' && c != 'D') {
            /*
             * The field that comes next is 32-bit, so just advance past it.
             */
            assert(c != '[' && c != 'L');
            pField->byteOffset = fieldOffset;
            fieldOffset += sizeof(u4);
            i++;
            LOGVV("  --- offset2 '%s'=%d\n",
                pField->field.name, pField->byteOffset);
        } else {
            /*
             * Next field is 64-bit, so search for a 32-bit field we can
             * swap into it.
             */
            bool found = false;
            j = clazz->ifieldCount - 1;
            while (j > i) {
                InstField* singleField = &clazz->ifields[j--];
                char rc = singleField->field.signature[0];

                if (rc != 'J' && rc != 'D') {
                    swapField(pField, singleField);
                    //c = rc;
                    LOGVV("  +++ swapped '%s' for alignment\n",
                        pField->field.name);
                    pField->byteOffset = fieldOffset;
                    fieldOffset += sizeof(u4);
                    LOGVV("  --- offset3 '%s'=%d\n",
                        pField->field.name, pField->byteOffset);
                    found = true;
                    i++;
                    break;
                }
            }
            if (!found) {
                /* No 32-bit field left to fill the gap; burn one word. */
                LOGV("  +++ inserting pad field in '%s'\n", clazz->descriptor);
                fieldOffset += sizeof(u4);
            }
        }
    }

    /*
     * Alignment is good, shuffle any double-wide fields forward, and
     * finish assigning field offsets to all fields.
     */
    assert(i == clazz->ifieldCount || (fieldOffset & 0x04) == 0);
    j = clazz->ifieldCount - 1;
    for ( ; i < clazz->ifieldCount; i++) {
        InstField* pField = &clazz->ifields[i];
        char c = pField->field.signature[0];

        if (c != 'D' && c != 'J') {
            /* This isn't a double-wide field; see if any double fields
             * follow this one.  If so, we'll move it to this position.
             * (quicksort-style partitioning)
             */
            while (j > i) {
                InstField* doubleField = &clazz->ifields[j--];
                char rc = doubleField->field.signature[0];

                if (rc == 'D' || rc == 'J') {
                    /* Here's a double-wide field that follows at least one
                     * non-double field.  Swap it with the current field.
                     * (When this returns, "pField" points to the reference
                     * field, and "doubleField" points to the non-double field.)
                     */
                    swapField(pField, doubleField);
                    c = rc;

                    break;
                }
            }
            /* We may or may not have swapped a field.
             */
        } else {
            /* This is a double-wide field, leave it be.
             */
        }

        /* 64-bit fields consume two words; everything else one. */
        pField->byteOffset = fieldOffset;
        LOGVV("  --- offset4 '%s'=%d\n", pField->field.name,pField->byteOffset);
        fieldOffset += sizeof(u4);
        if (c == 'J' || c == 'D')
            fieldOffset += sizeof(u4);
    }

#ifndef NDEBUG
    /* Make sure that all reference fields appear before
     * non-reference fields, and all double-wide fields are aligned.
     */
    j = 0;  // seen non-ref
    for (i = 0; i < clazz->ifieldCount; i++) {
        InstField *pField = &clazz->ifields[i];
        char c = pField->field.signature[0];

        if (c == 'D' || c == 'J') {
            assert((pField->byteOffset & 0x07) == 0);
        }

        if (c != '[' && c != 'L') {
            if (!j) {
                /* first non-reference must sit exactly at the ref count */
                assert(i == clazz->ifieldRefCount);
                j = 1;
            }
        } else if (j) {
            /* a reference after a non-reference means partitioning failed */
            assert(false);
        }
    }
    if (!j) {
        /* never saw a non-reference field; all fields must be references */
        assert(clazz->ifieldRefCount == clazz->ifieldCount);
    }
#endif

    /*
     * We map a C struct directly on top of java/lang/Class objects.  Make
     * sure we left enough room for the instance fields.
     */
    assert(clazz != gDvm.classJavaLangClass || (size_t)fieldOffset <
        offsetof(ClassObject, instanceData) + sizeof(clazz->instanceData));

    /* The final offset doubles as the total instance size. */
    clazz->objectSize = fieldOffset;

    dvmLinearReadOnly(clazz->classLoader, clazz->ifields);
    return true;
}
+
+/*
+ * Throw the VM-spec-mandated error when an exception is thrown during
+ * class initialization.
+ *
+ * The safest way to do this is to call the ExceptionInInitializerError
+ * constructor that takes a Throwable.
+ *
+ * [Do we want to wrap it if the original is an Error rather than
+ * an Exception?]
+ */
static void throwClinitError(void)
{
    Thread* self = dvmThreadSelf();
    Object* exception;
    Object* eiie;

    /* Pull the pending exception aside, pin it so GC can't collect it
     * while it is detached, and clear it so we can allocate/call below. */
    exception = dvmGetException(self);
    dvmAddTrackedAlloc(exception, self);
    dvmClearException(self);

    if (gDvm.classJavaLangExceptionInInitializerError == NULL) {
        /*
         * Always resolves to same thing -- no race condition.
         */
        gDvm.classJavaLangExceptionInInitializerError =
            dvmFindSystemClass(
                    "Ljava/lang/ExceptionInInitializerError;");
        if (gDvm.classJavaLangExceptionInInitializerError == NULL) {
            LOGE("Unable to prep java/lang/ExceptionInInitializerError\n");
            goto fail;
        }

        gDvm.methJavaLangExceptionInInitializerError_init =
            dvmFindDirectMethodByDescriptor(gDvm.classJavaLangExceptionInInitializerError,
            "<init>", "(Ljava/lang/Throwable;)V");
        if (gDvm.methJavaLangExceptionInInitializerError_init == NULL) {
            LOGE("Unable to prep java/lang/ExceptionInInitializerError\n");
            goto fail;
        }
    }

    eiie = dvmAllocObject(gDvm.classJavaLangExceptionInInitializerError,
                ALLOC_DEFAULT);
    if (eiie == NULL)
        goto fail;

    /*
     * Construct the new object, and replace the exception with it.
     * The original exception becomes the constructor's "cause" argument.
     */
    JValue unused;
    dvmCallMethod(self, gDvm.methJavaLangExceptionInInitializerError_init,
        eiie, &unused, exception);
    dvmSetException(self, eiie);
    dvmReleaseTrackedAlloc(eiie, NULL);
    dvmReleaseTrackedAlloc(exception, self);
    return;

fail:       /* restore original exception */
    dvmSetException(self, exception);
    dvmReleaseTrackedAlloc(exception, self);
    return;
}
+
+/*
+ * The class failed to initialize on a previous attempt, so we want to throw
+ * a NoClassDefFoundError (v2 2.17.5).  The exception to this rule is if we
+ * failed in verification, in which case v2 5.4.1 says we need to re-throw
+ * the previous error.
+ */
+static void throwEarlierClassFailure(ClassObject* clazz)
+{
+    LOGI("Rejecting re-init on previously-failed class %s v=%p\n",
+        clazz->descriptor, clazz->verifyErrorClass);
+
+    if (clazz->verifyErrorClass == NULL) {
+        dvmThrowExceptionWithClassMessage("Ljava/lang/NoClassDefFoundError;",
+            clazz->descriptor);
+    } else {
+        dvmThrowExceptionByClassWithClassMessage(clazz->verifyErrorClass,
+            clazz->descriptor);
+    }
+}
+
+/*
+ * Initialize any static fields whose values are stored in
+ * the DEX file.  This must be done during class initialization.
+ */
static void initSFields(ClassObject* clazz)
{
    Thread* self = dvmThreadSelf(); /* for dvmReleaseTrackedAlloc() */
    DexFile* pDexFile;
    const DexClassDef* pClassDef;
    const DexEncodedArray* pValueList;
    EncodedArrayIterator iterator;
    int i;

    if (clazz->sfieldCount == 0) {
        /* no static fields at all; nothing to do */
        return;
    }
    if (clazz->pDvmDex == NULL) {
        /* generated class; shouldn't have static fields */
        LOGW("Not initializing static fields in %s\n", clazz->descriptor);
        return;
    }
    pDexFile = clazz->pDvmDex->pDexFile;

    pClassDef = dexFindClass(pDexFile, clazz->descriptor);
    assert(pClassDef != NULL);

    /* No encoded-values list means every static starts as zero/null,
     * which the allocator already guarantees. */
    pValueList = dexGetStaticValuesList(pDexFile, pClassDef);
    if (pValueList == NULL) {
        return;
    }

    dvmEncodedArrayIteratorInitialize(&iterator, pValueList, clazz);

    /*
     * Iterate over the initial values array, setting the corresponding
     * static field for each array element.  The encoded array is
     * positionally matched against clazz->sfields.
     */

    for (i = 0; dvmEncodedArrayIteratorHasNext(&iterator); i++) {
        AnnotationValue value;
        bool parsed = dvmEncodedArrayIteratorGetNext(&iterator, &value);
        StaticField* sfield = &clazz->sfields[i];
        const char* descriptor = sfield->field.signature;
        bool needRelease = false;

        if (! parsed) {
            /*
             * TODO: Eventually verification should attempt to ensure
             * that this can't happen at least due to a data integrity
             * problem.
             */
            LOGE("Static initializer parse failed for %s at index %d",
                    clazz->descriptor, i);
            dvmAbort();
        }

        /* Verify that the value we got was of a valid type. */

        switch (descriptor[0]) {
            case 'Z': parsed = (value.type == kDexAnnotationBoolean); break;
            case 'B': parsed = (value.type == kDexAnnotationByte);    break;
            case 'C': parsed = (value.type == kDexAnnotationChar);    break;
            case 'S': parsed = (value.type == kDexAnnotationShort);   break;
            case 'I': parsed = (value.type == kDexAnnotationInt);     break;
            case 'J': parsed = (value.type == kDexAnnotationLong);    break;
            case 'F': parsed = (value.type == kDexAnnotationFloat);   break;
            case 'D': parsed = (value.type == kDexAnnotationDouble);  break;
            case '[': parsed = (value.type == kDexAnnotationNull);    break;
            case 'L': {
                switch (value.type) {
                    case kDexAnnotationNull: {
                        /* No need for further tests. */
                        break;
                    }
                    case kDexAnnotationString: {
                        /* String/Class values come back as tracked
                         * allocations that must be released after storing. */
                        parsed =
                            (strcmp(descriptor, "Ljava/lang/String;") == 0);
                        needRelease = true;
                        break;
                    }
                    case kDexAnnotationType: {
                        parsed =
                            (strcmp(descriptor, "Ljava/lang/Class;") == 0);
                        needRelease = true;
                        break;
                    }
                    default: {
                        parsed = false;
                        break;
                    }
                }
                break;
            }
            default: {
                parsed = false;
                break;
            }
        }

        if (parsed) {
            /*
             * All's well, so store the value. Note: This always
             * stores the full width of a JValue, even though most of
             * the time only the first word is needed.
             */
            sfield->value = value.value;
            if (needRelease) {
                dvmReleaseTrackedAlloc(value.value.l, self);
            }
        } else {
            /*
             * Something up above had a problem. TODO: See comment
             * above the switch about verfication.
             */
            LOGE("Bogus static initialization: value type %d in field type "
                    "%s for %s at index %d", value.type, descriptor,
                    clazz->descriptor, i);
            dvmAbort();
        }
    }
}
+
+/*
+ * Returns true if the class is being initialized by us (which means that
+ * calling dvmInitClass will return immediately after fiddling with locks).
+ *
+ * There isn't a race here, because either clazz->initThreadId won't match
+ * us, or it will and it was set in the same thread.
+ */
+bool dvmIsClassInitializing(const ClassObject* clazz)
+{
+    return (clazz->status == CLASS_INITIALIZING &&
+            clazz->initThreadId == dvmThreadSelf()->threadId);
+}
+
+/*
+ * If a class has not been initialized, do so by executing the code in
+ * <clinit>.  The sequence is described in the VM spec v2 2.17.5.
+ *
+ * It is possible for multiple threads to arrive here simultaneously, so
+ * we need to lock the class while we check stuff.  We know that no
+ * interpreted code has access to the class yet, so we can use the class's
+ * monitor lock.
+ *
+ * We will often be called recursively, e.g. when the <clinit> code resolves
+ * one of its fields, the field resolution will try to initialize the class.
+ *
+ * This can get very interesting if a class has a static field initialized
+ * to a new instance of itself.  <clinit> will end up calling <init> on
+ * the members it is initializing, which is fine unless it uses the contents
+ * of static fields to initialize instance fields.  This will leave the
+ * static-referenced objects in a partially initialized state.  This is
+ * reasonably rare and can sometimes be cured with proper field ordering.
+ *
+ * On failure, returns "false" with an exception raised.
+ *
+ * -----
+ *
+ * It is possible to cause a deadlock by having a situation like this:
+ *   class A { static { sleep(10000); new B(); } }
+ *   class B { static { sleep(10000); new A(); } }
+ *   new Thread() { public void run() { new A(); } }.start();
+ *   new Thread() { public void run() { new B(); } }.start();
+ * This appears to be expected under the spec.
+ *
+ * The interesting question is what to do if somebody calls Thread.interrupt()
+ * on one of the deadlocked threads.  According to the VM spec, they're both
+ * sitting in "wait".  Should the interrupt code quietly raise the
+ * "interrupted" flag, or should the "wait" return immediately with an
+ * exception raised?
+ *
+ * This gets a little murky.  The VM spec says we call "wait", and the
+ * spec for Thread.interrupt says Object.wait is interruptible.  So it
+ * seems that, if we get unlucky and interrupt class initialization, we
+ * are expected to throw (which gets converted to ExceptionInInitializerError
+ * since InterruptedException is checked).
+ *
+ * There are a couple of problems here.  First, all threads are expected to
+ * present a consistent view of class initialization, so we can't have it
+ * fail in one thread and succeed in another.  Second, once a class fails
+ * to initialize, it must *always* fail.  This means that a stray interrupt()
+ * call could render a class unusable for the lifetime of the VM.
+ *
+ * In most cases -- the deadlock example above being a counter-example --
+ * the interrupting thread can't tell whether the target thread handled
+ * the initialization itself or had to wait while another thread did the
+ * work.  Refusing to interrupt class initialization is, in most cases,
+ * not something that a program can reliably detect.
+ *
+ * On the assumption that interrupting class initialization is highly
+ * undesirable in most circumstances, and that failing to do so does not
+ * deviate from the spec in a meaningful way, we don't allow class init
+ * to be interrupted by Thread.interrupt().
+ */
bool dvmInitClass(ClassObject* clazz)
{
#if LOG_CLASS_LOADING
    bool initializedByUs = false;
#endif

    Thread* self = dvmThreadSelf();
    const Method* method;

    /* Grab the class's monitor; all status checks/transitions below happen
     * under this lock unless explicitly released. */
    dvmLockObject(self, (Object*) clazz);
    assert(dvmIsClassLinked(clazz) || clazz->status == CLASS_ERROR);

    /*
     * If the class hasn't been verified yet, do so now.
     */
    if (clazz->status < CLASS_VERIFIED) {
        /*
         * If we're in an "erroneous" state, throw an exception and bail.
         */
        if (clazz->status == CLASS_ERROR) {
            throwEarlierClassFailure(clazz);
            goto bail_unlock;
        }

        assert(clazz->status == CLASS_RESOLVED);
        assert(!IS_CLASS_FLAG_SET(clazz, CLASS_ISPREVERIFIED));

        /* Verification may be skipped entirely, or skipped for boot-path
         * (NULL-loader) classes in "remote" mode. */
        if (gDvm.classVerifyMode == VERIFY_MODE_NONE ||
            (gDvm.classVerifyMode == VERIFY_MODE_REMOTE &&
             clazz->classLoader == NULL))
        {
            LOGV("+++ not verifying class %s (cl=%p)\n",
                clazz->descriptor, clazz->classLoader);
            goto noverify;
        }

        if (!gDvm.optimizing)
            LOGV("+++ late verify on %s\n", clazz->descriptor);

        /*
         * We're not supposed to optimize an unverified class, but during
         * development this mode was useful.  We can't verify an optimized
         * class because the optimization process discards information.
         */
        if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOPTIMIZED)) {
            LOGW("Class '%s' was optimized without verification; "
                 "not verifying now\n",
                clazz->descriptor);
            LOGW("  ('rm /data/dalvik-cache/*' and restart to fix this)");
            goto verify_failed;
        }

        clazz->status = CLASS_VERIFYING;
        if (!dvmVerifyClass(clazz, VERIFY_DEFAULT)) {
verify_failed:
            /* Record the error class so later init attempts can re-throw
             * the same VerifyError (see throwEarlierClassFailure). */
            dvmThrowExceptionWithClassMessage("Ljava/lang/VerifyError;",
                clazz->descriptor);
            clazz->verifyErrorClass = dvmGetException(self)->clazz;
            clazz->status = CLASS_ERROR;
            goto bail_unlock;
        }

        clazz->status = CLASS_VERIFIED;
    }
noverify:

    /* Fast path: somebody else already finished the job. */
    if (clazz->status == CLASS_INITIALIZED)
        goto bail_unlock;

    while (clazz->status == CLASS_INITIALIZING) {
        /* we caught somebody else in the act; was it us? */
        if (clazz->initThreadId == self->threadId) {
            //LOGV("HEY: found a recursive <clinit>\n");
            goto bail_unlock;
        }

        if (dvmCheckException(self)) {
            LOGW("GLITCH: exception pending at start of class init\n");
            dvmAbort();
        }

        /*
         * Wait for the other thread to finish initialization.  We pass
         * "false" for the "interruptShouldThrow" arg so it doesn't throw
         * an exception on interrupt.
         */
        dvmObjectWait(self, (Object*) clazz, 0, 0, false);

        /*
         * When we wake up, repeat the test for init-in-progress.  If there's
         * an exception pending (only possible if "interruptShouldThrow"
         * was set), bail out.
         */
        if (dvmCheckException(self)) {
            LOGI("Class init of '%s' failing with wait() exception\n",
                clazz->descriptor);
            /*
             * TODO: this is bogus, because it means the two threads have a
             * different idea of the class status.  We need to flag the
             * class as bad and ensure that the initializer thread respects
             * our notice.  If we get lucky and wake up after the class has
             * finished initialization but before being woken, we have to
             * swallow the exception, perhaps raising thread->interrupted
             * to preserve semantics.
             *
             * Since we're not currently allowing interrupts, this should
             * never happen and we don't need to fix this.
             */
            assert(false);
            throwClinitError();
            clazz->status = CLASS_ERROR;
            goto bail_unlock;
        }
        if (clazz->status == CLASS_INITIALIZING) {
            /* spurious wakeup, or a different class was notified */
            LOGI("Waiting again for class init\n");
            continue;
        }
        assert(clazz->status == CLASS_INITIALIZED ||
               clazz->status == CLASS_ERROR);
        if (clazz->status == CLASS_ERROR) {
            /*
             * The caller wants an exception, but it was thrown in a
             * different thread.  Synthesize one here.
             */
            dvmThrowException("Ljava/lang/UnsatisfiedLinkError;",
                "(<clinit> failed, see exception in other thread)");
        }
        goto bail_unlock;
    }

    /* see if we failed previously */
    if (clazz->status == CLASS_ERROR) {
        // might be wise to unlock before throwing; depends on which class
        // it is that we have locked
        dvmUnlockObject(self, (Object*) clazz);
        throwEarlierClassFailure(clazz);
        return false;
    }

    /*
     * Let's initialize this thing.
     *
     * We unlock the object so that other threads can politely sleep on
     * our mutex with Object.wait(), instead of hanging or spinning trying
     * to grab our mutex.
     */
    assert(clazz->status < CLASS_INITIALIZING);

#if LOG_CLASS_LOADING
    // We started initializing.
    logClassLoad('+', clazz);
    initializedByUs = true;
#endif

    /* Claim the class for ourselves, then drop the monitor so other
     * threads can wait() on it while we run <clinit>. */
    clazz->status = CLASS_INITIALIZING;
    clazz->initThreadId = self->threadId;
    dvmUnlockObject(self, (Object*) clazz);

    /* init our superclass */
    if (clazz->super != NULL && clazz->super->status != CLASS_INITIALIZED) {
        assert(!dvmIsInterfaceClass(clazz));
        if (!dvmInitClass(clazz->super)) {
            assert(dvmCheckException(self));
            clazz->status = CLASS_ERROR;
            /* wake up anybody waiting */
            dvmLockObject(self, (Object*) clazz);
            goto bail_notify;
        }
    }

    /* Initialize any static fields whose values are
     * stored in the Dex file.  This should include all of the
     * simple "final static" fields, which are required to
     * be initialized first. (vmspec 2 sec 2.17.5 item 8)
     * More-complicated final static fields should be set
     * at the beginning of <clinit>;  all we can do is trust
     * that the compiler did the right thing.
     */
    initSFields(clazz);

    /* Execute any static initialization code.
     */
    method = dvmFindDirectMethodByDescriptor(clazz, "<clinit>", "()V");
    if (method == NULL) {
        LOGVV("No <clinit> found for %s\n", clazz->descriptor);
    } else {
        LOGVV("Invoking %s.<clinit>\n", clazz->descriptor);
        JValue unused;
        dvmCallMethod(self, method, NULL, &unused);
    }

    if (dvmCheckException(self)) {
        /*
         * We've had an exception thrown during static initialization.  We
         * need to throw an ExceptionInInitializerError, but we want to
         * tuck the original exception into the "cause" field.
         */
        LOGW("Exception %s thrown during %s.<clinit>\n",
            (dvmGetException(self)->clazz)->descriptor, clazz->descriptor);
        throwClinitError();
        //LOGW("+++ replaced\n");

        dvmLockObject(self, (Object*) clazz);
        clazz->status = CLASS_ERROR;
    } else {
        /* success! */
        dvmLockObject(self, (Object*) clazz);
        clazz->status = CLASS_INITIALIZED;
        LOGVV("Initialized class: %s\n", clazz->descriptor);
    }

bail_notify:
    /*
     * Notify anybody waiting on the object.
     */
    dvmObjectNotifyAll(self, (Object*) clazz);

bail_unlock:

#if LOG_CLASS_LOADING
    if (initializedByUs) {
        // We finished initializing.
        logClassLoad('-', clazz);
    }
#endif

    dvmUnlockObject(self, (Object*) clazz);

    return (clazz->status != CLASS_ERROR);
}
+
+/*
+ * Replace method->nativeFunc and method->insns with new values.  This is
+ * performed on resolution of a native method.
+ */
+void dvmSetNativeFunc(const Method* method, DalvikBridgeFunc func,
+    const u2* insns)
+{
+    ClassObject* clazz = method->clazz;
+
+    /* just open up both; easier that way */
+    dvmLinearReadWrite(clazz->classLoader, clazz->virtualMethods);
+    dvmLinearReadWrite(clazz->classLoader, clazz->directMethods);
+
+    ((Method*)method)->nativeFunc = func;
+    ((Method*)method)->insns = insns;
+
+    dvmLinearReadOnly(clazz->classLoader, clazz->virtualMethods);
+    dvmLinearReadOnly(clazz->classLoader, clazz->directMethods);
+}
+
+/*
+ * dvmHashForeach callback.  A nonzero return value causes foreach to
+ * bail out.
+ */
+static int findClassCallback(void* vclazz, void* arg)
+{
+    ClassObject* clazz = vclazz;
+    const char* descriptor = (const char*) arg;
+
+    if (strcmp(clazz->descriptor, descriptor) == 0)
+        return (int) clazz;
+    return 0;
+}
+
+/*
+ * Find a loaded class by descriptor. Returns the first one found.
+ * Because there can be more than one if class loaders are involved,
+ * this is not an especially good API. (Currently only used by the
+ * debugger and "checking" JNI.)
+ *
+ * "descriptor" should have the form "Ljava/lang/Class;" or
+ * "[Ljava/lang/Class;", i.e. a descriptor and not an internal-form
+ * class name.
+ */
+ClassObject* dvmFindLoadedClass(const char* descriptor)
+{
+    int result;
+
+    dvmHashTableLock(gDvm.loadedClasses);
+    result = dvmHashForeach(gDvm.loadedClasses, findClassCallback,
+            (void*) descriptor);
+    dvmHashTableUnlock(gDvm.loadedClasses);
+
+    return (ClassObject*) result;
+}
+
+/*
+ * Retrieve the system (a/k/a application) class loader.
+ */
+Object* dvmGetSystemClassLoader(void)
+{
+    ClassObject* clazz;
+    Method* getSysMeth;
+    Object* loader;
+
+    clazz = dvmFindSystemClass("Ljava/lang/ClassLoader;");
+    if (clazz == NULL)
+        return NULL;
+
+    getSysMeth = dvmFindDirectMethodByDescriptor(clazz, "getSystemClassLoader",
+        "()Ljava/lang/ClassLoader;");
+    if (getSysMeth == NULL)
+        return NULL;
+
+    JValue result;
+    dvmCallMethod(dvmThreadSelf(), getSysMeth, NULL, &result);
+    loader = (Object*)result.l;
+    return loader;
+}
+
+
+/*
+ * This is a dvmHashForeach callback.
+ */
static int dumpClass(void* vclazz, void* varg)
{
    const ClassObject* clazz = (const ClassObject*) vclazz;
    int flags = (int) varg;     /* kDumpClass* bit flags */
    char* desc;
    int i;

    if (clazz == NULL) {
        LOGI("dumpClass: ignoring request to dump null class\n");
        return 0;
    }

    if ((flags & kDumpClassFullDetail) == 0) {
        /* brief mode: one line per class, fields chosen by flags */
        bool showInit = (flags & kDumpClassInitialized) != 0;
        bool showLoader = (flags & kDumpClassClassLoader) != 0;
        const char* initStr;

        initStr = dvmIsClassInitialized(clazz) ? "true" : "false";

        if (showInit && showLoader)
            LOGI("%s %p %s\n", clazz->descriptor, clazz->classLoader, initStr);
        else if (showInit)
            LOGI("%s %s\n", clazz->descriptor, initStr);
        else if (showLoader)
            LOGI("%s %p\n", clazz->descriptor, clazz->classLoader);
        else
            LOGI("%s\n", clazz->descriptor);

        return 0;
    }

    /* full-detail mode: header, hierarchy, tables, and field lists */
    LOGI("----- %s '%s' cl=%p -----\n",
        dvmIsInterfaceClass(clazz) ? "interface" : "class",
        clazz->descriptor, clazz->classLoader);
    LOGI("  objectSize=%d (%d from super)\n", (int) clazz->objectSize,
        clazz->super != NULL ? (int) clazz->super->objectSize : 0);
    LOGI("  access=0x%04x.%04x\n", clazz->accessFlags >> 16,
        clazz->accessFlags & JAVA_FLAGS_MASK);
    if (clazz->super != NULL)
        LOGI("  super='%s' (cl=%p)\n",
            clazz->super->descriptor, clazz->super->classLoader);
    if (dvmIsArrayClass(clazz)) {
        LOGI("  dimensions=%d elementClass=%s\n",
            clazz->arrayDim, clazz->elementClass->descriptor);
    }
    if (clazz->iftableCount > 0) {
        LOGI("  interfaces (%d):\n", clazz->iftableCount);
        for (i = 0; i < clazz->iftableCount; i++) {
            InterfaceEntry* ent = &clazz->iftable[i];
            int j;

            LOGI("    %2d: %s (cl=%p)\n",
                i, ent->clazz->descriptor, ent->clazz->classLoader);

            /* enable when needed */
            if (false && ent->methodIndexArray != NULL) {
                for (j = 0; j < ent->clazz->virtualMethodCount; j++)
                    LOGI("      %2d: %d %s %s\n",
                        j, ent->methodIndexArray[j],
                        ent->clazz->virtualMethods[j].name,
                        clazz->vtable[ent->methodIndexArray[j]]->name);
            }
        }
    }
    if (!dvmIsInterfaceClass(clazz)) {
        LOGI("  vtable (%d entries, %d in super):\n", clazz->vtableCount,
            clazz->super != NULL ? clazz->super->vtableCount : 0);
        for (i = 0; i < clazz->vtableCount; i++) {
            /* dexProtoCopyMethodDescriptor allocates; free after logging */
            desc = dexProtoCopyMethodDescriptor(&clazz->vtable[i]->prototype);
            LOGI("    %s%2d: %p %20s %s\n",
                (i != clazz->vtable[i]->methodIndex) ? "*** " : "",
                (u4) clazz->vtable[i]->methodIndex, clazz->vtable[i],
                clazz->vtable[i]->name, desc);
            free(desc);
        }
        LOGI("  direct methods (%d entries):\n", clazz->directMethodCount);
        for (i = 0; i < clazz->directMethodCount; i++) {
            desc = dexProtoCopyMethodDescriptor(
                    &clazz->directMethods[i].prototype);
            LOGI("    %2d: %20s %s\n", i, clazz->directMethods[i].name,
                desc);
            free(desc);
        }
    } else {
        LOGI("  interface methods (%d):\n", clazz->virtualMethodCount);
        for (i = 0; i < clazz->virtualMethodCount; i++) {
            desc = dexProtoCopyMethodDescriptor(
                    &clazz->virtualMethods[i].prototype);
            LOGI("    %2d: %2d %20s %s\n", i,
                (u4) clazz->virtualMethods[i].methodIndex,
                clazz->virtualMethods[i].name,
                desc);
            free(desc);
        }
    }
    if (clazz->sfieldCount > 0) {
        LOGI("  static fields (%d entries):\n", clazz->sfieldCount);
        for (i = 0; i < clazz->sfieldCount; i++) {
            LOGI("    %2d: %20s %s\n", i, clazz->sfields[i].field.name,
                clazz->sfields[i].field.signature);
        }
    }
    if (clazz->ifieldCount > 0) {
        LOGI("  instance fields (%d entries):\n", clazz->ifieldCount);
        for (i = 0; i < clazz->ifieldCount; i++) {
            LOGI("    %2d: %20s %s\n", i, clazz->ifields[i].field.name,
                clazz->ifields[i].field.signature);
        }
    }
    return 0;
}
+
+/*
+ * Dump the contents of a single class.
+ *
+ * Pass kDumpClassFullDetail into "flags" to get lots of detail.
+ *
+ * Thin wrapper over the hash-table foreach callback dumpClass().  "flags"
+ * rides through a void* cast; this assumes int round-trips through a
+ * pointer (true on the 32-bit targets this was written for).
+ */
+void dvmDumpClass(const ClassObject* clazz, int flags)
+{
+    dumpClass((void*) clazz, (void*) flags);
+}
+
+/*
+ * Dump the contents of all classes.
+ *
+ * Iterates over the loaded-classes hash table with the table lock held,
+ * passing "flags" through to dumpClass() as the foreach argument.
+ */
+void dvmDumpAllClasses(int flags)
+{
+    dvmHashTableLock(gDvm.loadedClasses);
+    dvmHashForeach(gDvm.loadedClasses, dumpClass, (void*) flags);
+    dvmHashTableUnlock(gDvm.loadedClasses);
+}
+
+/*
+ * Get the number of loaded classes.
+ *
+ * The value is a snapshot taken under the table lock; it may be stale
+ * by the time the caller uses it.
+ */
+int dvmGetNumLoadedClasses()
+{
+    int count; 
+    dvmHashTableLock(gDvm.loadedClasses);
+    count = dvmHashTableNumEntries(gDvm.loadedClasses);
+    dvmHashTableUnlock(gDvm.loadedClasses);
+    return count;
+}
+
+/*
+ * Write some statistics to the log file.
+ *
+ * "msg" tags the log line so the caller's context is identifiable.
+ * Reads the gDvm counters and the hash table entry count without taking
+ * the table lock, so the numbers are approximate if classes are being
+ * loaded concurrently.
+ */
+void dvmDumpLoaderStats(const char* msg)
+{
+    LOGV("VM stats (%s): cls=%d/%d meth=%d ifld=%d sfld=%d linear=%d\n",
+        msg, gDvm.numLoadedClasses, dvmHashTableNumEntries(gDvm.loadedClasses),
+        gDvm.numDeclaredMethods, gDvm.numDeclaredInstFields,
+        gDvm.numDeclaredStaticFields, gDvm.pBootLoaderAlloc->curOffset);
+}
+
+#ifdef PROFILE_FIELD_ACCESS
+/*
+ * Dump the field access counts for all fields in this class.
+ *
+ * Output goes to stdout via printf (not the log): "GI"/"PI" are instance
+ * field gets/puts, "GS"/"PS" are static field gets/puts; zero counts are
+ * suppressed.  Hash-table foreach callback; always returns 0 so the
+ * iteration continues.
+ */
+static int dumpAccessCounts(void* vclazz, void* varg)
+{
+    const ClassObject* clazz = (const ClassObject*) vclazz;
+    int i;
+
+    for (i = 0; i < clazz->ifieldCount; i++) {
+        Field* field = &clazz->ifields[i].field;
+
+        if (field->gets != 0)
+            printf("GI %d %s.%s\n", field->gets,
+                field->clazz->descriptor, field->name);
+        if (field->puts != 0)
+            printf("PI %d %s.%s\n", field->puts,
+                field->clazz->descriptor, field->name);
+    }
+    for (i = 0; i < clazz->sfieldCount; i++) {
+        Field* field = &clazz->sfields[i].field;
+
+        if (field->gets != 0)
+            printf("GS %d %s.%s\n", field->gets,
+                field->clazz->descriptor, field->name);
+        if (field->puts != 0)
+            printf("PS %d %s.%s\n", field->puts,
+                field->clazz->descriptor, field->name);
+    }
+
+    return 0;
+}
+
+/*
+ * Dump the field access counts for all loaded classes.
+ *
+ * Holds the loaded-classes table lock for the duration of the walk.
+ */
+void dvmDumpFieldAccessCounts(void)
+{
+    dvmHashTableLock(gDvm.loadedClasses);
+    dvmHashForeach(gDvm.loadedClasses, dumpAccessCounts, NULL);
+    dvmHashTableUnlock(gDvm.loadedClasses);
+}
+#endif
+
+
+/*
+ * Mark a single class object.  Hash-table foreach callback used when
+ * scanning GC roots; always returns 0 so the iteration continues.
+ */
+static int markClassObject(void *clazz, void *arg)
+{
+    UNUSED_PARAMETER(arg);
+
+    dvmMarkObjectNonNull((Object *)clazz);
+    return 0;
+}
+
+/*
+ * The garbage collector calls this to mark the class objects for all
+ * loaded classes.
+ *
+ * Every entry in the loaded-classes table is treated as a root.
+ */
+void dvmGcScanRootClassLoader()
+{
+    /* dvmClassStartup() may not have been called before the first GC.
+     */
+    if (gDvm.loadedClasses != NULL) {
+        dvmHashTableLock(gDvm.loadedClasses);
+        dvmHashForeach(gDvm.loadedClasses, markClassObject, NULL);
+        dvmHashTableUnlock(gDvm.loadedClasses);
+    }
+}
+
+
+/*
+ * ===========================================================================
+ *      Method Prototypes and Descriptors
+ * ===========================================================================
+ */
+
+/*
+ * Compare the two method names and prototypes, a la strcmp(). The
+ * name is considered the "major" order and the prototype the "minor"
+ * order. The prototypes are compared as if by dvmCompareMethodProtos().
+ *
+ * Returns <0, 0, or >0 following strcmp() conventions.
+ */
+int dvmCompareMethodNamesAndProtos(const Method* method1,
+        const Method* method2)
+{
+    int result = strcmp(method1->name, method2->name);
+
+    if (result != 0) {
+        return result;
+    }
+
+    return dvmCompareMethodProtos(method1, method2);
+}
+
+/*
+ * Compare a (name, prototype) pair with the (name, prototype) of
+ * a method, a la strcmp(). The name is considered the "major" order and
+ * the prototype the "minor" order. The prototypes are compared as if by
+ * dexProtoCompare().
+ *
+ * Returns <0, 0, or >0 following strcmp() conventions.
+ */
+int dvmCompareNameProtoAndMethod(const char* name,
+    const DexProto* proto, const Method* method)
+{
+    int result = strcmp(name, method->name);
+
+    if (result != 0) {
+        return result;
+    }
+
+    return dexProtoCompare(proto, &method->prototype);
+}
+
+/*
+ * Compare a (name, method descriptor) pair with the (name, prototype) of
+ * a method, a la strcmp(). The name is considered the "major" order and
+ * the prototype the "minor" order. The descriptor and prototype are
+ * compared as if by dvmCompareDescriptorAndMethodProto().
+ *
+ * Returns <0, 0, or >0 following strcmp() conventions.
+ */
+int dvmCompareNameDescriptorAndMethod(const char* name,
+    const char* descriptor, const Method* method)
+{
+    int result = strcmp(name, method->name);
+
+    if (result != 0) {
+        return result;
+    }
+
+    return dvmCompareDescriptorAndMethodProto(descriptor, method);
+}
diff --git a/vm/oo/Class.h b/vm/oo/Class.h
new file mode 100644
index 0000000..1da0316
--- /dev/null
+++ b/vm/oo/Class.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Class loader.
+ */
+#ifndef _DALVIK_OO_CLASS
+#define _DALVIK_OO_CLASS
+
+/*
+ * The classpath and bootclasspath differ in that only the latter is
+ * consulted when looking for classes needed by the VM.  When searching
+ * for an arbitrary class definition, we start with the bootclasspath,
+ * look for optional packages (a/k/a standard extensions), and then try
+ * the classpath.
+ *
+ * In Dalvik, a class can be found in one of three ways:
+ *  - as a "loose" .class file in a directory
+ *  - as a .class file held in a JAR archive
+ *  - in a .dex file
+ *
+ * These three may be freely intermixed in a classpath specification.
+ * Ordering is significant.  (Currently only ".dex" is supported directly
+ * by the VM.)
+ */
+typedef struct ClassPathEntry {
+    enum {
+        kCpeUnknown = 0,
+        kCpeDir,
+        kCpeJar,
+        kCpeDex,
+        kCpeLastEntry       /* used as sentinel at end of array */
+    }       kind;
+    char*   fileName;       /* path of the dir/jar/dex (per "kind") */
+    void*   ptr;            /* JarFile* or DexFile* */
+} ClassPathEntry;
+
+bool dvmClassStartup(void);
+void dvmClassShutdown(void);
+bool dvmPrepBootClassPath(bool isNormalStart);
+
+/*
+ * Boot class path accessors, for class loader getResources().
+ */
+int dvmGetBootPathSize(void);
+StringObject* dvmGetBootPathResource(const char* name, int idx);
+void dvmDumpBootClassPath(void);
+
+/*
+ * Determine whether "path" is a member of "cpe".
+ */
+bool dvmClassPathContains(const ClassPathEntry* cpe, const char* path);
+
+/*
+ * Find the class with the given descriptor.  Load it if it hasn't already
+ * been.
+ * 
+ * "loader" is the initiating class loader.
+ */
+ClassObject* dvmFindClass(const char* descriptor, Object* loader);
+ClassObject* dvmFindClassNoInit(const char* descriptor, Object* loader);
+
+/*
+ * Like dvmFindClass, but only for system classes.
+ */
+ClassObject* dvmFindSystemClass(const char* descriptor);
+ClassObject* dvmFindSystemClassNoInit(const char* descriptor);
+
+/*
+ * Find a loaded class by descriptor. Returns the first one found.
+ * Because there can be more than one if class loaders are involved,
+ * this is not an especially good API. (Currently only used by the
+ * debugger and "checking" JNI.)
+ *
+ * "descriptor" should have the form "Ljava/lang/Class;" or
+ * "[Ljava/lang/Class;", i.e. a descriptor and not an internal-form
+ * class name.
+ */
+ClassObject* dvmFindLoadedClass(const char* descriptor);
+
+/*
+ * Load the named class (by descriptor) from the specified DEX file.
+ * Used by class loaders to instantiate a class object from a
+ * VM-managed DEX.
+ */
+ClassObject* dvmDefineClass(DvmDex* pDvmDex, const char* descriptor,
+    Object* classLoader);
+
+/*
+ * Link a loaded class.  Normally done as part of one of the "find class"
+ * variations, this is only called explicitly for synthetic class
+ * generation (e.g. reflect.Proxy).
+ */
+bool dvmLinkClass(ClassObject* clazz, bool classesResolved);
+
+/*
+ * Determine if a class has been initialized, i.e. its status has reached
+ * CLASS_INITIALIZED.  (A class still initializing reports false here;
+ * see dvmIsClassInitializing.)
+ */
+INLINE bool dvmIsClassInitialized(const ClassObject* clazz) {
+    return (clazz->status == CLASS_INITIALIZED);
+}
+bool dvmIsClassInitializing(const ClassObject* clazz);
+
+/*
+ * Initialize a class.
+ */
+bool dvmInitClass(ClassObject* clazz);
+
+/*
+ * Retrieve the system class loader.
+ */
+Object* dvmGetSystemClassLoader(void);
+
+/*
+ * Utility functions.
+ */
+ClassObject* dvmLookupClass(const char* descriptor, Object* loader,
+    bool unprepOkay);
+void dvmFreeClassInnards(ClassObject* clazz);
+bool dvmAddClassToHash(ClassObject* clazz);
+void dvmAddInitiatingLoader(ClassObject* clazz, Object* loader);
+bool dvmLoaderInInitiatingList(const ClassObject* clazz, const Object* loader);
+
+/*
+ * Update method's "nativeFunc" and "insns" after native method resolution.
+ */
+void dvmSetNativeFunc(const Method* method, DalvikBridgeFunc func,
+    const u2* insns);
+
+/* during DEX optimizing, add an extra DEX to the bootstrap class path */
+INLINE void dvmSetBootPathExtraDex(DvmDex* pDvmDex);
+
+/*
+ * Debugging.
+ */
+void dvmDumpClass(const ClassObject* clazz, int flags);
+void dvmDumpAllClasses(int flags);
+void dvmDumpLoaderStats(const char* msg);
+int  dvmGetNumLoadedClasses();
+
+#ifdef PROFILE_FIELD_ACCESS
+void dvmDumpFieldAccessCounts(void);
+#endif
+
+/* flags for dvmDumpClass / dvmDumpAllClasses */
+#define kDumpClassFullDetail    1
+#define kDumpClassClassLoader   (1 << 1)
+#define kDumpClassInitialized   (1 << 2)
+
+
+/*
+ * Store a copy of the method prototype descriptor string
+ * for the given method into the given DexStringCache, returning the
+ * stored string for convenience.
+ *
+ * NOTE(review): the returned pointer appears to be owned by the cache
+ * (released with the cache, not free()) — confirm against the
+ * dexStringCacheEnsureCopy contract.
+ */
+INLINE char* dvmCopyDescriptorStringFromMethod(const Method* method,
+        DexStringCache *pCache)
+{
+    const char* result =
+        dexProtoGetMethodDescriptor(&method->prototype, pCache);
+    return dexStringCacheEnsureCopy(pCache, result);
+}
+
+/*
+ * Compute the number of argument words (u4 units) required by the
+ * given method's prototype. For example, if the method descriptor is
+ * "(IJ)D", this would return 3 (one for the int, two for the long;
+ * return value isn't relevant).
+ *
+ * Delegates to dexProtoComputeArgsSize().
+ */
+INLINE int dvmComputeMethodArgsSize(const Method* method)
+{
+    return dexProtoComputeArgsSize(&method->prototype);
+}
+
+/*
+ * Compare the two method prototypes. The two prototypes are compared
+ * as if by strcmp() on the result of dexProtoGetMethodDescriptor().
+ *
+ * Returns <0, 0, or >0 following strcmp() conventions.
+ */
+INLINE int dvmCompareMethodProtos(const Method* method1,
+        const Method* method2)
+{
+    return dexProtoCompare(&method1->prototype, &method2->prototype);
+}    
+
+/*
+ * Compare the two method names and prototypes, a la strcmp(). The
+ * name is considered the "major" order and the prototype the "minor"
+ * order. The prototypes are compared as if by dexProtoGetMethodDescriptor().
+ */
+int dvmCompareMethodNamesAndProtos(const Method* method1,
+        const Method* method2);
+
+/*
+ * Compare a method descriptor string with the prototype of a method,
+ * as if by converting the descriptor to a DexProto and comparing it
+ * with dexProtoCompare().
+ */
+INLINE int dvmCompareDescriptorAndMethodProto(const char* descriptor,
+    const Method* method)
+{
+    // Sense is reversed: dexProtoCompareToDescriptor() orders
+    // (proto, descriptor); negate to get (descriptor, proto) order.
+    return -dexProtoCompareToDescriptor(&method->prototype, descriptor);
+}
+
+/*
+ * Compare a (name, prototype) pair with the (name, prototype) of
+ * a method, a la strcmp(). The name is considered the "major" order and
+ * the prototype the "minor" order. The descriptor and prototype are
+ * compared as if by dvmCompareDescriptorAndMethodProto().
+ */
+int dvmCompareNameProtoAndMethod(const char* name,
+    const DexProto* proto, const Method* method);
+
+/*
+ * Compare a (name, method descriptor) pair with the (name, prototype) of
+ * a method, a la strcmp(). The name is considered the "major" order and
+ * the prototype the "minor" order. The descriptor and prototype are
+ * compared as if by dvmCompareDescriptorAndMethodProto().
+ */
+int dvmCompareNameDescriptorAndMethod(const char* name,
+    const char* descriptor, const Method* method);
+
+#endif /*_DALVIK_OO_CLASS*/
diff --git a/vm/oo/Object.c b/vm/oo/Object.c
new file mode 100644
index 0000000..189ad09
--- /dev/null
+++ b/vm/oo/Object.c
@@ -0,0 +1,653 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Operations on an Object.
+ */
+#include "Dalvik.h"
+
+/*
+ * Find a matching field, in the current class only (superclasses are
+ * not searched; see dvmFindInstanceFieldHier for that).
+ *
+ * Linear scan over clazz->ifields.
+ *
+ * Returns NULL if the field can't be found.  (Does not throw an exception.)
+ */
+InstField* dvmFindInstanceField(const ClassObject* clazz,
+    const char* fieldName, const char* signature)
+{
+    InstField* pField;
+    int i;
+
+    assert(clazz != NULL);
+
+    /*
+     * Find a field with a matching name and signature.  The Java programming
+     * language does not allow you to have two fields with the same name
+     * and different types, but the Java VM spec does allow it, so we can't
+     * bail out early when the name matches.
+     */
+    pField = clazz->ifields;
+    for (i = 0; i < clazz->ifieldCount; i++, pField++) {
+        if (strcmp(fieldName, pField->field.name) == 0 &&
+            strcmp(signature, pField->field.signature) == 0)
+        {
+            return pField;
+        }
+    }
+
+    return NULL;
+}
+
+/*
+ * Find a matching field, in this class or a superclass (recursing up
+ * the "super" chain until a match is found or the chain ends).
+ *
+ * Searching through interfaces isn't necessary, because interface fields
+ * are inherently public/static/final.
+ *
+ * Returns NULL if the field can't be found.  (Does not throw an exception.)
+ */
+InstField* dvmFindInstanceFieldHier(const ClassObject* clazz,
+    const char* fieldName, const char* signature)
+{
+    InstField* pField;
+
+    /*
+     * Search for a match in the current class.
+     */
+    pField = dvmFindInstanceField(clazz, fieldName, signature);
+    if (pField != NULL)
+        return pField;
+
+    if (clazz->super != NULL)
+        return dvmFindInstanceFieldHier(clazz->super, fieldName, signature);
+    else
+        return NULL;
+}
+
+
+/*
+ * Find a matching static field, in the current class only (interfaces
+ * and superclasses are handled by dvmFindStaticFieldHier).
+ *
+ * Returns NULL if the field can't be found.  (Does not throw an exception.)
+ * Also returns NULL — with a warning logged — if the name matches but
+ * the signature doesn't.
+ */
+StaticField* dvmFindStaticField(const ClassObject* clazz,
+    const char* fieldName, const char* signature)
+{
+    StaticField* pField;
+    int i;
+
+    assert(clazz != NULL);
+
+    pField = clazz->sfields;
+    for (i = 0; i < clazz->sfieldCount; i++, pField++) {
+        if (strcmp(fieldName, pField->field.name) == 0) {
+            /*
+             * The name matches.  Unlike methods, we can't have two fields
+             * with the same names but differing types.
+             */
+            if (strcmp(signature, pField->field.signature) != 0) {
+                LOGW("Found field '%s', but sig is '%s' not '%s'\n",
+                    fieldName, pField->field.signature, signature);
+                return NULL;
+            }
+            return pField;
+        }
+    }
+
+    return NULL;
+}
+
+/*
+ * Find a matching static field, in this class, its interfaces, or a
+ * superclass.  Search order: current class, then the interfaces this
+ * class added (not those inherited from the superclass), then recurse
+ * into the superclass.
+ *
+ * Returns NULL if the field can't be found.  (Does not throw an exception.)
+ */
+StaticField* dvmFindStaticFieldHier(const ClassObject* clazz,
+    const char* fieldName, const char* signature)
+{
+    StaticField* pField;
+
+    /*
+     * Search for a match in the current class.
+     */
+    pField = dvmFindStaticField(clazz, fieldName, signature);
+    if (pField != NULL)
+        return pField;
+
+    /*
+     * See if it's in any of our interfaces.  We don't check interfaces
+     * inherited from the superclass yet.
+     *
+     * (Note the set may have been stripped down because of redundancy with
+     * the superclass; see notes in createIftable.)
+     */
+    int i = 0;
+    if (clazz->super != NULL) {
+        assert(clazz->iftableCount >= clazz->super->iftableCount);
+        i = clazz->super->iftableCount;
+    }
+    for ( ; i < clazz->iftableCount; i++) {
+        ClassObject* iface = clazz->iftable[i].clazz;
+        pField = dvmFindStaticField(iface, fieldName, signature);
+        if (pField != NULL)
+            return pField;
+    }
+
+    if (clazz->super != NULL)
+        return dvmFindStaticFieldHier(clazz->super, fieldName, signature);
+    else
+        return NULL;
+}
+
+/*
+ * Compare the given name, return type, and argument types with the contents
+ * of the given method. This returns 0 if they are equal and non-zero if not.
+ *
+ * Cheap checks run first (name, return type, parameter count) before the
+ * per-parameter walk.  "argTypes" must hold "argCount" descriptor strings.
+ */
+static inline int compareMethodHelper(Method* method, const char* methodName,
+    const char* returnType, size_t argCount, const char** argTypes)
+{
+    DexParameterIterator iterator;
+    const DexProto* proto;
+
+    if (strcmp(methodName, method->name) != 0) {
+        return 1;
+    }
+
+    proto = &method->prototype;
+
+    if (strcmp(returnType, dexProtoGetReturnType(proto)) != 0) {
+        return 1;
+    }
+
+    if (dexProtoGetParameterCount(proto) != argCount) {
+        return 1;
+    }
+
+    dexParameterIteratorInit(&iterator, proto);
+
+    for (/*argCount*/; argCount != 0; argCount--, argTypes++) {
+        const char* argType = *argTypes;
+        const char* paramType = dexParameterIteratorNextDescriptor(&iterator);
+
+        if (paramType == NULL) {
+            /* Param list ended early; no match */
+            break;
+        } else if (strcmp(argType, paramType) != 0) {
+            /* Types aren't the same; no match. */
+            break;
+        }
+    }
+
+    if (argCount == 0) {
+        /* We ran through all the given arguments... */
+        if (dexParameterIteratorNextDescriptor(&iterator) == NULL) {
+            /* ...and through all the method's arguments; success! */
+            return 0;
+        }
+    }
+
+    return 1;
+}
+
+/*
+ * Get the count of arguments in the given method descriptor string,
+ * and also find a pointer to the return type.
+ *
+ * On success, returns the argument count and sets *pReturnType to point
+ * just past the ')' (i.e. at the return type descriptor).  On a
+ * malformed descriptor, returns 0 and sets *pReturnType to NULL.
+ *
+ * An array counts as one argument regardless of dimension; the count
+ * is added when the element type is scanned on the next iteration.
+ */
+static inline size_t countArgsAndFindReturnType(const char* descriptor,
+    const char** pReturnType)
+{
+    size_t count = 0;
+    bool bogus = false;
+    bool done = false;
+
+    assert(*descriptor == '(');
+    descriptor++;
+
+    while (!done) {
+        switch (*descriptor) {
+            case 'B': case 'C': case 'D': case 'F':
+            case 'I': case 'J': case 'S': case 'Z': {
+                count++;
+                break;
+            }
+            case '[': {
+                do {
+                    descriptor++;
+                } while (*descriptor == '[');
+                /*
+                 * Don't increment count, as it will be taken care of
+                 * by the next iteration. Also, decrement descriptor
+                 * to compensate for the increment below the switch.
+                 */
+                descriptor--;
+                break;
+            }
+            case 'L': {
+                do {
+                    descriptor++;
+                } while ((*descriptor != ';') && (*descriptor != '\0'));
+                count++;
+                if (*descriptor == '\0') {
+                    /* Bogus descriptor: unterminated class name. */
+                    done = true;
+                    bogus = true;
+                }
+                break;
+            }
+            case ')': {
+                /*
+                 * Note: The loop will exit after incrementing descriptor
+                 * one more time, so it then points at the return type.
+                 */
+                done = true;
+                break;
+            }
+            default: {
+                /* Bogus descriptor. */
+                done = true;
+                bogus = true;
+                break;
+            }
+        }
+
+        descriptor++;
+    }
+
+    if (bogus) {
+        *pReturnType = NULL;
+        return 0;
+    }
+
+    *pReturnType = descriptor;
+    return count;
+}
+
+/*
+ * Copy the argument types into the given array using the given buffer
+ * for the contents.
+ *
+ * Each argTypes[i] is set to a NUL-terminated descriptor stored in
+ * "buffer".  The caller must size "buffer" for all argument descriptor
+ * characters plus one '\0' per argument (see findMethodInListByDescriptor)
+ * and must have validated "descriptor" (e.g. via
+ * countArgsAndFindReturnType) — there is no bounds checking here.
+ */
+static inline void copyTypes(char* buffer, const char** argTypes,
+    size_t argCount, const char* descriptor)
+{
+    size_t i;
+    char c;
+
+    /* Skip the '('. */
+    descriptor++;
+
+    for (i = 0; i < argCount; i++) {
+        argTypes[i] = buffer;
+
+        /* Copy all the array markers and one extra character. */
+        do {
+            c = *(descriptor++);
+            *(buffer++) = c;
+        } while (c == '[');
+
+        if (c == 'L') {
+            /* Copy the rest of a class name. */
+            do {
+                c = *(descriptor++);
+                *(buffer++) = c;
+            } while (c != ';');
+        }
+
+        *(buffer++) = '\0';
+    }
+}
+
+/*
+ * Look for a match in the given class. Returns the match if found
+ * or NULL if not.
+ *
+ * "findVirtual" selects virtualMethods vs. directMethods; "isHier"
+ * continues the search up the superclass chain.  The descriptor is
+ * pre-parsed once into per-argument strings so the inner comparison
+ * loop is cheap.
+ */
+static Method* findMethodInListByDescriptor(const ClassObject* clazz,
+    bool findVirtual, bool isHier, const char* name, const char* descriptor)
+{
+    const char* returnType;
+    size_t argCount = countArgsAndFindReturnType(descriptor, &returnType);
+
+    if (returnType == NULL) {
+        LOGW("Bogus method descriptor: %s\n", descriptor);
+        return NULL;
+    }
+
+    /*
+     * Make buffer big enough for all the argument type characters and
+     * one '\0' per argument. The "- 2" is because "returnType -
+     * descriptor" includes two parens.
+     *
+     * NOTE(review): for a no-arg method both VLAs have length zero,
+     * which C99 leaves undefined (works as a GCC extension) — confirm
+     * this is acceptable for the supported toolchains.
+     */
+    char buffer[argCount + (returnType - descriptor) - 2];
+    const char* argTypes[argCount];
+
+    copyTypes(buffer, argTypes, argCount, descriptor);
+
+    while (clazz != NULL) {
+        Method* methods;
+        size_t methodCount;
+        size_t i;
+
+        if (findVirtual) {
+            methods = clazz->virtualMethods;
+            methodCount = clazz->virtualMethodCount;
+        } else {
+            methods = clazz->directMethods;
+            methodCount = clazz->directMethodCount;
+        }
+
+        for (i = 0; i < methodCount; i++) {
+            Method* method = &methods[i];
+            if (compareMethodHelper(method, name, returnType, argCount,
+                            argTypes) == 0) {
+                return method;
+            }
+        }
+
+        if (! isHier) {
+            break;
+        }
+
+        clazz = clazz->super;
+    }
+
+    return NULL;
+}
+
+/*
+ * Look for a match in the given clazz. Returns the match if found
+ * or NULL if not.
+ *
+ * "findVirtual" selects virtualMethods vs. directMethods; "isHier"
+ * continues the search up the superclass chain.
+ */
+static Method* findMethodInListByProto(const ClassObject* clazz,
+    bool findVirtual, bool isHier, const char* name, const DexProto* proto)
+{
+    while (clazz != NULL) {
+        Method* methods;
+        size_t methodCount;
+        size_t i;
+
+        if (findVirtual) {
+            methods = clazz->virtualMethods;
+            methodCount = clazz->virtualMethodCount;
+        } else {
+            methods = clazz->directMethods;
+            methodCount = clazz->directMethodCount;
+        }
+
+        for (i = 0; i < methodCount; i++) {
+            Method* method = &methods[i];
+            if (dvmCompareNameProtoAndMethod(name, proto, method) == 0) {
+                return method;
+            }
+        }
+
+        if (! isHier) {
+            break;
+        }
+
+        clazz = clazz->super;
+    }
+
+    return NULL;
+}
+
+/*
+ * Find a "virtual" method in a class, by name and method descriptor
+ * string.
+ *
+ * Does not chase into the superclass.
+ *
+ * Returns NULL if the method can't be found.  (Does not throw an exception.)
+ */
+Method* dvmFindVirtualMethodByDescriptor(const ClassObject* clazz,
+    const char* methodName, const char* descriptor)
+{
+    /* findVirtual=true, isHier=false */
+    return findMethodInListByDescriptor(clazz, true, false,
+            methodName, descriptor);
+
+    // TODO? - throw IncompatibleClassChangeError if a match is
+    // found in the directMethods list, rather than NotFoundError.
+    // Note we could have been called by dvmFindVirtualMethodHier though.
+}
+
+
+/*
+ * Find a "virtual" method in a class, knowing only the name.  This is
+ * only useful in limited circumstances, e.g. when searching for a member
+ * of an annotation class.
+ *
+ * Does not chase into the superclass.
+ *
+ * Returns NULL if the method can't be found.  (Does not throw an exception.)
+ */
+Method* dvmFindVirtualMethodByName(const ClassObject* clazz,
+    const char* methodName)
+{
+    Method* methods = clazz->virtualMethods;
+    int methodCount = clazz->virtualMethodCount;
+    int i;
+
+    for (i = 0; i < methodCount; i++) {
+        if (strcmp(methods[i].name, methodName) == 0)
+            return &methods[i];
+    }
+
+    return NULL;
+}
+
+/*
+ * Find a "virtual" method in a class, by name and DexProto.
+ *
+ * Does not chase into the superclass.
+ *
+ * Returns NULL if the method can't be found.  (Does not throw an exception.)
+ */
+Method* dvmFindVirtualMethod(const ClassObject* clazz, const char* methodName,
+    const DexProto* proto)
+{
+    /* findVirtual=true, isHier=false */
+    return findMethodInListByProto(clazz, true, false, methodName, proto);
+}
+
+/*
+ * Find a "virtual" method in a class, by name and method descriptor
+ * string.  If we don't find it, try the superclass.
+ *
+ * Returns NULL if the method can't be found.  (Does not throw an exception.)
+ */
+Method* dvmFindVirtualMethodHierByDescriptor(const ClassObject* clazz,
+    const char* methodName, const char* descriptor)
+{
+    /* findVirtual=true, isHier=true */
+    return findMethodInListByDescriptor(clazz, true, true,
+            methodName, descriptor);
+}
+
+/*
+ * Find a "virtual" method in a class, by name and DexProto.  If we
+ * don't find it, try the superclass.
+ *
+ * Returns NULL if the method can't be found.  (Does not throw an exception.)
+ */
+Method* dvmFindVirtualMethodHier(const ClassObject* clazz,
+    const char* methodName, const DexProto* proto)
+{
+    /* findVirtual=true, isHier=true */
+    return findMethodInListByProto(clazz, true, true, methodName, proto);
+}
+
+/*
+ * Find a "direct" method (static, private, or "<*init>"), by name and
+ * method descriptor string.
+ *
+ * Returns NULL if the method can't be found.  (Does not throw an exception.)
+ */
+Method* dvmFindDirectMethodByDescriptor(const ClassObject* clazz,
+    const char* methodName, const char* descriptor)
+{
+    /* findVirtual=false, isHier=false */
+    return findMethodInListByDescriptor(clazz, false, false,
+            methodName, descriptor);
+}
+
+/*
+ * Find a "direct" method.  If we don't find it, try the superclass.  This
+ * is only appropriate for static methods, but will work for all direct
+ * methods.
+ *
+ * Returns NULL if the method can't be found.  (Does not throw an exception.)
+ */
+Method* dvmFindDirectMethodHierByDescriptor(const ClassObject* clazz,
+    const char* methodName, const char* descriptor)
+{
+    /* findVirtual=false, isHier=true */
+    return findMethodInListByDescriptor(clazz, false, true,
+            methodName, descriptor);
+}
+
+/*
+ * Find a "direct" method (static or "<*init>"), by name and DexProto.
+ *
+ * Returns NULL if the method can't be found.  (Does not throw an exception.)
+ */
+Method* dvmFindDirectMethod(const ClassObject* clazz, const char* methodName,
+    const DexProto* proto)
+{
+    /* findVirtual=false, isHier=false */
+    return findMethodInListByProto(clazz, false, false, methodName, proto);
+}
+
+/*
+ * Find a "direct" method in a class, by name and DexProto.  If we
+ * don't find it, try the superclass.
+ *
+ * Returns NULL if the method can't be found.  (Does not throw an exception.)
+ */
+Method* dvmFindDirectMethodHier(const ClassObject* clazz,
+    const char* methodName, const DexProto* proto)
+{
+    /* findVirtual=false, isHier=true */
+    return findMethodInListByProto(clazz, false, true, methodName, proto);
+}
+
+/*
+ * We have a method pointer for a method in "clazz", but it might be
+ * pointing to a method in a derived class.  We want to find the actual entry
+ * from the class' vtable.  If "clazz" is an interface, we have to do a
+ * little more digging.
+ *
+ * (This is used for reflection and JNI "call method" calls.)
+ *
+ * Returns NULL — with an exception raised — if the interface isn't
+ * implemented by "clazz" or if the resolved method is abstract.
+ */
+const Method* dvmGetVirtualizedMethod(const ClassObject* clazz,
+    const Method* meth)
+{
+    Method* actualMeth;
+    int methodIndex;
+
+    assert(!dvmIsStaticMethod(meth));
+
+    if (dvmIsPrivateMethod(meth))   // no vtable entry for these
+        return meth;
+
+    /*
+     * If the method was declared in an interface, we need to scan through
+     * the class' list of interfaces for it, and find the vtable index
+     * from that.
+     *
+     * TODO: use the interface cache.
+     */
+    if (dvmIsInterfaceClass(meth->clazz)) {
+        int i;
+
+        for (i = 0; i < clazz->iftableCount; i++) {
+            if (clazz->iftable[i].clazz == meth->clazz)
+                break;
+        }
+        if (i == clazz->iftableCount) {
+            /* interface not implemented by "clazz" */
+            dvmThrowException("Ljava/lang/IncompatibleClassChangeError;",
+                "invoking method from interface not implemented by class");
+            return NULL;
+        }
+
+        /* map the interface method index to a vtable slot */
+        methodIndex = clazz->iftable[i].methodIndexArray[meth->methodIndex];
+    } else {
+        methodIndex = meth->methodIndex;
+    }
+
+    assert(methodIndex >= 0 && methodIndex < clazz->vtableCount);
+    actualMeth = clazz->vtable[methodIndex];
+
+    /*
+     * Make sure there's code to execute.
+     */
+    if (dvmIsAbstractMethod(actualMeth)) {
+        dvmThrowException("Ljava/lang/AbstractMethodError;", NULL);
+        return NULL;
+    }
+    assert(!dvmIsMirandaMethod(actualMeth));
+
+    return actualMeth;
+}
+
+/*
+ * Get the source file for a method.  Currently just returns the class's
+ * default source file.
+ */
+const char* dvmGetMethodSourceFile(const Method* meth)
+{
+    /*
+     * TODO: A method's debug info can override the default source
+     * file for a class, so we should account for that possibility
+     * here.
+     */
+    return meth->clazz->sourceFile;
+}
+
+/*
+ * Dump some information about an object: its class and the values of
+ * its instance fields, via LOGV.
+ *
+ * NOTE(review): this walks clazz->ifields only, which appears to cover
+ * just the fields declared by the object's exact class — fields
+ * inherited from superclasses presumably are not dumped; confirm
+ * against how ifields is populated.
+ */
+void dvmDumpObject(const Object* obj)
+{
+    ClassObject* clazz;
+    int i;
+
+    if (obj == NULL || obj->clazz == NULL) {
+        LOGW("Null or malformed object not dumped\n");
+        return;
+    }
+
+    clazz = obj->clazz;
+    LOGV("----- Object dump: %p (%s, %d bytes) -----\n",
+        obj, clazz->descriptor, (int) clazz->objectSize);
+    //printHexDump(obj, clazz->objectSize);
+    LOGV("  Fields:\n");
+    for (i = 0; i < clazz->ifieldCount; i++) {
+        const InstField* pField = &clazz->ifields[i];
+        char type = pField->field.signature[0];
+
+        if (type == 'F' || type == 'D') {
+            /* float/double printed as a double */
+            double dval;
+
+            if (type == 'F')
+                dval = dvmGetFieldFloat(obj, pField->byteOffset);
+            else
+                dval = dvmGetFieldDouble(obj, pField->byteOffset);
+
+            LOGV("  %2d: '%s' '%s' flg=%04x %.3f\n", i, pField->field.name,
+                pField->field.signature, pField->field.accessFlags, dval);
+        } else {
+            /* everything else (including references) printed as hex */
+            long long lval;
+
+            if (pField->field.signature[0] == 'J')
+                lval = dvmGetFieldLong(obj, pField->byteOffset);
+            else if (pField->field.signature[0] == 'Z')
+                lval = dvmGetFieldBoolean(obj, pField->byteOffset);
+            else
+                lval = dvmGetFieldInt(obj, pField->byteOffset);
+
+            LOGV("  %2d: '%s' '%s' af=%04x 0x%llx\n", i, pField->field.name,
+                pField->field.signature, pField->field.accessFlags, lval);
+        }
+    }
+}
+
diff --git a/vm/oo/Object.h b/vm/oo/Object.h
new file mode 100644
index 0000000..e207aa5
--- /dev/null
+++ b/vm/oo/Object.h
@@ -0,0 +1,829 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Declaration of the fundamental Object type and refinements thereof, plus
+ * some functions for manipulating them.
+ */
+#ifndef _DALVIK_OO_OBJECT
+#define _DALVIK_OO_OBJECT
+
+#include <stddef.h>
+
+/* fwd decl */
+struct DataObject;
+struct ClassObject;
+struct StringObject;
+struct ArrayObject;
+struct Method;
+struct ExceptionEntry;
+struct LineNumEntry;
+struct StaticField;
+struct InstField;
+struct Field;
+typedef struct DataObject DataObject;
+typedef struct ClassObject ClassObject;
+typedef struct StringObject StringObject;
+typedef struct ArrayObject ArrayObject;
+typedef struct Method Method;
+typedef struct ExceptionEntry ExceptionEntry;
+typedef struct LineNumEntry LineNumEntry;
+typedef struct StaticField StaticField;
+typedef struct InstField InstField;
+typedef struct Field Field;
+
+/*
+ * Native function pointer type.
+ *
+ * "args[0]" holds the "this" pointer for virtual methods.
+ *
+ * The "Bridge" form is a super-set of the "Native" form; in many places
+ * they are used interchangeably.  Currently, all functions have all
+ * arguments passed in, but some functions only care about the first two.
+ * Passing extra arguments to a C function is (mostly) harmless.
+ */
+typedef void (*DalvikBridgeFunc)(const u4* args, JValue* pResult,
+    const Method* method, struct Thread* self);
+typedef void (*DalvikNativeFunc)(const u4* args, JValue* pResult);
+
+
+/* vm-internal access flags and related definitions */
+typedef enum AccessFlags {
+    ACC_MIRANDA         = 0x8000,       // method (internal to VM)
+    JAVA_FLAGS_MASK     = 0xffff,       // bits set from Java sources (low 16)
+} AccessFlags;
+
+/* Use the top 16 bits of the access flags field for
+ * other class flags.  Code should use the *CLASS_FLAG*()
+ * macros to set/get these flags.
+ */
+typedef enum ClassFlags {
+    /* NOTE(review): (1<<31) left-shifts into the sign bit of a signed int,
+       which is undefined behavior in standard C (CERT INT34-C), and the
+       resulting value exceeds the range guaranteed for enum constants.
+       Works with the toolchains in use, but consider (u4)0x80000000 or
+       plain #defines — confirm before changing, since these bits are
+       stored in the u4 ClassObject.accessFlags field. */
+    CLASS_ISFINALIZABLE     = (1<<31),  // class/ancestor overrides finalize()
+    CLASS_ISARRAY           = (1<<30),  // class is a "[*"
+    CLASS_ISOBJECTARRAY     = (1<<29),  // class is a "[L*" or "[[*"
+    CLASS_ISREFERENCE       = (1<<28),  // class is a soft/weak/phantom ref
+                                        // only ISREFERENCE is set --> soft
+    CLASS_ISWEAKREFERENCE   = (1<<27),  // class is a weak reference
+    CLASS_ISPHANTOMREFERENCE = (1<<26), // class is a phantom reference
+
+    CLASS_MULTIPLE_DEFS     = (1<<25),  // DEX verifier: defs in multiple DEXs
+
+    /* unlike the others, these can be present in the optimized DEX file */
+    CLASS_ISOPTIMIZED       = (1<<17),  // class may contain opt instrs
+    CLASS_ISPREVERIFIED     = (1<<16),  // class has been pre-verified
+} ClassFlags;
+
+/* bits we can reasonably expect to see set in a DEX access flags field */
+#define EXPECTED_FILE_FLAGS \
+    (ACC_CLASS_MASK | CLASS_ISPREVERIFIED | CLASS_ISOPTIMIZED)
+
+/* current state of the class, increasing as we progress */
+typedef enum ClassStatus {
+    CLASS_ERROR         = -1,
+
+    CLASS_NOTREADY      = 0,
+    CLASS_LOADED        = 1,
+    CLASS_PREPARED      = 2,    /* part of linking */
+    CLASS_RESOLVED      = 3,    /* part of linking */
+    CLASS_VERIFYING     = 4,    /* in the process of being verified */
+    CLASS_VERIFIED      = 5,    /* logically part of linking; done pre-init */
+    CLASS_INITIALIZING  = 6,    /* class init in progress */
+    CLASS_INITIALIZED   = 7,    /* ready to go */
+} ClassStatus;
+
+
+/*
+ * Primitive type identifiers.  We use these values as indexes into an
+ * array of synthesized classes, so these start at zero and count up.
+ * The order is arbitrary (mimics table in doc for newarray opcode),
+ * but can't be changed without shuffling some reflection tables.
+ *
+ * PRIM_VOID can't be used as an array type, but we include it here for
+ * other uses (e.g. Void.TYPE).
+ */
+typedef enum PrimitiveType {
+    PRIM_NOT        = -1,       /* value is not a primitive type */
+    PRIM_BOOLEAN    = 0,
+    PRIM_CHAR       = 1,
+    PRIM_FLOAT      = 2,
+    PRIM_DOUBLE     = 3,
+    PRIM_BYTE       = 4,
+    PRIM_SHORT      = 5,
+    PRIM_INT        = 6,
+    PRIM_LONG       = 7,
+    PRIM_VOID       = 8,
+
+    PRIM_MAX
+} PrimitiveType;
+#define PRIM_TYPE_TO_LETTER "ZCFDBSIJV"     /* must match order in enum */
+
+/*
+ * This defines the amount of space we leave for field slots in the
+ * java.lang.Class definition.  If we alter the class to have more than
+ * this many fields, the VM will abort at startup.
+ */
+#define CLASS_FIELD_SLOTS   4
+
+
+/*
+ * Used for iftable in ClassObject.
+ */
+typedef struct InterfaceEntry {
+    /* pointer to interface class */
+    ClassObject*    clazz;
+
+    /*
+     * Index into array of vtable offsets.  This points into the ifviPool,
+     * which holds the vtables for all interfaces declared by this class.
+     */
+    int*            methodIndexArray;
+} InterfaceEntry;
+
+
+
+/*
+ * There are three types of objects:
+ *  Class objects - an instance of java.lang.Class
+ *  Array objects - an object created with a "new array" instruction
+ *  Data objects - an object that is neither of the above
+ *
+ * We also define String objects.  At present they're equivalent to
+ * DataObject, but that may change.  (Either way, they make some of the
+ * code more obvious.)
+ *
+ * All objects have an Object header followed by type-specific data.
+ */
+typedef struct Object {
+    /* ptr to class object */
+    ClassObject*    clazz;
+
+    /* thin lock or "fat" monitor */
+    Lock            lock;
+} Object;
+
+/*
+ * Properly initialize an Object.
+ * void DVM_OBJECT_INIT(Object *obj, ClassObject *clazz_)
+ */
+#define DVM_OBJECT_INIT(obj, clazz_) \
+    do { (obj)->clazz = (clazz_); DVM_LOCK_INIT(&(obj)->lock); } while (0)
+
+/*
+ * Get/set class flags.
+ */
+#define SET_CLASS_FLAG(clazz, flag) \
+    do { (clazz)->accessFlags |= (flag); } while (0)
+
+#define CLEAR_CLASS_FLAG(clazz, flag) \
+    do { (clazz)->accessFlags &= ~(flag); } while (0)
+
+#define IS_CLASS_FLAG_SET(clazz, flag) \
+    (((clazz)->accessFlags & (flag)) != 0)
+
+#define GET_CLASS_FLAG_GROUP(clazz, flags) \
+    ((u4)((clazz)->accessFlags & (flags)))
+
+/*
+ * Data objects have an Object header followed by their instance data.
+ */
+struct DataObject {
+    Object          obj;                /* MUST be first item */
+
+    /* variable #of u4 slots; u8 uses 2 slots */
+    u4              instanceData[1];
+};
+
+/*
+ * Strings are used frequently enough that we may want to give them their
+ * own unique type.
+ *
+ * Using a dedicated type object to access the instance data provides a
+ * performance advantage but makes the java/lang/String.java implementation
+ * fragile.
+ *
+ * Currently this is just equal to DataObject, and we pull the fields out
+ * like we do for any other object.
+ */
+struct StringObject {
+    Object          obj;                /* MUST be first item */
+
+    /* variable #of u4 slots; u8 uses 2 slots */
+    u4              instanceData[1];
+};
+
+
+/*
+ * Array objects have these additional fields.
+ *
+ * We don't currently store the size of each element.  Usually it's implied
+ * by the instruction.  If necessary, the width can be derived from
+ * the first char of obj->clazz->name.
+ */
+struct ArrayObject {
+    Object          obj;                /* MUST be first item */
+
+    /* number of elements; immutable after init */
+    u4              length;
+
+    /*
+     * Array contents; actual size is (length * sizeof(type)).  This is
+     * declared as u8 so that the compiler inserts any necessary padding
+     * (e.g. for EABI); the actual allocation may be smaller than 8 bytes.
+     */
+    u8              contents[1];
+};
+
+/*
+ * Class objects have many additional fields.  This is used for both
+ * classes and interfaces, including synthesized classes (arrays and
+ * primitive types).
+ *
+ * Class objects are unusual in that they have some fields allocated with
+ * the system malloc (or LinearAlloc), rather than on the GC heap.  This is
+ * handy during initialization, but does require special handling when
+ * discarding java.lang.Class objects.
+ *
+ * The separation of methods (direct vs. virtual) and fields (class vs.
+ * instance) used in Dalvik works out pretty well.  The only time it's
+ * annoying is when enumerating or searching for things with reflection.
+ */
+struct ClassObject {
+    Object          obj;                /* MUST be first item */
+
+    /* leave space for instance data; we could access fields directly if we
+       freeze the definition of java/lang/Class */
+    u4              instanceData[CLASS_FIELD_SLOTS];
+
+    /* UTF-8 descriptor for the class; from constant pool, or on heap
+       if generated ("[C") */
+    const char*     descriptor;
+    char*           descriptorAlloc;
+
+    /* access flags; low 16 bits are defined by VM spec */
+    u4              accessFlags;
+
+    /* DexFile from which we came; needed to resolve constant pool entries */
+    /* (will be NULL for VM-generated, e.g. arrays and primitive classes) */
+    DvmDex*         pDvmDex;
+
+    /* state of class initialization */
+    ClassStatus     status;
+
+    /* if class verify fails, we must return same error on subsequent tries */
+    ClassObject*    verifyErrorClass;
+
+    /* threadId, used to check for recursive <clinit> invocation */
+    u4              initThreadId;
+
+    /*
+     * Total object size; used when allocating storage on gc heap.  (For
+     * interfaces and abstract classes this will be zero.)
+     */
+    size_t          objectSize;
+
+    /* arrays only: class object for base element, for instanceof/checkcast
+       (for String[][][], this will be String) */
+    ClassObject*    elementClass;
+
+    /* class object representing an array of this class; set on first use */
+    ClassObject*    arrayClass;
+
+    /* arrays only: number of dimensions, e.g. int[][] is 2 */
+    int             arrayDim;
+
+    /* primitive type index, or PRIM_NOT (-1); set for generated prim classes */
+    PrimitiveType   primitiveType;
+
+    /* superclass, or NULL if this is java.lang.Object */
+    ClassObject*    super;
+
+    /* defining class loader, or NULL for the "bootstrap" system loader */
+    Object*         classLoader;
+
+    /* initiating class loader list */
+    Object**        initiatingLoaders;
+    int             initiatingLoaderCount;
+
+    /* array of interfaces this class implements directly */
+    int             interfaceCount;
+    ClassObject**   interfaces;
+
+    /* static, private, and <init> methods */
+    int             directMethodCount;
+    Method*         directMethods;
+
+    /* virtual methods defined in this class; invoked through vtable */
+    int             virtualMethodCount;
+    Method*         virtualMethods;
+
+    /*
+     * Virtual method table (vtable), for use by "invoke-virtual".  The
+     * vtable from the superclass is copied in, and virtual methods from
+     * our class either replace those from the super or are appended.
+     */
+    int             vtableCount;
+    Method**        vtable;
+
+    /*
+     * Interface table (iftable), one entry per interface supported by
+     * this class.  That means one entry for each interface we support
+     * directly, indirectly via superclass, or indirectly via
+     * superinterface.  This will be null if neither we nor our superclass
+     * implement any interfaces.
+     *
+     * Why we need this: given "class Foo implements Face", declare
+     * "Face faceObj = new Foo()".  Invoke faceObj.blah(), where "blah" is
+     * part of the Face interface.  We can't easily use a single vtable.
+     *
+     * For every interface a concrete class implements, we create a list of
+     * virtualMethod indices for the methods in the interface.
+     */
+    int             iftableCount;
+    InterfaceEntry* iftable;
+
+    /*
+     * The interface vtable indices for iftable get stored here.  By placing
+     * them all in a single pool for each class that implements interfaces,
+     * we decrease the number of allocations.
+     */
+    int             ifviPoolCount;
+    int*            ifviPool;
+
+    /* static fields */
+    int             sfieldCount;
+    StaticField*    sfields;
+
+    /* instance fields
+     *
+     * These describe the layout of the contents of a DataObject-compatible
+     * Object.  Note that only the fields directly defined by this class
+     * are listed in ifields;  fields defined by a superclass are listed
+     * in the superclass's ClassObject.ifields.
+     *
+     * All instance fields that refer to objects are guaranteed to be
+     * at the beginning of the field list.  ifieldRefCount specifies
+     * the number of reference fields.
+     */
+    int             ifieldCount;
+    int             ifieldRefCount; // number of fields that are object refs
+    InstField*      ifields;
+
+    /* source file name, if known */
+    const char*     sourceFile;
+
+#if WITH_HPROF && WITH_HPROF_STACK
+    int             hprofSerialNumber;
+#endif
+};
+
+/*
+ * A method.  We create one of these for every method in every class
+ * we load, so try to keep the size to a minimum.
+ *
+ * Much of this comes from and could be accessed in the data held in shared
+ * memory.  We hold it all together here for speed.  Everything but the
+ * pointers could be held in a shared table generated by the optimizer;
+ * if we're willing to convert them to offsets and take the performance
+ * hit (e.g. "meth->insns" becomes "baseAddr + meth->insnsOffset") we
+ * could move everything but "nativeFunc".
+ */
+struct Method {
+    /* the class we are a part of */
+    ClassObject*    clazz;
+
+    /* access flags; low 16 bits are defined by spec (could be u2?) */
+    u4              accessFlags;
+
+    /*
+     * For concrete virtual methods, this is the offset of the method
+     * in "vtable".
+     *
+     * For abstract methods in an interface class, this is the offset
+     * of the method in "iftable[n]->methodIndexArray".
+     */
+    u2             methodIndex;
+
+    /*
+     * Method bounds; not needed for an abstract method.
+     *
+     * For a native method, we compute the size of the argument list, and
+     * set "insSize" and "registerSize" equal to it.
+     */
+    u2              registersSize;  /* ins + locals */
+    u2              outsSize;
+    u2              insSize;
+
+    /* method name, e.g. "<init>" or "eatLunch" */
+    const char*     name;
+
+    /*
+     * Method prototype descriptor string (return and argument types).
+     *
+     * TODO: This currently must specify the DexFile as well as the proto_ids
+     * index, because generated Proxy classes don't have a DexFile.  We can
+     * remove the DexFile* and reduce the size of this struct if we generate
+     * a DEX for proxies.
+     */
+    DexProto        prototype;
+
+    /* short-form method descriptor string */
+    const char*     shorty;
+
+    /*
+     * The remaining items are not used for abstract or native methods.
+     * (JNI is currently hijacking "insns" as a function pointer, set
+     * after the first call.  For internal-native this stays null.)
+     */
+
+    /* the actual code */
+    const u2*       insns;          /* instructions, in memory-mapped .dex */
+
+    /* cached JNI argument and return-type hints */
+    int             jniArgInfo;
+
+    /*
+     * Native method ptr; could be actual function or a JNI bridge.  We
+     * don't currently discriminate between DalvikBridgeFunc and
+     * DalvikNativeFunc; the former takes an argument superset (i.e. two
+     * extra args) which will be ignored.  If necessary we can use
+     * insns==NULL to detect JNI bridge vs. internal native.
+     */
+    DalvikBridgeFunc nativeFunc;
+
+#ifdef WITH_PROFILER
+    bool            inProfile;
+#endif
+#ifdef WITH_DEBUGGER
+    short           debugBreakpointCount;
+#endif
+};
+
+/*
+ * Generic field header.  We pass this around when we want a generic Field
+ * pointer (e.g. for reflection stuff).  Testing the accessFlags for
+ * ACC_STATIC allows a proper up-cast.
+ */
+struct Field {
+    ClassObject*    clazz;          /* class in which the field is declared */
+    const char*     name;
+    const char*     signature;      /* e.g. "I", "[C", "Landroid/os/Debug;" */
+    u4              accessFlags;
+#ifdef PROFILE_FIELD_ACCESS
+    u4              gets;
+    u4              puts;
+#endif
+};
+
+/*
+ * Static field.
+ */
+struct StaticField {
+    Field           field;          /* MUST be first item */
+    JValue          value;          /* initially set from DEX for primitives */
+};
+
+/*
+ * Instance field.
+ */
+struct InstField {
+    Field           field;          /* MUST be first item */
+
+    /*
+     * This field indicates the byte offset from the beginning of the
+     * (Object *) to the actual instance data; e.g., byteOffset==0 is
+     * the same as the object pointer (bug!), and byteOffset==4 is 4
+     * bytes farther.
+     */
+    int             byteOffset;
+};
+
+
+/*
+ * Find a method within a class.  The superclass is not searched.
+ */
+Method* dvmFindDirectMethodByDescriptor(const ClassObject* clazz,
+    const char* methodName, const char* signature);
+Method* dvmFindVirtualMethodByDescriptor(const ClassObject* clazz,
+    const char* methodName, const char* signature);
+Method* dvmFindVirtualMethodByName(const ClassObject* clazz,
+    const char* methodName);
+Method* dvmFindDirectMethod(const ClassObject* clazz, const char* methodName,
+    const DexProto* proto);
+Method* dvmFindVirtualMethod(const ClassObject* clazz, const char* methodName,
+    const DexProto* proto);
+
+
+/*
+ * Find a method within a class hierarchy.
+ */
+Method* dvmFindDirectMethodHierByDescriptor(const ClassObject* clazz,
+    const char* methodName, const char* descriptor);
+Method* dvmFindVirtualMethodHierByDescriptor(const ClassObject* clazz,
+    const char* methodName, const char* signature);
+Method* dvmFindDirectMethodHier(const ClassObject* clazz,
+    const char* methodName, const DexProto* proto);
+Method* dvmFindVirtualMethodHier(const ClassObject* clazz,
+    const char* methodName, const DexProto* proto);
+
+/*
+ * Find the implementation of "meth" in "clazz".
+ *
+ * Returns NULL and throws an exception if not found.
+ */
+const Method* dvmGetVirtualizedMethod(const ClassObject* clazz,
+    const Method* meth);
+
+/*
+ * Get the source file associated with a method.
+ */
+const char* dvmGetMethodSourceFile(const Method* meth);
+
+/*
+ * Find a field within a class.  The superclass is not searched.
+ */
+InstField* dvmFindInstanceField(const ClassObject* clazz,
+    const char* fieldName, const char* signature);
+StaticField* dvmFindStaticField(const ClassObject* clazz,
+    const char* fieldName, const char* signature);
+
+/*
+ * Find a field in a class/interface hierarchy.
+ */
+InstField* dvmFindInstanceFieldHier(const ClassObject* clazz,
+    const char* fieldName, const char* signature);
+StaticField* dvmFindStaticFieldHier(const ClassObject* clazz,
+    const char* fieldName, const char* signature);
+
+/*
+ * Find a field and return the byte offset from the object pointer.  Only
+ * searches the specified class, not the superclass.
+ *
+ * Returns -1 on failure.
+ */
+INLINE int dvmFindFieldOffset(const ClassObject* clazz,
+    const char* fieldName, const char* signature)
+{
+    /* search only the fields declared directly by "clazz" */
+    const InstField* pField = dvmFindInstanceField(clazz, fieldName, signature);
+    return (pField == NULL) ? -1 : pField->byteOffset;
+}
+
+/*
+ * Field access functions.  Pass in the word offset from Field->byteOffset.
+ *
+ * We guarantee that long/double field data is 64-bit aligned, so it's safe
+ * to access them with ldrd/strd on ARM.
+ *
+ * The VM treats all fields as 32 or 64 bits, so the field set functions
+ * write 32 bits even if the underlying type is smaller.
+ */
+#define BYTE_OFFSET(_ptr, _offset)  ((void*) (((u1*)(_ptr)) + (_offset)))
+
+INLINE JValue* dvmFieldPtr(const Object* obj, int offset) {
+    return ((JValue*)BYTE_OFFSET(obj, offset));
+}
+
+INLINE bool dvmGetFieldBoolean(const Object* obj, int offset) {
+    return ((JValue*)BYTE_OFFSET(obj, offset))->z;
+}
+INLINE s1 dvmGetFieldByte(const Object* obj, int offset) {
+    return ((JValue*)BYTE_OFFSET(obj, offset))->b;
+}
+INLINE s2 dvmGetFieldShort(const Object* obj, int offset) {
+    return ((JValue*)BYTE_OFFSET(obj, offset))->s;
+}
+INLINE u2 dvmGetFieldChar(const Object* obj, int offset) {
+    return ((JValue*)BYTE_OFFSET(obj, offset))->c;
+}
+INLINE s4 dvmGetFieldInt(const Object* obj, int offset) {
+    return ((JValue*)BYTE_OFFSET(obj, offset))->i;
+}
+INLINE s8 dvmGetFieldLong(const Object* obj, int offset) {
+    return ((JValue*)BYTE_OFFSET(obj, offset))->j;
+}
+INLINE float dvmGetFieldFloat(const Object* obj, int offset) {
+    return ((JValue*)BYTE_OFFSET(obj, offset))->f;
+}
+INLINE double dvmGetFieldDouble(const Object* obj, int offset) {
+    return ((JValue*)BYTE_OFFSET(obj, offset))->d;
+}
+INLINE Object* dvmGetFieldObject(const Object* obj, int offset) {
+    return ((JValue*)BYTE_OFFSET(obj, offset))->l;
+}
+
+INLINE void dvmSetFieldBoolean(Object* obj, int offset, bool val) {
+    ((JValue*)BYTE_OFFSET(obj, offset))->i = val;
+}
+INLINE void dvmSetFieldByte(Object* obj, int offset, s1 val) {
+    ((JValue*)BYTE_OFFSET(obj, offset))->i = val;
+}
+INLINE void dvmSetFieldShort(Object* obj, int offset, s2 val) {
+    ((JValue*)BYTE_OFFSET(obj, offset))->i = val;
+}
+INLINE void dvmSetFieldChar(Object* obj, int offset, u2 val) {
+    ((JValue*)BYTE_OFFSET(obj, offset))->i = val;
+}
+INLINE void dvmSetFieldInt(Object* obj, int offset, s4 val) {
+    ((JValue*)BYTE_OFFSET(obj, offset))->i = val;
+}
+INLINE void dvmSetFieldLong(Object* obj, int offset, s8 val) {
+    ((JValue*)BYTE_OFFSET(obj, offset))->j = val;
+}
+INLINE void dvmSetFieldFloat(Object* obj, int offset, float val) {
+    ((JValue*)BYTE_OFFSET(obj, offset))->f = val;
+}
+INLINE void dvmSetFieldDouble(Object* obj, int offset, double val) {
+    ((JValue*)BYTE_OFFSET(obj, offset))->d = val;
+}
+INLINE void dvmSetFieldObject(Object* obj, int offset, Object* val) {
+    ((JValue*)BYTE_OFFSET(obj, offset))->l = val;
+}
+
+/*
+ * Static field access functions.
+ */
+INLINE JValue* dvmStaticFieldPtr(const StaticField* sfield) {
+    /* NOTE(review): the cast discards the const qualifier on "sfield",
+       handing callers a mutable view of the value — confirm intentional */
+    return (JValue*)&sfield->value;
+}
+
+INLINE bool dvmGetStaticFieldBoolean(const StaticField* sfield) {
+    return sfield->value.z;
+}
+INLINE s1 dvmGetStaticFieldByte(const StaticField* sfield) {
+    return sfield->value.b;
+}
+INLINE s2 dvmGetStaticFieldShort(const StaticField* sfield) {
+    return sfield->value.s;
+}
+INLINE u2 dvmGetStaticFieldChar(const StaticField* sfield) {
+    return sfield->value.c;
+}
+INLINE s4 dvmGetStaticFieldInt(const StaticField* sfield) {
+    return sfield->value.i;
+}
+INLINE s8 dvmGetStaticFieldLong(const StaticField* sfield) {
+    return sfield->value.j;
+}
+INLINE float dvmGetStaticFieldFloat(const StaticField* sfield) {
+    return sfield->value.f;
+}
+INLINE double dvmGetStaticFieldDouble(const StaticField* sfield) {
+    return sfield->value.d;
+}
+INLINE Object* dvmGetStaticFieldObject(const StaticField* sfield) {
+    return sfield->value.l;
+}
+
+INLINE void dvmSetStaticFieldBoolean(StaticField* sfield, bool val) {
+    sfield->value.i = val;
+}
+INLINE void dvmSetStaticFieldByte(StaticField* sfield, s1 val) {
+    sfield->value.i = val;
+}
+INLINE void dvmSetStaticFieldShort(StaticField* sfield, s2 val) {
+    sfield->value.i = val;
+}
+INLINE void dvmSetStaticFieldChar(StaticField* sfield, u2 val) {
+    sfield->value.i = val;
+}
+INLINE void dvmSetStaticFieldInt(StaticField* sfield, s4 val) {
+    sfield->value.i = val;
+}
+INLINE void dvmSetStaticFieldLong(StaticField* sfield, s8 val) {
+    sfield->value.j = val;
+}
+INLINE void dvmSetStaticFieldFloat(StaticField* sfield, float val) {
+    sfield->value.f = val;
+}
+INLINE void dvmSetStaticFieldDouble(StaticField* sfield, double val) {
+    sfield->value.d = val;
+}
+INLINE void dvmSetStaticFieldObject(StaticField* sfield, Object* val) {
+    sfield->value.l = val;
+}
+
+/*
+ * Helpers.
+ */
+INLINE bool dvmIsPublicMethod(const Method* method) {
+    return (method->accessFlags & ACC_PUBLIC) != 0;
+}
+INLINE bool dvmIsPrivateMethod(const Method* method) {
+    return (method->accessFlags & ACC_PRIVATE) != 0;
+}
+INLINE bool dvmIsStaticMethod(const Method* method) {
+    return (method->accessFlags & ACC_STATIC) != 0;
+}
+INLINE bool dvmIsSynchronizedMethod(const Method* method) {
+    return (method->accessFlags & ACC_SYNCHRONIZED) != 0;
+}
+INLINE bool dvmIsFinalMethod(const Method* method) {
+    return (method->accessFlags & ACC_FINAL) != 0;
+}
+INLINE bool dvmIsNativeMethod(const Method* method) {
+    return (method->accessFlags & ACC_NATIVE) != 0;
+}
+INLINE bool dvmIsAbstractMethod(const Method* method) {
+    return (method->accessFlags & ACC_ABSTRACT) != 0;
+}
+INLINE bool dvmIsMirandaMethod(const Method* method) {
+    return (method->accessFlags & ACC_MIRANDA) != 0;
+}
+INLINE bool dvmIsConstructorMethod(const Method* method) {
+    return *method->name == '<';
+}
+/* Dalvik puts private, static, and constructors into non-virtual table */
+INLINE bool dvmIsDirectMethod(const Method* method) {
+    return dvmIsPrivateMethod(method) ||
+           dvmIsStaticMethod(method) ||
+           dvmIsConstructorMethod(method);
+}
+/* Get whether the given method has associated bytecode. This is the
+ * case for methods which are neither native nor abstract. */
+INLINE bool dvmIsBytecodeMethod(const Method* method) {
+    return (method->accessFlags & (ACC_NATIVE | ACC_ABSTRACT)) == 0;
+}
+
+INLINE bool dvmIsProtectedField(const Field* field) {
+    return (field->accessFlags & ACC_PROTECTED) != 0;
+}
+INLINE bool dvmIsStaticField(const Field* field) {
+    return (field->accessFlags & ACC_STATIC) != 0;
+}
+INLINE bool dvmIsFinalField(const Field* field) {
+    return (field->accessFlags & ACC_FINAL) != 0;
+}
+
+INLINE bool dvmIsInterfaceClass(const ClassObject* clazz) {
+    return (clazz->accessFlags & ACC_INTERFACE) != 0;
+}
+INLINE bool dvmIsPublicClass(const ClassObject* clazz) {
+    return (clazz->accessFlags & ACC_PUBLIC) != 0;
+}
+INLINE bool dvmIsFinalClass(const ClassObject* clazz) {
+    return (clazz->accessFlags & ACC_FINAL) != 0;
+}
+INLINE bool dvmIsAbstractClass(const ClassObject* clazz) {
+    return (clazz->accessFlags & ACC_ABSTRACT) != 0;
+}
+/* trailing whitespace removed to match the surrounding helpers */
+INLINE bool dvmIsAnnotationClass(const ClassObject* clazz) {
+    return (clazz->accessFlags & ACC_ANNOTATION) != 0;
+}
+INLINE bool dvmIsPrimitiveClass(const ClassObject* clazz) {
+    return clazz->primitiveType != PRIM_NOT;
+}
+
+/* linked, here meaning prepared and resolved */
+INLINE bool dvmIsClassLinked(const ClassObject* clazz) {
+    return clazz->status >= CLASS_RESOLVED;
+}
+/* has class been verified? */
+INLINE bool dvmIsClassVerified(const ClassObject* clazz) {
+    return clazz->status >= CLASS_VERIFIED;
+}
+
+/*
+ * Get the associated code struct for a method. This returns NULL
+ * for non-bytecode methods.
+ */
+INLINE const DexCode* dvmGetMethodCode(const Method* meth) {
+    if (dvmIsBytecodeMethod(meth)) {
+        /*
+         * The insns field for a bytecode method actually points at
+         * &(DexCode.insns), so we can subtract back to get at the
+         * DexCode in front.
+         */
+        return (const DexCode*)
+            (((const u1*) meth->insns) - offsetof(DexCode, insns));
+    } else {
+        return NULL;
+    }
+}
+
+/*
+ * Get the size of the insns associated with a method. This returns 0
+ * for non-bytecode methods.
+ */
+INLINE u4 dvmGetMethodInsnsSize(const Method* meth) {
+    const DexCode* pCode = dvmGetMethodCode(meth);
+    return (pCode == NULL) ? 0 : pCode->insnsSize;
+}
+
+/* debugging */
+void dvmDumpObject(const Object* obj);
+
+#endif /*_DALVIK_OO_OBJECT*/
diff --git a/vm/oo/Resolve.c b/vm/oo/Resolve.c
new file mode 100644
index 0000000..04c96db
--- /dev/null
+++ b/vm/oo/Resolve.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Resolve classes, methods, fields, and strings.
+ *
+ * According to the VM spec (v2 5.5), classes may be initialized by use
+ * of the "new", "getstatic", "putstatic", or "invokestatic" instructions.
+ * If we are resolving a static method or static field, we make the
+ * initialization check here.
+ *
+ * (NOTE: the verifier has its own resolve functions, which can be invoked
+ * if a class isn't pre-verified.  Those functions must not update the
+ * "resolved stuff" tables for static fields and methods, because they do
+ * not perform initialization.)
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+
+
+/*
+ * Find the class corresponding to "classIdx", which maps to a class name
+ * string.  It might be in the same DEX file as "referrer", in a different
+ * DEX file, generated by a class loader, or generated by the VM (e.g.
+ * array classes).
+ *
+ * Because the DexTypeId is associated with the referring class' DEX file,
+ * we may have to resolve the same class more than once if it's referred
+ * to from classes in multiple DEX files.  This is a necessary property for
+ * DEX files associated with different class loaders.
+ *
+ * We cache a copy of the lookup in the DexFile's "resolved class" table,
+ * so future references to "classIdx" are faster.
+ *
+ * Note that "referrer" may be in the process of being linked.
+ *
+ * Traditional VMs might do access checks here, but in Dalvik the class
+ * "constant pool" is shared between all classes in the DEX file.  We rely
+ * on the verifier to do the checks for us.
+ *
+ * Does not initialize the class.
+ *
+ * "fromUnverifiedConstant" should only be set if this call is the direct
+ * result of executing a "const-class" or "instance-of" instruction, which
+ * use class constants not resolved by the bytecode verifier.
+ *
+ * Returns NULL with an exception raised on failure.
+ */
+ClassObject* dvmResolveClass(const ClassObject* referrer, u4 classIdx,
+    bool fromUnverifiedConstant)
+{
+    DvmDex* pDvmDex = referrer->pDvmDex;
+    ClassObject* resClass;
+
+    /*
+     * Check the table first -- this gets called from the other "resolve"
+     * methods.
+     */
+    resClass = dvmDexGetResolvedClass(pDvmDex, classIdx);
+    if (resClass != NULL)
+        return resClass;
+
+    LOGVV("--- resolving class %u (referrer=%s cl=%p)\n",
+        classIdx, referrer->descriptor, referrer->classLoader);
+
+    /*
+     * Class hasn't been loaded yet, or is in the process of being loaded
+     * and initialized now.  Try to get a copy.  If we find one, put the
+     * pointer in the DexTypeId.  There isn't a race condition here --
+     * 32-bit writes are guaranteed atomic on all target platforms.  Worst
+     * case we have two threads storing the same value.
+     *
+     * If this is an array class, we'll generate it here.
+     */
+    resClass = dvmFindClassNoInit(
+            dexStringByTypeIdx(pDvmDex->pDexFile, classIdx),
+            referrer->classLoader);
+
+    if (resClass != NULL) {
+        /*
+         * If the referrer was pre-verified, the resolved class must come
+         * from the same DEX or from a bootstrap class.  The pre-verifier
+         * makes assumptions that could be invalidated by a wacky class
+         * loader.  (See the notes at the top of oo/Class.c.)
+         *
+         * The verifier does *not* fail a class for using a const-class
+         * or instance-of instruction referring to an unresolveable class,
+         * because the result of the instruction is simply a Class object
+         * or boolean -- there's no need to resolve the class object during
+         * verification.  Instance field and virtual method accesses can
+         * break dangerously if we get the wrong class, but const-class and
+         * instance-of are only interesting at execution time.  So, if we
+         * we got here as part of executing one of the "unverified class"
+         * instructions, we skip the additional check.
+         *
+         * Ditto for class references from annotations and exception
+         * handler lists.
+         */
+        if (!fromUnverifiedConstant &&
+            IS_CLASS_FLAG_SET(referrer, CLASS_ISPREVERIFIED))
+        {
+            /* for array classes, the check applies to the element class */
+            ClassObject* resClassCheck = resClass;
+            if (dvmIsArrayClass(resClassCheck))
+                resClassCheck = resClassCheck->elementClass;
+
+            /* bootstrap classes (classLoader == NULL) are always allowed */
+            if (referrer->pDvmDex != resClassCheck->pDvmDex &&
+                resClassCheck->classLoader != NULL)
+            {
+                LOGW("Class resolved by unexpected DEX:"
+                     " %s(%p):%p ref [%s] %s(%p):%p\n",
+                    referrer->descriptor, referrer->classLoader,
+                    referrer->pDvmDex,
+                    resClass->descriptor, resClassCheck->descriptor,
+                    resClassCheck->classLoader, resClassCheck->pDvmDex);
+                dvmThrowException("Ljava/lang/IllegalAccessError;",
+                    "cross-loader access from pre-verified class");
+                return NULL;
+            }
+        }
+
+        LOGVV("##### +ResolveClass(%s): referrer=%s dex=%p ldr=%p ref=%d\n",
+            resClass->descriptor, referrer->descriptor, referrer->pDvmDex,
+            referrer->classLoader, classIdx);
+
+        /*
+         * Add what we found to the list so we can skip the class search
+         * next time through.
+         *
+         * TODO: should we be doing this when fromUnverifiedConstant==true?
+         * (see comments at top of oo/Class.c)
+         */
+        dvmDexSetResolvedClass(pDvmDex, classIdx, resClass);
+    } else {
+        /* not found, exception should be raised */
+        LOGVV("Class not found: %s\n",
+            dexStringByTypeIdx(pDvmDex->pDexFile, classIdx));
+        assert(dvmCheckException(dvmThreadSelf()));
+    }
+
+    return resClass;
+}
+
+
+/*
+ * Find the method corresponding to "methodRef".
+ *
+ * We use "referrer" to find the DexFile with the constant pool that
+ * "methodRef" is an index into.  We also use its class loader.  The method
+ * being resolved may very well be in a different DEX file.
+ *
+ * If this is a static method, we ensure that the method's class is
+ * initialized.
+ */
+Method* dvmResolveMethod(const ClassObject* referrer, u4 methodIdx,
+    MethodType methodType)
+{
+    DvmDex* pDvmDex = referrer->pDvmDex;
+    ClassObject* resClass;
+    const DexMethodId* pMethodId;
+    Method* resMethod;
+
+    /* interface method refs go through dvmResolveInterfaceMethod() instead */
+    assert(methodType != METHOD_INTERFACE);
+
+    LOGVV("--- resolving method %u (referrer=%s)\n", methodIdx,
+        referrer->descriptor);
+    pMethodId = dexGetMethodId(pDvmDex->pDexFile, methodIdx);
+
+    resClass = dvmResolveClass(referrer, pMethodId->classIdx, false);
+    if (resClass == NULL) {
+        /* can't find the class that the method is a part of */
+        assert(dvmCheckException(dvmThreadSelf()));
+        return NULL;
+    }
+    if (dvmIsInterfaceClass(resClass)) {
+        /* method is part of an interface */
+        dvmThrowExceptionWithClassMessage(
+            "Ljava/lang/IncompatibleClassChangeError;",
+            resClass->descriptor);
+        return NULL;
+    }
+
+    const char* name = dexStringById(pDvmDex->pDexFile, pMethodId->nameIdx);
+    DexProto proto;
+    dexProtoSetFromMethodId(&proto, pDvmDex->pDexFile, pMethodId);
+
+    /*
+     * We need to chase up the class hierarchy to find methods defined
+     * in super-classes.  (We only want to check the current class
+     * if we're looking for a constructor; since DIRECT calls are only
+     * for constructors and private methods, we don't want to walk up.)
+     */
+    if (methodType == METHOD_DIRECT) {
+        resMethod = dvmFindDirectMethod(resClass, name, &proto);
+    } else if (methodType == METHOD_STATIC) {
+        resMethod = dvmFindDirectMethodHier(resClass, name, &proto);
+    } else {
+        resMethod = dvmFindVirtualMethodHier(resClass, name, &proto);
+    }
+
+    if (resMethod == NULL) {
+        /* exception detail message carries only the method name */
+        dvmThrowException("Ljava/lang/NoSuchMethodError;", name);
+        return NULL;
+    }
+
+    LOGVV("--- found method %d (%s.%s)\n",
+        methodIdx, resClass->descriptor, resMethod->name);
+
+    /* see if this is a pure-abstract method */
+    if (dvmIsAbstractMethod(resMethod) && !dvmIsAbstractClass(resClass)) {
+        dvmThrowException("Ljava/lang/AbstractMethodError;", name);
+        return NULL;
+    }
+
+    /*
+     * If we're the first to resolve this class, we need to initialize
+     * it now.  Only necessary for METHOD_STATIC.
+     */
+    if (methodType == METHOD_STATIC) {
+        if (!dvmIsClassInitialized(resMethod->clazz) &&
+            !dvmInitClass(resMethod->clazz))
+        {
+            /* initialization failed; exception raised by dvmInitClass */
+            assert(dvmCheckException(dvmThreadSelf()));
+            return NULL;
+        } else {
+            assert(!dvmCheckException(dvmThreadSelf()));
+        }
+    } else {
+        /*
+         * Edge case: if the <clinit> for a class creates an instance
+         * of itself, we will call <init> on a class that is still being
+         * initialized by us.
+         */
+        assert(dvmIsClassInitialized(resMethod->clazz) ||
+               dvmIsClassInitializing(resMethod->clazz));
+    }
+
+    /*
+     * The class is initialized, the method has been found.  Add a pointer
+     * to our data structure so we don't have to jump through the hoops again.
+     */
+    dvmDexSetResolvedMethod(pDvmDex, methodIdx, resMethod);
+
+    return resMethod;
+}
+
+/*
+ * Resolve an interface method reference.
+ *
+ * Returns NULL with an exception raised on failure.
+ */
+Method* dvmResolveInterfaceMethod(const ClassObject* referrer, u4 methodIdx)
+{
+    DvmDex* pDvmDex = referrer->pDvmDex;
+    ClassObject* resClass;
+    const DexMethodId* pMethodId;
+    Method* resMethod;
+    int i;
+
+    LOGVV("--- resolving interface method %d (referrer=%s)\n",
+        methodIdx, referrer->descriptor);
+    pMethodId = dexGetMethodId(pDvmDex->pDexFile, methodIdx);
+
+    resClass = dvmResolveClass(referrer, pMethodId->classIdx, false);
+    if (resClass == NULL) {
+        /* can't find the class that the method is a part of */
+        assert(dvmCheckException(dvmThreadSelf()));
+        return NULL;
+    }
+    if (!dvmIsInterfaceClass(resClass)) {
+        /* whoops -- the reference must name an interface class */
+        dvmThrowExceptionWithClassMessage(
+            "Ljava/lang/IncompatibleClassChangeError;",
+            resClass->descriptor);
+        return NULL;
+    }
+
+    /*
+     * This is the first time the method has been resolved.  Set it in our
+     * resolved-method structure.  It always resolves to the same thing,
+     * so looking it up and storing it doesn't create a race condition.
+     *
+     * If we scan into the interface's superclass -- which is always
+     * java/lang/Object -- we will catch things like:
+     *   interface I ...
+     *   I myobj = (something that implements I)
+     *   myobj.hashCode()
+     * However, the Method->methodIndex will be an offset into clazz->vtable,
+     * rather than an offset into clazz->iftable.  The invoke-interface
+     * code can test to see if the method returned is abstract or concrete,
+     * and use methodIndex accordingly.  I'm not doing this yet because
+     * (a) we waste time in an unusual case, and (b) we're probably going
+     * to fix it in the DEX optimizer.
+     *
+     * We do need to scan the superinterfaces, in case we're invoking a
+     * superinterface method on an interface reference.  The class in the
+     * DexTypeId is for the static type of the object, not the class in
+     * which the method is first defined.  We have the full, flattened
+     * list in "iftable".
+     */
+    const char* methodName =
+        dexStringById(pDvmDex->pDexFile, pMethodId->nameIdx);
+
+    DexProto proto;
+    dexProtoSetFromMethodId(&proto, pDvmDex->pDexFile, pMethodId);
+
+    /* (fixed: previous version referenced an undeclared "methodSig" here) */
+    LOGVV("+++ looking for '%s' in resClass='%s'\n",
+        methodName, resClass->descriptor);
+    resMethod = dvmFindVirtualMethod(resClass, methodName, &proto);
+    if (resMethod == NULL) {
+        LOGVV("+++ did not resolve immediately\n");
+
+        /* scan the full, flattened list of superinterfaces */
+        for (i = 0; i < resClass->iftableCount; i++) {
+            resMethod = dvmFindVirtualMethod(resClass->iftable[i].clazz,
+                            methodName, &proto);
+            if (resMethod != NULL)
+                break;
+        }
+
+        if (resMethod == NULL) {
+            dvmThrowException("Ljava/lang/NoSuchMethodError;", methodName);
+            return NULL;
+        }
+    } else {
+        LOGVV("+++ resolved immediately: %s (%s %d)\n", resMethod->name,
+            resMethod->clazz->descriptor, (u4) resMethod->methodIndex);
+    }
+
+    LOGVV("--- found interface method %d (%s.%s)\n",
+        methodIdx, resClass->descriptor, resMethod->name);
+
+    /* we're expecting this to be abstract */
+    assert(dvmIsAbstractMethod(resMethod));
+
+    /* interface methods are always public; no need to check access */
+
+    /*
+     * The interface class *may* be initialized.  According to VM spec
+     * v2 2.17.4, the interfaces a class refers to "need not" be initialized
+     * when the class is initialized.
+     *
+     * It isn't necessary for an interface class to be initialized before
+     * we resolve methods on that interface.
+     *
+     * We choose not to do the initialization now.
+     */
+    //assert(dvmIsClassInitialized(resMethod->clazz));
+
+    /*
+     * The method has been found.  Add a pointer to our data structure
+     * so we don't have to jump through the hoops again.
+     */
+    dvmDexSetResolvedMethod(pDvmDex, methodIdx, resMethod);
+
+    return resMethod;
+}
+
+/*
+ * Resolve an instance field reference.
+ *
+ * Returns NULL and throws an exception on error (no such field, illegal
+ * access).
+ */
+InstField* dvmResolveInstField(const ClassObject* referrer, u4 ifieldIdx)
+{
+    DvmDex* pDvmDex = referrer->pDvmDex;
+    ClassObject* resClass;
+    const DexFieldId* pFieldId;
+    InstField* resField;
+
+    LOGVV("--- resolving field %u (referrer=%s cl=%p)\n",
+        ifieldIdx, referrer->descriptor, referrer->classLoader);
+
+    pFieldId = dexGetFieldId(pDvmDex->pDexFile, ifieldIdx);
+
+    /*
+     * Find the field's class.
+     */
+    resClass = dvmResolveClass(referrer, pFieldId->classIdx, false);
+    if (resClass == NULL) {
+        assert(dvmCheckException(dvmThreadSelf()));
+        return NULL;
+    }
+
+    /* search the class and its superclasses, matching name and type */
+    resField = dvmFindInstanceFieldHier(resClass,
+        dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx),
+        dexStringByTypeIdx(pDvmDex->pDexFile, pFieldId->typeIdx));
+    if (resField == NULL) {
+        dvmThrowException("Ljava/lang/NoSuchFieldError;",
+            dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx));
+        return NULL;
+    }
+
+    /*
+     * Class must be initialized by now (unless verifier is buggy).  We
+     * could still be in the process of initializing it if the field
+     * access is from a static initializer.
+     */
+    assert(dvmIsClassInitialized(resField->field.clazz) ||
+           dvmIsClassInitializing(resField->field.clazz));
+
+    /*
+     * The class is initialized, the field has been found.  Add a pointer
+     * to our data structure so we don't have to jump through the hoops again.
+     */
+    dvmDexSetResolvedField(pDvmDex, ifieldIdx, (Field*)resField);
+    LOGVV("    field %u is %s.%s\n",
+        ifieldIdx, resField->field.clazz->descriptor, resField->field.name);
+
+    return resField;
+}
+
+/*
+ * Resolve a static field reference.  The DexFile format doesn't distinguish
+ * between static and instance field references, so the "resolved" pointer
+ * in the Dex struct will have the wrong type.  We trivially cast it here.
+ *
+ * Causes the field's class to be initialized.
+ */
+StaticField* dvmResolveStaticField(const ClassObject* referrer, u4 sfieldIdx)
+{
+    DvmDex* pDvmDex = referrer->pDvmDex;
+    ClassObject* resClass;
+    const DexFieldId* pFieldId;
+    StaticField* resField;
+
+    pFieldId = dexGetFieldId(pDvmDex->pDexFile, sfieldIdx);
+
+    /*
+     * Find the field's class.
+     */
+    resClass = dvmResolveClass(referrer, pFieldId->classIdx, false);
+    if (resClass == NULL) {
+        assert(dvmCheckException(dvmThreadSelf()));
+        return NULL;
+    }
+
+    /* search the class and its superclasses, matching name and type */
+    resField = dvmFindStaticFieldHier(resClass,
+                dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx),
+                dexStringByTypeIdx(pDvmDex->pDexFile, pFieldId->typeIdx));
+    if (resField == NULL) {
+        dvmThrowException("Ljava/lang/NoSuchFieldError;",
+            dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx));
+        return NULL;
+    }
+
+    /*
+     * If we're the first to resolve the field in which this class resides,
+     * we need to do it now.  Note that, if the field was inherited from
+     * a superclass, it is not necessarily the same as "resClass".
+     */
+    if (!dvmIsClassInitialized(resField->field.clazz) &&
+        !dvmInitClass(resField->field.clazz))
+    {
+        /* initialization failed; exception raised by dvmInitClass */
+        assert(dvmCheckException(dvmThreadSelf()));
+        return NULL;
+    }
+
+    /*
+     * The class is initialized, the field has been found.  Add a pointer
+     * to our data structure so we don't have to jump through the hoops again.
+     */
+    dvmDexSetResolvedField(pDvmDex, sfieldIdx, (Field*) resField);
+
+    return resField;
+}
+
+
+/*
+ * Resolve a string reference.
+ *
+ * Finding the string is easy.  We need to return a reference to a
+ * java/lang/String object, not a bunch of characters, which means the
+ * first time we get here we need to create an interned string.
+ */
+StringObject* dvmResolveString(const ClassObject* referrer, u4 stringIdx)
+{
+    DvmDex* pDvmDex = referrer->pDvmDex;
+    StringObject* strObj;
+    StringObject* internStrObj;
+    const char* utf8;
+    u4 utf16Size;
+
+    LOGVV("+++ resolving string, referrer is %s\n", referrer->descriptor);
+
+    /*
+     * Create a UTF-16 version so we can trivially compare it to what's
+     * already interned.
+     */
+    utf8 = dexStringAndSizeById(pDvmDex->pDexFile, stringIdx, &utf16Size);
+    strObj = dvmCreateStringFromCstrAndLength(utf8, utf16Size,
+                ALLOC_DEFAULT);
+    if (strObj == NULL) {
+        /* ran out of space in GC heap? */
+        assert(dvmCheckException(dvmThreadSelf()));
+        goto bail;
+    }
+
+    /*
+     * Add it to the intern list.  The return value is the one in the
+     * intern list, which (due to race conditions) may or may not be
+     * the one we just created.  The intern list is synchronized, so
+     * there will be only one "live" version.
+     *
+     * By requesting an immortal interned string, we guarantee that
+     * the returned object will never be collected by the GC.
+     *
+     * A NULL return here indicates some sort of hashing failure.
+     */
+    internStrObj = dvmLookupImmortalInternedString(strObj);
+    dvmReleaseTrackedAlloc((Object*) strObj, NULL);
+    strObj = internStrObj;
+    if (strObj == NULL) {
+        assert(dvmCheckException(dvmThreadSelf()));
+        goto bail;
+    }
+
+    /* save a reference so we can go straight to the object next time */
+    dvmDexSetResolvedString(pDvmDex, stringIdx, strObj);
+
+bail:
+    /* on failure strObj is NULL and an exception has been raised */
+    return strObj;
+}
+
+/*
+ * For debugging: return a string representing the methodType.
+ */
+const char* dvmMethodTypeStr(MethodType methodType)
+{
+    /* indexed by the MethodType enum values (METHOD_UNKNOWN == 0) */
+    static const char* const kNames[] = {
+        "UNKNOWN",      /* METHOD_UNKNOWN */
+        "direct",       /* METHOD_DIRECT */
+        "static",       /* METHOD_STATIC */
+        "virtual",      /* METHOD_VIRTUAL */
+        "interface",    /* METHOD_INTERFACE */
+    };
+
+    if ((int) methodType >= 0 && (int) methodType <= METHOD_INTERFACE)
+        return kNames[methodType];
+
+    assert(false);
+    return "BOGUS";
+}
diff --git a/vm/oo/Resolve.h b/vm/oo/Resolve.h
new file mode 100644
index 0000000..70b2294
--- /dev/null
+++ b/vm/oo/Resolve.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Resolve "constant pool" references into pointers to VM structs.
+ */
+#ifndef _DALVIK_OO_RESOLVE
+#define _DALVIK_OO_RESOLVE
+
+/*
+ * "Direct" and "virtual" methods are stored independently.  The type of call
+ * used to invoke the method determines which list we search, and whether
+ * we travel up into superclasses.
+ *
+ * (<clinit>, <init>, and methods declared "private" or "static" are stored
+ * in the "direct" list.  All others are stored in the "virtual" list.)
+ */
+typedef enum MethodType {
+    METHOD_UNKNOWN  = 0,    // sentinel; not a valid resolved call type
+    METHOD_DIRECT,      // <init>, private
+    METHOD_STATIC,      // static
+    METHOD_VIRTUAL,     // virtual, super
+    METHOD_INTERFACE    // interface
+} MethodType;
+
+/*
+ * Resolve a class, given the referring class and a constant pool index
+ * for the DexTypeId.
+ *
+ * Does not initialize the class.
+ *
+ * Throws an exception and returns NULL on failure.
+ */
+ClassObject* dvmResolveClass(const ClassObject* referrer, u4 classIdx,
+    bool fromUnverifiedConstant);
+
+/*
+ * Resolve a direct, static, or virtual method.
+ *
+ * Can cause the method's class to be initialized if methodType is
+ * METHOD_STATIC.
+ *
+ * Throws an exception and returns NULL on failure.
+ */
+Method* dvmResolveMethod(const ClassObject* referrer, u4 methodIdx,
+    MethodType methodType);
+
+/*
+ * Resolve an interface method.
+ *
+ * Throws an exception and returns NULL on failure.
+ */
+Method* dvmResolveInterfaceMethod(const ClassObject* referrer, u4 methodIdx);
+
+/*
+ * Resolve an instance field.
+ *
+ * Throws an exception and returns NULL on failure.
+ */
+InstField* dvmResolveInstField(const ClassObject* referrer, u4 ifieldIdx);
+
+/*
+ * Resolve a static field.
+ *
+ * Causes the field's class to be initialized.
+ *
+ * Throws an exception and returns NULL on failure.
+ */
+StaticField* dvmResolveStaticField(const ClassObject* referrer, u4 sfieldIdx);
+
+/*
+ * Resolve a "const-string" reference.
+ *
+ * Throws an exception and returns NULL on failure.
+ */
+StringObject* dvmResolveString(const ClassObject* referrer, u4 stringIdx);
+
+/*
+ * Return debug string constant for enum.
+ */
+const char* dvmMethodTypeStr(MethodType methodType);
+
+#endif /*_DALVIK_OO_RESOLVE*/
diff --git a/vm/oo/TypeCheck.c b/vm/oo/TypeCheck.c
new file mode 100644
index 0000000..fe1a83f
--- /dev/null
+++ b/vm/oo/TypeCheck.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * instanceof, checkcast, etc.
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+
+/*
+ * I think modern C mandates that the results of a boolean expression are
+ * 0 or 1.  If not, or we suddenly turn into C++ and bool != int, use this.
+ */
+#define BOOL_TO_INT(x)  (x)
+//#define BOOL_TO_INT(x)  ((x) ? 1 : 0)
+
+/*
+ * Number of entries in instanceof cache.  MUST be a power of 2.
+ */
+#define INSTANCEOF_CACHE_SIZE   1024
+
+
+/*
+ * Allocate the instanceof cache during VM startup.
+ *
+ * Returns false if the cache allocation fails.
+ */
+bool dvmInstanceofStartup(void)
+{
+    gDvm.instanceofCache = dvmAllocAtomicCache(INSTANCEOF_CACHE_SIZE);
+    return (gDvm.instanceofCache != NULL);
+}
+
+/*
+ * Discard the instanceof cache during VM shutdown.
+ */
+void dvmInstanceofShutdown(void)
+{
+    dvmFreeAtomicCache(gDvm.instanceofCache);
+}
+
+
+/*
+ * Determine whether "sub" is an instance of "clazz", where both of these
+ * are array classes.
+ *
+ * Consider an array class, e.g. Y[][], where Y is a subclass of X.
+ *   Y[][] instanceof Y[][]        --> true (identity)
+ *   Y[][] instanceof X[][]        --> true (element superclass)
+ *   Y[][] instanceof Y            --> false
+ *   Y[][] instanceof Y[]          --> false
+ *   Y[][] instanceof Object       --> true (everything is an object)
+ *   Y[][] instanceof Object[]     --> true
+ *   Y[][] instanceof Object[][]   --> true
+ *   Y[][] instanceof Object[][][] --> false (too many []s)
+ *   Y[][] instanceof Serializable     --> true (all arrays are Serializable)
+ *   Y[][] instanceof Serializable[]   --> true
+ *   Y[][] instanceof Serializable[][] --> false (unless Y is Serializable)
+ *
+ * Don't forget about primitive types.
+ *   int[] instanceof Object[]     --> false
+ *
+ * "subElemClass" is sub->elementClass.
+ *
+ * "subDim" is usually just sub->dim, but for some kinds of checks we want
+ * to pass in a non-array class and pretend that it's an array.
+ *
+ * Returns 0 (false) or 1 (true).
+ */
+static int isArrayInstanceOfArray(ClassObject* subElemClass, int subDim,
+    ClassObject* clazz)
+{
+    //assert(dvmIsArrayClass(sub));
+    assert(dvmIsArrayClass(clazz));
+
+    /* "If T is an array type TC[]... one of the following must be true:
+     *   TC and SC are the same primitive type.
+     *   TC and SC are reference types and type SC can be cast to TC [...]."
+     *
+     * We need the class objects for the array elements.  For speed we
+     * tucked them into the class object.
+     */
+    assert(subDim > 0 && clazz->arrayDim > 0);
+    if (subDim == clazz->arrayDim) {
+        /*
+         * See if "sub" is an instance of "clazz".  This handles the
+         * interfaces, java.lang.Object, superclassing, etc.
+         */
+        return dvmInstanceof(subElemClass, clazz->elementClass);
+    } else if (subDim > clazz->arrayDim) {
+        /*
+         * The thing we might be an instance of has fewer dimensions.  It
+         * must be an Object or array of Object, or a standard array
+         * interface or array of standard array interfaces (the standard
+         * interfaces being java/lang/Cloneable and java/io/Serializable).
+         */
+        if (dvmIsInterfaceClass(clazz->elementClass)) {
+            /*
+             * See if the class implements its base element.  We know the
+             * base element is an interface; if the array class implements
+             * it, we know it's a standard array interface.
+             */
+            return dvmImplements(clazz, clazz->elementClass);
+        } else {
+            /*
+             * See if this is an array of Object, Object[], etc.  We know
+             * that the superclass of an array is always Object, so we
+             * just compare the element type to that.
+             */
+            return (clazz->elementClass == clazz->super);
+        }
+    } else {
+        /*
+         * Too many []s.
+         */
+        return false;
+    }
+}
+
+/*
+ * Determine whether "sub" is a sub-class of "clazz", where "sub" is an
+ * array class.
+ *
+ * "clazz" could be an array class, interface, or simple class.
+ *
+ * Returns 0 (false) or 1 (true).
+ */
+static int isArrayInstanceOf(ClassObject* sub, ClassObject* clazz)
+{
+    assert(dvmIsArrayClass(sub));
+
+    /* "If T is an interface type, T must be one of the interfaces
+     * implemented by arrays."
+     *
+     * I'm not checking that here, because dvmInstanceof tests for
+     * interfaces first, and the generic dvmImplements stuff should
+     * work correctly.
+     */
+    assert(!dvmIsInterfaceClass(clazz));     /* make sure */
+
+    /* "If T is a class type, then T must be Object."
+     *
+     * The superclass of an array is always java.lang.Object, so just
+     * compare against that.
+     */
+    if (!dvmIsArrayClass(clazz))
+        return BOOL_TO_INT(clazz == sub->super);
+
+    /*
+     * If T is an array type TC[] ...
+     */
+    return isArrayInstanceOfArray(sub->elementClass, sub->arrayDim, clazz);
+}
+
+
+/*
+ * Returns 1 (true) if "clazz" is an implementation of "interface".
+ *
+ * "clazz" could be a class or an interface.
+ */
+int dvmImplements(ClassObject* clazz, ClassObject* interface)
+{
+    int idx;
+
+    assert(dvmIsInterfaceClass(interface));
+
+    /*
+     * "iftable" holds the flattened set of every interface implemented
+     * directly, inherited from a superclass, or reachable through
+     * super-interfaces, so one linear scan answers the question.
+     */
+    for (idx = 0; idx < clazz->iftableCount; idx++) {
+        if (clazz->iftable[idx].clazz == interface)
+            return 1;
+    }
+
+    return 0;
+}
+
+/*
+ * Determine whether or not we can put an object into an array, based on
+ * the class hierarchy.  The object might itself be an array, which means
+ * we have to pay attention to the array instanceof rules.
+ *
+ * Note that "objectClass" could be an array, but objectClass->elementClass
+ * is always a non-array type.
+ */
+bool dvmCanPutArrayElement(ClassObject* objectClass, ClassObject* arrayClass)
+{
+    ClassObject* baseElemClass;
+    int effectiveDim;
+
+    if (dvmIsArrayClass(objectClass)) {
+        /*
+         * Array into array: bump the object's dimension count up by one
+         * so the two can be compared directly.
+         */
+        baseElemClass = objectClass->elementClass;
+        effectiveDim = objectClass->arrayDim + 1;
+    } else {
+        /*
+         * Non-array element: "arrayify" it as a one-dimensional array
+         * and reuse the standard array compatibility check.
+         */
+        baseElemClass = objectClass;
+        effectiveDim = 1;
+    }
+
+    return isArrayInstanceOfArray(baseElemClass, effectiveDim, arrayClass);
+}
+
+
+/*
+ * Perform the instanceof calculation.
+ *
+ * Checks interfaces first (via the flattened iftable), then the array
+ * rules, then falls back to a plain superclass-chain walk.
+ */
+static inline int isInstanceof(ClassObject* instance, ClassObject* clazz)
+{
+    if (dvmIsInterfaceClass(clazz)) {
+        return dvmImplements(instance, clazz);
+    } else if (dvmIsArrayClass(instance)) {
+        return isArrayInstanceOf(instance, clazz);
+    } else {
+        return dvmIsSubClass(instance, clazz);
+    }
+}
+
+
+/*
+ * Do the instanceof calculation, pulling the result from the cache if
+ * possible.
+ *
+ * ATOMIC_CACHE_CALC is consumed by the ATOMIC_CACHE_LOOKUP macro
+ * (presumably evaluated only on a cache miss -- see AtomicCache); the
+ * #undef keeps the definition from leaking into later code.
+ */
+int dvmInstanceofNonTrivial(ClassObject* instance, ClassObject* clazz)
+{
+#define ATOMIC_CACHE_CALC isInstanceof(instance, clazz)
+    return ATOMIC_CACHE_LOOKUP(gDvm.instanceofCache,
+                INSTANCEOF_CACHE_SIZE, instance, clazz);
+#undef ATOMIC_CACHE_CALC
+}
+
diff --git a/vm/oo/TypeCheck.h b/vm/oo/TypeCheck.h
new file mode 100644
index 0000000..4c46d37
--- /dev/null
+++ b/vm/oo/TypeCheck.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * instanceof, checkcast, etc.
+ */
+#ifndef _DALVIK_OO_TYPECHECK
+#define _DALVIK_OO_TYPECHECK
+
+/* VM startup/shutdown */
+bool dvmInstanceofStartup(void);
+void dvmInstanceofShutdown(void);
+
+
+/* used by dvmInstanceof; don't call */
+int dvmInstanceofNonTrivial(ClassObject* instance, ClassObject* clazz);
+
+/*
+ * Determine whether "instance" is an instance of "clazz".
+ *
+ * The trivial "exact same class" case is handled inline here; anything
+ * else is delegated to dvmInstanceofNonTrivial(), which consults the
+ * instanceof cache.
+ *
+ * Returns 0 (false) if not, 1 (true) if so.
+ */
+INLINE int dvmInstanceof(ClassObject* instance, ClassObject* clazz)
+{
+    if (instance == clazz) {
+        if (CALC_CACHE_STATS)
+            gDvm.instanceofCache->trivial++;    /* optional cache statistics */
+        return 1;
+    } else
+        return dvmInstanceofNonTrivial(instance, clazz);
+}
+
+/*
+ * Determine whether a class implements an interface.
+ *
+ * Returns 0 (false) if not, 1 (true) if so.
+ */
+int dvmImplements(ClassObject* clazz, ClassObject* interface);
+
+/*
+ * Determine whether "sub" is a sub-class of "clazz".
+ *
+ * Walks the superclass chain from "sub" upward until a match or the
+ * root is reached; note a class counts as a sub-class of itself.
+ *
+ * Returns 0 (false) if not, 1 (true) if so.
+ */
+INLINE int dvmIsSubClass(const ClassObject* sub, const ClassObject* clazz) {
+    do {
+        /*printf("###### sub='%s' clazz='%s'\n", sub->name, clazz->name);*/
+        if (sub == clazz)
+            return 1;
+        sub = sub->super;
+    } while (sub != NULL);
+
+    return 0;
+}
+
+/*
+ * Determine whether or not we can store an object into an array, based
+ * on the classes of the two.
+ *
+ * Returns 0 (false) if not, 1 (true) if so.
+ */
+bool dvmCanPutArrayElement(ClassObject* elemClass, ClassObject* arrayClass);
+
+#endif /*_DALVIK_OO_TYPECHECK*/
diff --git a/vm/reflect/Annotation.c b/vm/reflect/Annotation.c
new file mode 100644
index 0000000..cf35b51
--- /dev/null
+++ b/vm/reflect/Annotation.c
@@ -0,0 +1,2140 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Annotations.
+ *
+ * We're not expecting to make much use of runtime annotations, so speed vs.
+ * space choices are weighted heavily toward small size.
+ *
+ * It would have been nice to treat "system" annotations in the same way
+ * we do "real" annotations, but that doesn't work.  The chief difficulty
+ * is that some of them have member types that are not legal in annotations,
+ * such as Method and Annotation.  Another source of pain comes from the
+ * AnnotationDefault annotation, which by virtue of being an annotation
+ * could itself have default values, requiring some additional checks to
+ * prevent recursion.
+ *
+ * It's simpler, and more efficient, to handle the system annotations
+ * entirely inside the VM.  There are empty classes defined for the system
+ * annotation types, but their only purpose is to allow the system
+ * annotations to share name space with standard annotations.
+ */
+#include "Dalvik.h"
+
+// fwd
+/* (removed a stray backslash line-continuation that served no purpose
+ * in this declaration) */
+static Object* processEncodedAnnotation(const ClassObject* clazz,
+    const u1** pPtr);
+static bool skipEncodedAnnotation(const ClassObject* clazz, const u1** pPtr);
+
+/*
+ * System annotation descriptors.
+ *
+ * These are the type descriptors of the "system" annotations that are
+ * handled entirely inside the VM (see the file header comment); the
+ * strings are compared against entries in the DEX annotation data.
+ */
+static const char* kDescrAnnotationDefault
+                                    = "Ldalvik/annotation/AnnotationDefault;";
+static const char* kDescrEnclosingClass
+                                    = "Ldalvik/annotation/EnclosingClass;";
+static const char* kDescrEnclosingMethod
+                                    = "Ldalvik/annotation/EnclosingMethod;";
+static const char* kDescrInnerClass = "Ldalvik/annotation/InnerClass;";
+static const char* kDescrMemberClasses
+                                    = "Ldalvik/annotation/MemberClasses;";
+static const char* kDescrSignature  = "Ldalvik/annotation/Signature;";
+static const char* kDescrThrows     = "Ldalvik/annotation/Throws;";
+
+
+/*
+ * Perform Annotation setup.
+ *
+ * Caches the annotation-related class and method references used by the
+ * rest of this file: the java.lang.annotation.Annotation array classes,
+ * the Harmony AnnotationFactory/AnnotationMember classes, the factory's
+ * createAnnotation() method, and AnnotationMember's 4-arg constructor.
+ *
+ * Returns "false" (after logging an error) if any lookup fails.
+ */
+bool dvmReflectAnnotationStartup(void)
+{
+    Method* meth;
+
+    /*
+     * Find some standard Annotation classes.
+     */
+    gDvm.classJavaLangAnnotationAnnotationArray =
+        dvmFindArrayClass("[Ljava/lang/annotation/Annotation;", NULL);
+    gDvm.classJavaLangAnnotationAnnotationArrayArray =
+        dvmFindArrayClass("[[Ljava/lang/annotation/Annotation;", NULL);
+    if (gDvm.classJavaLangAnnotationAnnotationArray == NULL ||
+        gDvm.classJavaLangAnnotationAnnotationArrayArray == NULL)
+    {
+        LOGE("Could not find Annotation-array classes\n");
+        return false;
+    }
+
+    /*
+     * VM-specific annotation classes.
+     */
+    gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory =
+        dvmFindSystemClassNoInit("Lorg/apache/harmony/lang/annotation/AnnotationFactory;");
+    gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember =
+        dvmFindSystemClassNoInit("Lorg/apache/harmony/lang/annotation/AnnotationMember;");
+    gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray =
+        dvmFindArrayClass("[Lorg/apache/harmony/lang/annotation/AnnotationMember;", NULL);
+    if (gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory == NULL ||
+        gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember == NULL ||
+        gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray == NULL)
+    {
+        LOGE("Could not find android.lang annotation classes\n");
+        return false;
+    }
+
+    meth = dvmFindDirectMethodByDescriptor(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory,
+            "createAnnotation",
+            "(Ljava/lang/Class;[Lorg/apache/harmony/lang/annotation/AnnotationMember;)Ljava/lang/annotation/Annotation;");
+    if (meth == NULL) {
+        LOGE("Unable to find createAnnotation() in android AnnotationFactory\n");
+        return false;
+    }
+    gDvm.methOrgApacheHarmonyLangAnnotationAnnotationFactory_createAnnotation = meth;
+
+    meth = dvmFindDirectMethodByDescriptor(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember,
+            "<init>",
+            "(Ljava/lang/String;Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/reflect/Method;)V");
+    if (meth == NULL) {
+        LOGE("Unable to find 4-arg constructor in android AnnotationMember\n");
+        return false;
+    }
+    gDvm.methOrgApacheHarmonyLangAnnotationAnnotationMember_init = meth;
+
+    return true;
+}
+
+/*
+ * Read an unsigned LEB128 value from a buffer.  Advances "pBuf".
+ *
+ * Each byte contributes its low seven bits, least-significant group
+ * first; a set high bit means another byte follows.  No explicit bounds
+ * checking is performed (see the comment in the loop body).
+ */
+static u4 readUleb128(const u1** pBuf)
+{
+    u4 result = 0;
+    int shift = 0;
+    const u1* buf = *pBuf;
+    u1 val;
+
+    do {
+        /*
+         * Worst-case on bad data is we read too much data and return a bogus
+         * result.  Safe to assume that we will encounter a byte with its
+         * high bit clear before the end of the mapped file.
+         */
+        assert(shift < 32);
+
+        val = *buf++;
+        result |= (val & 0x7f) << shift;
+        shift += 7;
+    } while ((val & 0x80) != 0);
+
+    *pBuf = buf;
+    return result;
+}
+
+/*
+ * Get the annotations directory item.
+ *
+ * Looks the class up in its own DEX file by descriptor, then returns
+ * the class def's annotations directory item (presumably NULL when the
+ * class has no annotations -- confirm against dexGetAnnotationsDirectoryItem).
+ */
+static const DexAnnotationsDirectoryItem* getAnnoDirectory(DexFile* pDexFile,
+    const ClassObject* clazz)
+{
+    const DexClassDef* pClassDef;
+
+    /*
+     * Find the class def in the DEX file.  For better performance we should
+     * stash this in the ClassObject.
+     */
+    pClassDef = dexFindClass(pDexFile, clazz->descriptor);
+    assert(pClassDef != NULL);
+    return dexGetAnnotationsDirectoryItem(pDexFile, pClassDef);
+}
+
+/*
+ * Return a zero-length array of Annotation objects.
+ *
+ * TODO: this currently allocates a new array each time, but I think we
+ * can get away with returning a canonical copy.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ * (Presumably returns NULL on allocation failure, per dvmAllocArrayByClass;
+ * callers should check.)
+ */
+static ArrayObject* emptyAnnoArray(void)
+{
+    return dvmAllocArrayByClass(
+        gDvm.classJavaLangAnnotationAnnotationArray, 0, ALLOC_DEFAULT);
+}
+
+/*
+ * Return a zero-length array of arrays of Annotation objects.
+ *
+ * TODO: this currently allocates a new array each time, but I think we
+ * can get away with returning a canonical copy.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ * (Presumably returns NULL on allocation failure, per dvmAllocArrayByClass;
+ * callers should check.)
+ */
+static ArrayObject* emptyAnnoArrayArray(void)
+{
+    return dvmAllocArrayByClass(
+        gDvm.classJavaLangAnnotationAnnotationArrayArray, 0, ALLOC_DEFAULT);
+}
+
+/*
+ * Read a signed integer.  "zwidth" is the zero-based byte count.
+ *
+ * Bytes are little-endian: each new byte is rotated in at the top of the
+ * accumulator, then the final arithmetic right shift both right-justifies
+ * the value and sign-extends it from its encoded width.
+ *
+ * NOTE(review): relies on ">>" of a negative signed value behaving as an
+ * arithmetic shift, which is implementation-defined in C -- true on the
+ * toolchains this targets, but worth confirming.
+ */
+static s4 readSignedInt(const u1* ptr, int zwidth)
+{
+    s4 val = 0;
+    int i;
+
+    for (i = zwidth; i >= 0; --i)
+        val = ((u4)val >> 8) | (((s4)*ptr++) << 24);
+    val >>= (3 - zwidth) * 8;
+
+    return val;
+}
+
+/*
+ * Read an unsigned integer.  "zwidth" is the zero-based byte count,
+ * "fillOnRight" indicates which side we want to zero-fill from.
+ *
+ * With "fillOnRight" false the result is right-justified (low-order
+ * bytes); with it true the value is left in the high-order bytes, which
+ * is what the float encoding needs (see the kDexAnnotationFloat case in
+ * processAnnotationValue).
+ */
+static u4 readUnsignedInt(const u1* ptr, int zwidth, bool fillOnRight)
+{
+    u4 val = 0;
+    int i;
+
+    if (!fillOnRight) {
+        for (i = zwidth; i >= 0; --i)
+            val = (val >> 8) | (((u4)*ptr++) << 24);
+        val >>= (3 - zwidth) * 8;
+    } else {
+        for (i = zwidth; i >= 0; --i)
+            val = (val >> 8) | (((u4)*ptr++) << 24);
+    }
+    return val;
+}
+
+/*
+ * Read a signed long.  "zwidth" is the zero-based byte count.
+ *
+ * 64-bit analogue of readSignedInt(): little-endian bytes rotated in at
+ * the top, then an arithmetic right shift to position and sign-extend.
+ *
+ * NOTE(review): same implementation-defined signed-shift reliance as
+ * readSignedInt().
+ */
+static s8 readSignedLong(const u1* ptr, int zwidth)
+{
+    s8 val = 0;
+    int i;
+
+    for (i = zwidth; i >= 0; --i)
+        val = ((u8)val >> 8) | (((s8)*ptr++) << 56);
+    val >>= (7 - zwidth) * 8;
+
+    return val;
+}
+
+/*
+ * Read an unsigned long.  "zwidth" is the zero-based byte count,
+ * "fillOnRight" indicates which side we want to zero-fill from.
+ *
+ * 64-bit analogue of readUnsignedInt(); the left-justified ("fillOnRight")
+ * form is what the double encoding needs (see kDexAnnotationDouble in
+ * processAnnotationValue).
+ */
+static u8 readUnsignedLong(const u1* ptr, int zwidth, bool fillOnRight)
+{
+    u8 val = 0;
+    int i;
+
+    if (!fillOnRight) {
+        for (i = zwidth; i >= 0; --i)
+            val = (val >> 8) | (((u8)*ptr++) << 56);
+        val >>= (7 - zwidth) * 8;
+    } else {
+        for (i = zwidth; i >= 0; --i)
+            val = (val >> 8) | (((u8)*ptr++) << 56);
+    }
+    return val;
+}
+
+
+/*
+ * ===========================================================================
+ *      Element extraction
+ * ===========================================================================
+ */
+
+/*
+ * An annotation in "clazz" refers to a method by index.  This just gives
+ * us the name of the class and the name and signature of the method.  We
+ * need to find the method's class, and then find the method within that
+ * class.  If the method has been resolved before, we can just use the
+ * results of the previous lookup.
+ *
+ * Normally we do this as part of method invocation in the interpreter, which
+ * provides us with a bit of context: is it virtual or direct, do we need
+ * to initialize the class because it's a static method, etc.  We don't have
+ * that information here, so we have to do a bit of searching.
+ *
+ * Returns NULL if the method was not found.
+ */
+static Method* resolveAmbiguousMethod(const ClassObject* referrer, u4 methodIdx)
+{
+    DexFile* pDexFile;
+    ClassObject* resClass;
+    Method* resMethod;
+    const DexMethodId* pMethodId;
+    const char* name;
+    const char* signature;
+
+    /* if we've already resolved this method, return it */
+    resMethod = dvmDexGetResolvedMethod(referrer->pDvmDex, methodIdx);
+    if (resMethod != NULL)
+        return resMethod;
+
+    pDexFile = referrer->pDvmDex->pDexFile;
+    pMethodId = dexGetMethodId(pDexFile, methodIdx);
+    resClass = dvmResolveClass(referrer, pMethodId->classIdx, true);
+    if (resClass == NULL) {
+        /* log the class index that failed to resolve, not the method index */
+        LOGD("resolveAmbiguousMethod: unable to find class %d\n",
+            pMethodId->classIdx);
+        return NULL;
+    }
+    if (dvmIsInterfaceClass(resClass)) {
+        /* method is part of an interface -- not expecting that */
+        LOGD("resolveAmbiguousMethod: method in interface?\n");
+        return NULL;
+    }
+
+    // TODO - consider a method access flag that indicates direct vs. virtual
+    name = dexStringById(pDexFile, pMethodId->nameIdx);
+
+    DexProto proto;
+    dexProtoSetFromMethodId(&proto, pDexFile, pMethodId);
+
+    if (name[0] == '<') {
+        /*
+         * Constructor or class initializer.  Only need to examine the
+         * "direct" list, and don't need to look up the class hierarchy.
+         */
+        resMethod = dvmFindDirectMethod(resClass, name, &proto);
+    } else {
+        /*
+         * Try both lists, and scan up the tree.
+         */
+        resMethod = dvmFindVirtualMethodHier(resClass, name, &proto);
+        if (resMethod == NULL)
+            resMethod = dvmFindDirectMethodHier(resClass, name, &proto);
+    }
+
+    /* may be NULL; caller is expected to cope */
+    return resMethod;
+}
+
+/*
+ * constants for processAnnotationValue indicating what style of
+ * result is wanted
+ * (see processAnnotationValue's header comment for the full semantics
+ * of each style)
+ */
+typedef enum {
+    kAllObjects,         /* return everything as an object */
+    kAllRaw,             /* return everything as a raw value or index */
+    kPrimitivesOrObjects /* return primitives as-is but the rest as objects */
+} AnnotationResultStyle;
+    
+/*
+ * Recursively process an annotation value.
+ *
+ * "clazz" is the class on which the annotations are defined.  It may be
+ * NULL when "resultStyle" is "kAllRaw".
+ *
+ * If "resultStyle" is "kAllObjects", the result will always be an Object of an
+ * appropriate type (in pValue->value.l).  For primitive types, the usual
+ * wrapper objects will be created.
+ *
+ * If "resultStyle" is "kAllRaw", numeric constants are stored directly into
+ * "pValue", and indexed values like String and Method are returned as
+ * indexes.  Complex values like annotations and arrays are not handled.
+ *
+ * If "resultStyle" is "kPrimitivesOrObjects", numeric constants are stored
+ * directly into "pValue", and everything else is constructed as an Object
+ * of appropriate type (in pValue->value.l).
+ *
+ * The caller must call dvmReleaseTrackedAlloc on returned objects, when
+ * using "kAllObjects" or "kPrimitivesOrObjects".
+ *
+ * Returns "true" on success, "false" if the value could not be processed
+ * or an object could not be allocated.  On allocation failure an exception
+ * will be raised.
+ */
+static bool processAnnotationValue(const ClassObject* clazz,
+    const u1** pPtr, AnnotationValue* pValue,
+    AnnotationResultStyle resultStyle)
+{
+    Thread* self = dvmThreadSelf();
+    Object* elemObj = NULL;
+    bool setObject = false;
+    const u1* ptr = *pPtr;
+    u1 valueType, valueArg;
+    int width;
+    u4 idx;
+
+    /* first byte packs the value type (low bits) and an argument (high bits);
+     * for most types the argument is the payload width minus one */
+    valueType = *ptr++;
+    valueArg = valueType >> kDexAnnotationValueArgShift;
+    width = valueArg + 1;       /* assume, correct later */
+
+    LOGV("----- type is 0x%02x %d, ptr=%p\n",
+        valueType & kDexAnnotationValueTypeMask, valueArg, ptr-1);
+
+    pValue->type = valueType & kDexAnnotationValueTypeMask;
+
+    switch (valueType & kDexAnnotationValueTypeMask) {
+    case kDexAnnotationByte:
+        pValue->value.i = (s1) readSignedInt(ptr, valueArg);
+        if (resultStyle == kAllObjects) {
+            elemObj = (Object*) dvmWrapPrimitive(pValue->value,
+                        dvmFindPrimitiveClass('B'));
+            setObject = true;
+        }
+        break;
+    case kDexAnnotationShort:
+        pValue->value.i = (s2) readSignedInt(ptr, valueArg);
+        if (resultStyle == kAllObjects) {
+            elemObj = (Object*) dvmWrapPrimitive(pValue->value,
+                        dvmFindPrimitiveClass('S'));
+            setObject = true;
+        }
+        break;
+    case kDexAnnotationChar:
+        pValue->value.i = (u2) readUnsignedInt(ptr, valueArg, false);
+        if (resultStyle == kAllObjects) {
+            elemObj = (Object*) dvmWrapPrimitive(pValue->value,
+                        dvmFindPrimitiveClass('C'));
+            setObject = true;
+        }
+        break;
+    case kDexAnnotationInt:
+        pValue->value.i = readSignedInt(ptr, valueArg);
+        if (resultStyle == kAllObjects) {
+            elemObj = (Object*) dvmWrapPrimitive(pValue->value,
+                        dvmFindPrimitiveClass('I'));
+            setObject = true;
+        }
+        break;
+    case kDexAnnotationLong:
+        pValue->value.j = readSignedLong(ptr, valueArg);
+        if (resultStyle == kAllObjects) {
+            elemObj = (Object*) dvmWrapPrimitive(pValue->value,
+                        dvmFindPrimitiveClass('J'));
+            setObject = true;
+        }
+        break;
+    case kDexAnnotationFloat:
+        /* float payload is left-justified, hence fillOnRight=true */
+        pValue->value.i = readUnsignedInt(ptr, valueArg, true);
+        if (resultStyle == kAllObjects) {
+            elemObj = (Object*) dvmWrapPrimitive(pValue->value,
+                        dvmFindPrimitiveClass('F'));
+            setObject = true;
+        }
+        break;
+    case kDexAnnotationDouble:
+        /* double payload is left-justified, hence fillOnRight=true */
+        pValue->value.j = readUnsignedLong(ptr, valueArg, true);
+        if (resultStyle == kAllObjects) {
+            elemObj = (Object*) dvmWrapPrimitive(pValue->value,
+                        dvmFindPrimitiveClass('D'));
+            setObject = true;
+        }
+        break;
+    case kDexAnnotationBoolean:
+        /* the boolean value is packed into valueArg itself; no payload */
+        pValue->value.i = (valueArg != 0);
+        if (resultStyle == kAllObjects) {
+            elemObj = (Object*) dvmWrapPrimitive(pValue->value,
+                        dvmFindPrimitiveClass('Z'));
+            setObject = true;
+        }
+        width = 0;
+        break;
+
+    case kDexAnnotationString:
+        idx = readUnsignedInt(ptr, valueArg, false);
+        if (resultStyle == kAllRaw) {
+            pValue->value.i = idx;
+        } else {
+            elemObj = (Object*) dvmResolveString(clazz, idx);
+            setObject = true;
+            if (elemObj == NULL)
+                return false;
+            dvmAddTrackedAlloc(elemObj, self);      // balance the Release
+        }
+        break;
+    case kDexAnnotationType:
+        idx = readUnsignedInt(ptr, valueArg, false);
+        if (resultStyle == kAllRaw) {
+            pValue->value.i = idx;
+        } else {
+            elemObj = (Object*) dvmResolveClass(clazz, idx, true);
+            setObject = true;
+            if (elemObj == NULL) {
+                /* we're expected to throw a TypeNotPresentException here */
+                DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+                const char* desc = dexStringByTypeIdx(pDexFile, idx);
+                dvmClearException(self);
+                dvmThrowExceptionWithClassMessage(
+                        "Ljava/lang/TypeNotPresentException;", desc);
+                return false;
+            } else
+                dvmAddTrackedAlloc(elemObj, self);      // balance the Release
+        }
+        break;
+    case kDexAnnotationMethod:
+        idx = readUnsignedInt(ptr, valueArg, false);
+        if (resultStyle == kAllRaw) {
+            pValue->value.i = idx;
+        } else {
+            Method* meth = resolveAmbiguousMethod(clazz, idx);
+            if (meth == NULL)
+                return false;
+            elemObj = dvmCreateReflectObjForMethod(clazz, meth);
+            setObject = true;
+            if (elemObj == NULL)
+                return false;
+        }
+        break;
+    case kDexAnnotationField:
+        idx = readUnsignedInt(ptr, valueArg, false);
+        /* NOTE(review): unimplemented -- with NDEBUG the assert compiles
+         * away and this returns "success" with no value set; confirm no
+         * DEX in the wild reaches this before relying on it */
+        assert(false);      // TODO
+        break;
+    case kDexAnnotationEnum:
+        /* enum values are the contents of a static field */
+        idx = readUnsignedInt(ptr, valueArg, false);
+        if (resultStyle == kAllRaw) {
+            pValue->value.i = idx;
+        } else {
+            StaticField* sfield;
+
+            sfield = dvmResolveStaticField(clazz, idx);
+            if (sfield == NULL) {
+                return false;
+            } else {
+                assert(sfield->field.clazz->descriptor[0] == 'L');
+                /* NOTE(review): elemObj could be NULL if the field was never
+                 * set; confirm dvmAddTrackedAlloc tolerates NULL */
+                elemObj = sfield->value.l;
+                setObject = true;
+                dvmAddTrackedAlloc(elemObj, self);      // balance the Release
+            }
+        }
+        break;
+    case kDexAnnotationArray:
+        /*
+         * encoded_array format, which is a size followed by a stream
+         * of annotation_value.
+         *
+         * We create an array of Object, populate it, and return it.
+         */
+        if (resultStyle == kAllRaw) {
+            return false;
+        } else {
+            ArrayObject* newArray;
+            Object** pObj;
+            u4 size;
+
+            size = readUleb128(&ptr);
+            LOGVV("--- annotation array, size is %u\n", size);
+            newArray = dvmAllocArrayByClass(gDvm.classJavaLangObjectArray,
+                size, ALLOC_DEFAULT);
+            if (newArray == NULL) {
+                LOGE("annotation element array alloc failed (%d)\n", size);
+                return false;
+            }
+            pObj = (Object**)newArray->contents;
+
+            /* elements go straight into the array, which keeps them live;
+             * the per-element tracked ref can be released immediately */
+            AnnotationValue avalue;
+            while (size--) {
+                if (!processAnnotationValue(clazz, &ptr, &avalue,
+                                kAllObjects)) {
+                    dvmReleaseTrackedAlloc((Object*)newArray, self);
+                    return false;
+                }
+                Object* obj = avalue.value.l;
+                dvmReleaseTrackedAlloc(obj, self);
+                *pObj++ = obj;
+            }
+
+            elemObj = (Object*) newArray;
+            setObject = true;
+        }
+        width = 0;
+        break;
+    case kDexAnnotationAnnotation:
+        /* encoded_annotation format */
+        if (resultStyle == kAllRaw)
+            return false;
+        elemObj = processEncodedAnnotation(clazz, &ptr);
+        setObject = true;
+        if (elemObj == NULL)
+            return false;
+        dvmAddTrackedAlloc(elemObj, self);      // balance the Release
+        width = 0;
+        break;
+    case kDexAnnotationNull:
+        if (resultStyle == kAllRaw) {
+            pValue->value.i = 0;
+        } else {
+            assert(elemObj == NULL);
+            setObject = true;
+        }
+        width = 0;
+        break;
+    default:
+        LOGE("Bad annotation element value byte 0x%02x (0x%02x)\n",
+            valueType, valueType & kDexAnnotationValueTypeMask);
+        assert(false);
+        return false;
+    }
+
+    /* skip over the payload bytes (width was zeroed for types whose data
+     * was consumed inline, e.g. arrays and nested annotations) */
+    ptr += width;
+
+    *pPtr = ptr;
+    if (setObject)
+        pValue->value.l = elemObj;
+    return true;
+}
+
+
+/*
+ * For most object types, we have nothing to do here, and we just return
+ * "valueObj".
+ *
+ * For an array annotation, the type of the extracted object will always
+ * be java.lang.Object[], but we want it to match the type that the
+ * annotation member is expected to return.  In theory we can just stomp
+ * the object's class to have the correct type, but this strikes me as a
+ * risky proposition (at the very least we would need to call instanceof()
+ * on every element).
+ *
+ * We allocate a second array with the correct type, then copy the data
+ * over.  This releases the tracked allocation on "valueObj" and returns
+ * a new, tracked object.
+ *
+ * On failure, this releases the tracking on "valueObj" and returns NULL
+ * (allowing the call to say "foo = convertReturnType(foo, ..)").
+ */
+static Object* convertReturnType(Object* valueObj, ClassObject* methodReturn)
+{
+    if (valueObj == NULL ||
+        !dvmIsArray((ArrayObject*)valueObj) || !dvmIsArrayClass(methodReturn))
+    {
+        return valueObj;
+    }
+
+    Thread* self = dvmThreadSelf();
+    ClassObject* srcElemClass;
+    ClassObject* dstElemClass;
+    ArrayObject* newArray = NULL;   /* init'd so early "goto bail" returns NULL */
+
+    /*
+     * Strip off one '[' to get element class.  Note this is not the
+     * same as clazz->elementClass.
+     */
+    srcElemClass = dvmFindClass(valueObj->clazz->descriptor+1,
+        valueObj->clazz->classLoader);
+    dstElemClass = dvmFindClass(methodReturn->descriptor+1,
+        methodReturn->classLoader);
+    if (srcElemClass == NULL || dstElemClass == NULL) {
+        /* element class lookup failed; an exception should be pending */
+        LOGE("Unable to find element class for annotation conversion\n");
+        goto bail;
+    }
+    if (srcElemClass->primitiveType != PRIM_NOT ||
+        dstElemClass->primitiveType != PRIM_NOT)
+    {
+        LOGE("ERROR: array of primitives not expected here\n");
+        dvmAbort();
+    }
+    LOGV("HEY: converting valueObj from [%s to [%s\n",
+        srcElemClass->descriptor, dstElemClass->descriptor);
+
+    ArrayObject* srcArray = (ArrayObject*) valueObj;
+    u4 length = srcArray->length;
+
+    newArray = dvmAllocArrayByClass(methodReturn, length, ALLOC_DEFAULT);
+    if (newArray == NULL) {
+        LOGE("Failed creating duplicate annotation class (%s %d)\n",
+            methodReturn->descriptor, length);
+        goto bail;
+    }
+
+    /* element-by-element copy with a type check against the target class */
+    if (!dvmCopyObjectArray(newArray, srcArray, dstElemClass)) {
+        LOGE("Annotation array copy failed\n");
+        dvmReleaseTrackedAlloc((Object*)newArray, self);
+        newArray = NULL;
+        goto bail;
+    }
+
+bail:
+    /* replace old, return new */
+    dvmReleaseTrackedAlloc(valueObj, self);
+    return (Object*) newArray;
+}
+
+/*
+ * Create a new AnnotationMember.
+ *
+ * "clazz" is the class on which the annotations are defined.  "pPtr"
+ * points to a pointer into the annotation data.  "annoClass" is the
+ * annotation's class.
+ *
+ * We extract the annotation's value, create a new AnnotationMember object,
+ * and construct it.
+ *
+ * Returns NULL on failure.
+ */
+static Object* createAnnotationMember(const ClassObject* clazz,
+    const ClassObject* annoClass, const u1** pPtr)
+{
+    Thread* self = dvmThreadSelf();
+    const DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+    StringObject* nameObj = NULL;
+    Object* valueObj = NULL;
+    Object* newMember = NULL;
+    Object* methodObj = NULL;
+    ClassObject* methodReturn = NULL;
+    u4 elementNameIdx;
+    const char* name;
+    AnnotationValue avalue;
+    JValue result;
+    bool failed = true;
+
+    /* annotation_element: name index (uleb128) followed by the value */
+    elementNameIdx = readUleb128(pPtr);
+
+    if (!processAnnotationValue(clazz, pPtr, &avalue, kAllObjects)) {
+        LOGW("Failed processing annotation value\n");
+        goto bail;
+    }
+    valueObj = avalue.value.l;
+
+    /* new member to hold the element */
+    /* (allocation results are checked collectively below, before use) */
+    newMember =
+        dvmAllocObject(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember,
+        ALLOC_DEFAULT);
+    name = dexStringById(pDexFile, elementNameIdx);
+    nameObj = dvmCreateStringFromCstr(name, ALLOC_DEFAULT);
+
+    /* find the method in the annotation class, given only the name */
+    /* NOTE(review): "name" is NULL-checked here but was already passed to
+     * dvmCreateStringFromCstr above; presumably dexStringById never returns
+     * NULL for a valid index -- confirm */
+    if (name != NULL) {
+        Method* annoMeth = dvmFindVirtualMethodByName(annoClass, name);
+        if (annoMeth == NULL) {
+            LOGW("WARNING: could not find annotation member %s in %s\n",
+                name, annoClass->descriptor);
+        } else {
+            methodObj = dvmCreateReflectMethodObject(annoMeth);
+            methodReturn = dvmGetBoxedReturnType(annoMeth);
+        }
+    }
+    if (newMember == NULL || nameObj == NULL || methodObj == NULL ||
+        methodReturn == NULL)
+    {
+        LOGE("Failed creating annotation element (m=%p n=%p a=%p r=%p)\n",
+            newMember, nameObj, methodObj, methodReturn);
+        goto bail;
+    }
+
+    /* convert the return type, if necessary */
+    /* (on failure this releases valueObj's tracked ref and returns NULL) */
+    valueObj = convertReturnType(valueObj, methodReturn);
+    if (valueObj == NULL)
+        goto bail;
+
+    /* call 4-argument constructor */
+    dvmCallMethod(self, gDvm.methOrgApacheHarmonyLangAnnotationAnnotationMember_init,
+        newMember, &result, nameObj, valueObj, methodReturn, methodObj);
+    if (dvmCheckException(self)) {
+        LOGD("Failed constructing annotation element\n");
+        goto bail;
+    }
+
+    failed = false;
+
+bail:
+    /* release tracked allocations */
+    dvmReleaseTrackedAlloc(newMember, self);
+    dvmReleaseTrackedAlloc((Object*)nameObj, self);
+    dvmReleaseTrackedAlloc(valueObj, self);
+    dvmReleaseTrackedAlloc(methodObj, self);
+    if (failed)
+        return NULL;
+    else
+        return newMember;
+}
+
+/*
+ * Create a new Annotation object from what we find in the annotation item.
+ *
+ * "clazz" is the class on which the annotations are defined.  "pPtr"
+ * points to a pointer into the annotation data.
+ *
+ * We use the AnnotationFactory class to create the annotation for us.  The
+ * method we call is:
+ *
+ *  public static Annotation createAnnotation(
+ *      Class<? extends Annotation> annotationType,
+ *      AnnotationMember[] elements)
+ *
+ * Returns a new Annotation, which will NOT be in the local ref table and
+ * not referenced elsewhere, so store it away soon.  On failure, returns NULL.
+ */
+static Object* processEncodedAnnotation(const ClassObject* clazz,
+    const u1** pPtr)
+{
+    Thread* self = dvmThreadSelf();
+    const DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+    Object* newAnno = NULL;
+    ArrayObject* elementArray = NULL;
+    const ClassObject* annoClass;
+    const u1* ptr;
+    u4 typeIdx, size;
+
+    /* encoded_annotation: type index, element count, then the elements */
+    ptr = *pPtr;
+    typeIdx = readUleb128(&ptr);
+    size = readUleb128(&ptr);
+
+    LOGV("----- processEnc ptr=%p type=%d size=%d\n", ptr, typeIdx, size);
+
+    /* check the resolved-class cache first, then do the full resolve */
+    annoClass = dvmDexGetResolvedClass(clazz->pDvmDex, typeIdx);
+    if (annoClass == NULL) {
+        annoClass = dvmResolveClass(clazz, typeIdx, true);
+        if (annoClass == NULL) {
+            LOGE("Unable to resolve %s annotation class %d\n",
+                clazz->descriptor, typeIdx);
+            assert(dvmCheckException(self));
+            return NULL;
+        }
+    }
+
+    //LOGI("Found typeIdx=%d size=%d class=%s\n",
+    //    typeIdx, size, annoClass->descriptor);
+
+    /*
+     * Elements are parsed out and stored in an array.  The Harmony
+     * constructor wants an array with just the declared elements --
+     * default values get merged in later.
+     */
+    JValue result;
+    Object** pElement = NULL;
+
+    if (size > 0) {
+        elementArray = dvmAllocArrayByClass(
+            gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray,
+            size, ALLOC_DEFAULT);
+        if (elementArray == NULL) {
+            LOGE("failed to allocate annotation member array (%d elements)\n",
+                size);
+            goto bail;
+        }
+        pElement = (Object**) elementArray->contents;
+    }
+
+    /*
+     * "ptr" points to a byte stream with "size" occurrences of
+     * annotation_element.
+     */
+    while (size--) {
+        Object* newMember = createAnnotationMember(clazz, annoClass, &ptr);
+
+        /* add it to the array */
+        /* NOTE(review): newMember may be NULL on failure and is stored
+         * unchecked; the factory call below presumably fails or tolerates
+         * it -- confirm */
+        *pElement++ = newMember;
+    }
+
+    dvmCallMethod(self,
+        gDvm.methOrgApacheHarmonyLangAnnotationAnnotationFactory_createAnnotation,
+        NULL, &result, annoClass, elementArray);
+    if (dvmCheckException(self)) {
+        LOGD("Failed creating an annotation\n");
+        //dvmLogExceptionStackTrace();
+        goto bail;
+    }
+
+    newAnno = result.l;
+
+bail:
+    /* NULL thread arg: presumably resolved internally by the release call */
+    dvmReleaseTrackedAlloc((Object*) elementArray, NULL);
+    *pPtr = ptr;
+    return newAnno;
+}
+
+/*
+ * Run through an annotation set and convert each entry into an Annotation
+ * object.
+ *
+ * Only entries whose visibility matches "visibility" are included; the
+ * result array is sized to that count (possibly zero).
+ *
+ * Returns an array of Annotation objects, or NULL with an exception raised
+ * on alloc failure.  Caller must release the tracked allocation.
+ */
+static ArrayObject* processAnnotationSet(const ClassObject* clazz,
+    const DexAnnotationSetItem* pAnnoSet, int visibility)
+{
+    DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+    const DexAnnotationItem* pAnnoItem;
+    ArrayObject* annoArray;
+    Object** pContents;
+    int i, count;
+
+    /* we need these later; make sure they're initialized */
+    /* NOTE(review): dvmInitClass return values are ignored; a failed init
+     * would surface later in the factory call -- confirm that's acceptable */
+    if (!dvmIsClassInitialized(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory))
+        dvmInitClass(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
+    if (!dvmIsClassInitialized(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember))
+        dvmInitClass(gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
+
+    /* count up the number of visible elements */
+    for (i = count = 0; i < (int) pAnnoSet->size; i++) {
+        pAnnoItem = dexGetAnnotationItem(pDexFile, pAnnoSet, i);
+        if (pAnnoItem->visibility == visibility)
+            count++;
+    }
+
+    /* allocate the result array (may be zero-length) */
+    annoArray =dvmAllocArrayByClass(gDvm.classJavaLangAnnotationAnnotationArray,
+        count, ALLOC_DEFAULT);
+    if (annoArray == NULL)
+        return NULL;
+    pContents = (Object**) annoArray->contents;
+
+    /*
+     * Generate Annotation objects.  We must put them into the array
+     * immediately (or add them to the tracked ref table).
+     */
+    for (i = 0; i < (int) pAnnoSet->size; i++) {
+        pAnnoItem = dexGetAnnotationItem(pDexFile, pAnnoSet, i);
+        if (pAnnoItem->visibility != visibility)
+            continue;
+        const u1* ptr = pAnnoItem->annotation;
+        *pContents = processEncodedAnnotation(clazz, &ptr);
+        if (*pContents == NULL) {
+            dvmReleaseTrackedAlloc((Object*) annoArray, NULL);
+            return NULL;
+        }
+        pContents++;
+    }
+
+    return annoArray;
+}
+
+
+/*
+ * ===========================================================================
+ *      Skipping and scanning
+ * ===========================================================================
+ */
+
+/*
+ * Skip past an annotation value.
+ *
+ * "clazz" is the class on which the annotations are defined.
+ *
+ * Returns "true" on success, "false" on parsing failure.
+ */
+static bool skipAnnotationValue(const ClassObject* clazz, const u1** pPtr)
+{
+    const u1* ptr = *pPtr;
+    u1 valueType, valueArg;
+    int width;
+
+    valueType = *ptr++;
+    valueArg = valueType >> kDexAnnotationValueArgShift;
+    width = valueArg + 1;       /* assume */
+
+    LOGV("----- type is 0x%02x %d, ptr=%p\n",
+        valueType & kDexAnnotationValueTypeMask, valueArg, ptr-1);
+
+    switch (valueType & kDexAnnotationValueTypeMask) {
+    case kDexAnnotationByte:        break;
+    case kDexAnnotationShort:       break;
+    case kDexAnnotationChar:        break;
+    case kDexAnnotationInt:         break;
+    case kDexAnnotationLong:        break;
+    case kDexAnnotationFloat:       break;
+    case kDexAnnotationDouble:      break;
+    case kDexAnnotationString:      break;
+    case kDexAnnotationType:        break;
+    case kDexAnnotationMethod:      break;
+    case kDexAnnotationField:       break;
+    case kDexAnnotationEnum:        break;
+
+    case kDexAnnotationArray:
+        /* encoded_array format */
+        {
+            u4 size = readUleb128(&ptr);
+            while (size--) {
+                if (!skipAnnotationValue(clazz, &ptr))
+                    return false;
+            }
+        }
+        width = 0;
+        break;
+    case kDexAnnotationAnnotation:
+        /* encoded_annotation format */
+        if (!skipEncodedAnnotation(clazz, &ptr))
+            return false;
+        width = 0;
+        break;
+    case kDexAnnotationBoolean:
+    case kDexAnnotationNull:
+        width = 0;
+        break;
+    default:
+        LOGE("Bad annotation element value byte 0x%02x\n", valueType);
+        assert(false);
+        return false;
+    }
+
+    ptr += width;
+
+    *pPtr = ptr;
+    return true;
+}
+
+/*
+ * Skip past an encoded annotation.  Mainly useful for annotations embedded
+ * in other annotations.
+ */
+static bool skipEncodedAnnotation(const ClassObject* clazz, const u1** pPtr)
+{
+    const DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+    const u1* ptr;
+    u4 size;
+
+    ptr = *pPtr;
+    (void) readUleb128(&ptr);
+    size = readUleb128(&ptr);
+
+    /*
+     * "ptr" points to a byte stream with "size" occurrences of
+     * annotation_element.
+     */
+    while (size--) {
+        (void) readUleb128(&ptr);
+
+        if (!skipAnnotationValue(clazz, &ptr))
+            return false;
+    }
+
+    *pPtr = ptr;
+    return true;
+}
+
+
+/*
+ * Compare the name of the class in the DEX file to the supplied descriptor.
+ * Return value is equivalent to strcmp.
+ */
+static int compareClassDescriptor(DexFile* pDexFile, u4 typeIdx,
+    const char* descriptor)
+{
+    const char* str = dexStringByTypeIdx(pDexFile, typeIdx);
+
+    return strcmp(str, descriptor);
+}
+
+/*
+ * Search through the annotation set for an annotation with a matching
+ * descriptor.
+ *
+ * Comparing the string descriptor is slower than comparing an integer class
+ * index.  If annotation lists are expected to be long, we could look up
+ * the class' index by name from the DEX file, rather than doing a class
+ * lookup and string compare on each entry.  (Note the index will be
+ * different for each DEX file, so we can't cache annotation class indices
+ * globally.)
+ */
+static const DexAnnotationItem* searchAnnotationSet(const ClassObject* clazz,
+    const DexAnnotationSetItem* pAnnoSet, const char* descriptor,
+    int visibility)
+{
+    DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+    const DexAnnotationItem* result = NULL;
+    u4 typeIdx;
+    int i;
+
+    //printf("##### searchAnnotationSet %s %d\n", descriptor, visibility);
+
+    for (i = 0; i < (int) pAnnoSet->size; i++) {
+        const DexAnnotationItem* pAnnoItem;
+
+        pAnnoItem = dexGetAnnotationItem(pDexFile, pAnnoSet, i);
+        if (pAnnoItem->visibility != visibility)
+            continue;
+        const u1* ptr = pAnnoItem->annotation;
+        typeIdx = readUleb128(&ptr);
+
+        if (compareClassDescriptor(pDexFile, typeIdx, descriptor) == 0) {
+            //printf("#####  match on %x/%p at %d\n", typeIdx, pDexFile, i);
+            result = pAnnoItem;
+            break;
+        }
+    }
+
+    return result;
+}
+
+/*
+ * Find an annotation value in the annotation_item whose name matches "name".
+ * A pointer to the annotation_value is returned, or NULL if it's not found.
+ */
+static const u1* searchEncodedAnnotation(const ClassObject* clazz,
+    const u1* ptr, const char* name)
+{
+    DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+    u4 typeIdx, size;
+
+    typeIdx = readUleb128(&ptr);
+    size = readUleb128(&ptr);
+    //printf("#####   searching ptr=%p type=%u size=%u\n", ptr, typeIdx, size);
+
+    while (size--) {
+        u4 elementNameIdx;
+        const char* elemName;
+
+        elementNameIdx = readUleb128(&ptr);
+        elemName = dexStringById(pDexFile, elementNameIdx);
+        if (strcmp(name, elemName) == 0) {
+            //printf("#####   item match on %s\n", name);
+            return ptr;     /* points to start of value */
+        }
+
+        skipAnnotationValue(clazz, &ptr);
+    }
+
+    //printf("#####   no item match on %s\n", name);
+    return NULL;
+}
+
+#define GAV_FAILED  ((Object*) 0x10000001)
+
+/*
+ * Extract an encoded annotation value from the field specified by "annoName".
+ *
+ * "expectedType" is an annotation value type, e.g. kDexAnnotationString.
+ * "debugAnnoName" is only used in debug messages.
+ *
+ * Returns GAV_FAILED on failure.  If an allocation failed, an exception
+ * will be raised.
+ */
+static Object* getAnnotationValue(const ClassObject* clazz,
+    const DexAnnotationItem* pAnnoItem, const char* annoName,
+    int expectedType, const char* debugAnnoName)
+{
+    const u1* ptr;
+    Object* obj;
+    AnnotationValue avalue;
+
+    /* find the annotation */
+    ptr = searchEncodedAnnotation(clazz, pAnnoItem->annotation, annoName);
+    if (ptr == NULL) {
+        LOGW("%s annotation lacks '%s' member\n", debugAnnoName, annoName);
+        return GAV_FAILED;
+    }
+
+    if (!processAnnotationValue(clazz, &ptr, &avalue, kAllObjects))
+        return GAV_FAILED;
+
+    /* make sure it has the expected format */
+    if (avalue.type != expectedType) {
+        LOGW("%s %s has wrong type (0x%02x, expected 0x%02x)\n",
+            debugAnnoName, annoName, avalue.type, expectedType);
+        return GAV_FAILED;
+    }
+
+    return avalue.value.l;
+}
+
+
+/*
+ * Find the Signature attribute and extract its value.  (Signatures can
+ * be found in annotations on classes, constructors, methods, and fields.)
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ *
+ * Returns NULL if not found.  On memory alloc failure, returns NULL with an
+ * exception raised.
+ */
+static ArrayObject* getSignatureValue(const ClassObject* clazz,
+    const DexAnnotationSetItem* pAnnoSet)
+{
+    const DexAnnotationItem* pAnnoItem;
+    Object* obj;
+
+    pAnnoItem = searchAnnotationSet(clazz, pAnnoSet, kDescrSignature,
+        kDexVisibilitySystem);
+    if (pAnnoItem == NULL)
+        return NULL;
+
+    /*
+     * The Signature annotation has one member, "String value".
+     */
+    obj = getAnnotationValue(clazz, pAnnoItem, "value", kDexAnnotationArray,
+            "Signature");
+    if (obj == GAV_FAILED)
+        return NULL;
+    assert(obj->clazz == gDvm.classJavaLangObjectArray);
+
+    return (ArrayObject*)obj;
+}
+
+
+/*
+ * ===========================================================================
+ *      Class
+ * ===========================================================================
+ */
+
+/*
+ * Find the DexAnnotationSetItem for this class.
+ */
+static const DexAnnotationSetItem* findAnnotationSetForClass(
+    const ClassObject* clazz)
+{
+    DexFile* pDexFile;
+    const DexAnnotationsDirectoryItem* pAnnoDir;
+
+    if (clazz->pDvmDex == NULL)         /* generated class (Proxy, array) */
+        return NULL;
+
+    pDexFile = clazz->pDvmDex->pDexFile;
+    pAnnoDir = getAnnoDirectory(pDexFile, clazz);
+    if (pAnnoDir != NULL)
+        return dexGetClassAnnotationSet(pDexFile, pAnnoDir);
+    else
+        return NULL;
+}
+
+/*
+ * Return an array of Annotation objects for the class.  Returns an empty
+ * array if there are no annotations.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ *
+ * On allocation failure, this returns NULL with an exception raised.
+ */
+ArrayObject* dvmGetClassAnnotations(const ClassObject* clazz)
+{
+    ArrayObject* annoArray;
+    const DexAnnotationSetItem* pAnnoSet = NULL;
+
+    pAnnoSet = findAnnotationSetForClass(clazz);
+    if (pAnnoSet == NULL) {
+        /* no annotations for anything in class, or no class annotations */
+        annoArray = emptyAnnoArray();
+    } else {
+        annoArray = processAnnotationSet(clazz, pAnnoSet,
+                        kDexVisibilityRuntime);
+    }
+
+    return annoArray;
+}
+
+/*
+ * Retrieve the Signature annotation, if any.  Returns NULL if no signature
+ * exists.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ */
+ArrayObject* dvmGetClassSignatureAnnotation(const ClassObject* clazz)
+{
+    ArrayObject* signature = NULL;
+    const DexAnnotationSetItem* pAnnoSet;
+
+    pAnnoSet = findAnnotationSetForClass(clazz);
+    if (pAnnoSet != NULL)
+        signature = getSignatureValue(clazz, pAnnoSet);
+
+    return signature;
+}
+
+/*
+ * Get the EnclosingMethod attribute from an annotation.  Returns a Method
+ * object, or NULL.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ */
+Object* dvmGetEnclosingMethod(const ClassObject* clazz)
+{
+    const DexAnnotationItem* pAnnoItem;
+    const DexAnnotationSetItem* pAnnoSet;
+    Object* obj;
+
+    pAnnoSet = findAnnotationSetForClass(clazz);
+    if (pAnnoSet == NULL)
+        return NULL;
+
+    pAnnoItem = searchAnnotationSet(clazz, pAnnoSet, kDescrEnclosingMethod,
+        kDexVisibilitySystem);
+    if (pAnnoItem == NULL)
+        return NULL;
+
+    /*
+     * The EnclosingMethod annotation has one member, "Method value".
+     */
+    obj = getAnnotationValue(clazz, pAnnoItem, "value", kDexAnnotationMethod,
+            "EnclosingMethod");
+    if (obj == GAV_FAILED)
+        return NULL;
+    assert(obj->clazz == gDvm.classJavaLangReflectConstructor ||
+           obj->clazz == gDvm.classJavaLangReflectMethod);
+
+    return obj;
+}
+
+/*
+ * Find a class' enclosing class.  We return what we find in the
+ * EnclosingClass attribute.
+ *
+ * Returns a Class object, or NULL.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ */
+ClassObject* dvmGetDeclaringClass(const ClassObject* clazz)
+{
+    const DexAnnotationItem* pAnnoItem;
+    const DexAnnotationSetItem* pAnnoSet;
+    Object* obj;
+
+    pAnnoSet = findAnnotationSetForClass(clazz);
+    if (pAnnoSet == NULL)
+        return NULL;
+
+    pAnnoItem = searchAnnotationSet(clazz, pAnnoSet, kDescrEnclosingClass,
+        kDexVisibilitySystem);
+    if (pAnnoItem == NULL)
+        return NULL;
+
+    /*
+     * The EnclosingClass annotation has one member, "Class value".
+     */
+    obj = getAnnotationValue(clazz, pAnnoItem, "value", kDexAnnotationType,
+            "EnclosingClass");
+    if (obj == GAV_FAILED)
+        return NULL;
+
+    assert(obj->clazz == gDvm.classJavaLangClass);
+    return (ClassObject*)obj;
+}
+
/*
 * Find a class' enclosing class.  We first search for an EnclosingClass
 * attribute, and if that's not found we look for an EnclosingMethod.
 *
 * Returns a Class object, or NULL.
 *
 * Caller must call dvmReleaseTrackedAlloc().
 */
ClassObject* dvmGetEnclosingClass(const ClassObject* clazz)
{
    const DexAnnotationItem* pAnnoItem;
    const DexAnnotationSetItem* pAnnoSet;
    Object* obj;

    pAnnoSet = findAnnotationSetForClass(clazz);
    if (pAnnoSet == NULL)
        return NULL;

    /* first choice: an explicit EnclosingClass system annotation */
    pAnnoItem = searchAnnotationSet(clazz, pAnnoSet, kDescrEnclosingClass,
        kDexVisibilitySystem);
    if (pAnnoItem != NULL) {
        /*
         * The EnclosingClass annotation has one member, "Class value".
         */
        obj = getAnnotationValue(clazz, pAnnoItem, "value", kDexAnnotationType,
                "EnclosingClass");
        if (obj != GAV_FAILED) {
            assert(obj->clazz == gDvm.classJavaLangClass);
            return (ClassObject*)obj;
        }
        /* on GAV_FAILED we fall through and try EnclosingMethod instead */
    }

    /*
     * That didn't work.  Look for an EnclosingMethod.
     *
     * We could create a java.lang.reflect.Method object and extract the
     * declaringClass from it, but that's more work than we want to do.
     * Instead, we find the "value" item and parse the index out ourselves.
     */
    pAnnoItem = searchAnnotationSet(clazz, pAnnoSet, kDescrEnclosingMethod,
        kDexVisibilitySystem);
    if (pAnnoItem == NULL)
        return NULL;

    /* find the value member */
    const u1* ptr;
    ptr = searchEncodedAnnotation(clazz, pAnnoItem->annotation, "value");
    if (ptr == NULL) {
        LOGW("EnclosingMethod annotation lacks 'value' member\n");
        return NULL;
    }

    /* parse it, verify the type; kAllRaw keeps the raw method index */
    AnnotationValue avalue;
    if (!processAnnotationValue(clazz, &ptr, &avalue, kAllRaw)) {
        LOGW("EnclosingMethod parse failed\n");
        return NULL;
    }
    if (avalue.type != kDexAnnotationMethod) {
        LOGW("EnclosingMethod value has wrong type (0x%02x, expected 0x%02x)\n",
            avalue.type, kDexAnnotationMethod);
        return NULL;
    }

    /* pull out the method index and resolve the method */
    Method* meth = resolveAmbiguousMethod(clazz, avalue.value.i);
    if (meth == NULL)
        return NULL;

    /* hand back the method's declaring class, with a tracked reference so
     * the caller's dvmReleaseTrackedAlloc() is balanced */
    ClassObject* methClazz = meth->clazz;
    dvmAddTrackedAlloc((Object*) methClazz, NULL);      // balance the Release
    return methClazz;
}
+
/*
 * Get the InnerClass attribute from an annotation.  If found, returns
 * "true".  A String with the original name of the class and the original
 * access flags are returned through the arguments.  (The name will be NULL
 * for an anonymous inner class.)
 *
 * "*pName" receives the original class name (or NULL); "*pAccessFlags"
 * receives the original access flags.  Neither is written on failure.
 *
 * Caller must call dvmReleaseTrackedAlloc().
 */
bool dvmGetInnerClass(const ClassObject* clazz, StringObject** pName,
    int* pAccessFlags)
{
    const DexAnnotationItem* pAnnoItem;
    const DexAnnotationSetItem* pAnnoSet;

    pAnnoSet = findAnnotationSetForClass(clazz);
    if (pAnnoSet == NULL)
        return false;

    pAnnoItem = searchAnnotationSet(clazz, pAnnoSet, kDescrInnerClass,
        kDexVisibilitySystem);
    if (pAnnoItem == NULL)
        return false;

    /*
     * The InnerClass annotation has two members, "String name" and
     * "int accessFlags".  We don't want to get the access flags as an
     * Integer, so we process that as a simple value.
     */
    const u1* ptr;
    ptr = searchEncodedAnnotation(clazz, pAnnoItem->annotation, "name");
    if (ptr == NULL) {
        LOGW("InnerClass annotation lacks 'name' member\n");
        return false;
    }

    /* parse it into an Object (kAllObjects boxes the value) */
    AnnotationValue avalue;
    if (!processAnnotationValue(clazz, &ptr, &avalue, kAllObjects)) {
        LOGD("processAnnotationValue failed on InnerClass member 'name'\n");
        return false;
    }

    /* make sure it has the expected format: NULL for anonymous classes,
     * a String otherwise */
    if (avalue.type != kDexAnnotationNull &&
        avalue.type != kDexAnnotationString)
    {
        LOGW("InnerClass name has bad type (0x%02x, expected STRING or NULL)\n",
            avalue.type);
        return false;
    }

    *pName = (StringObject*) avalue.value.l;
    assert(*pName == NULL || (*pName)->obj.clazz == gDvm.classJavaLangString);

    /* second member: the original access flags, taken as a raw int */
    ptr = searchEncodedAnnotation(clazz, pAnnoItem->annotation, "accessFlags");
    if (ptr == NULL) {
        LOGW("InnerClass annotation lacks 'accessFlags' member\n");
        return false;
    }

    /* parse it, verify the type */
    if (!processAnnotationValue(clazz, &ptr, &avalue, kAllRaw)) {
        LOGW("InnerClass accessFlags parse failed\n");
        return false;
    }
    if (avalue.type != kDexAnnotationInt) {
        LOGW("InnerClass value has wrong type (0x%02x, expected 0x%02x)\n",
            avalue.type, kDexAnnotationInt);
        return false;
    }

    *pAccessFlags = avalue.value.i;

    return true;
}
+
+/*
+ * Extract an array of Class objects from the MemberClasses annotation
+ * for this class.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ *
+ * Returns NULL if we don't find any member classes.
+ */
+ArrayObject* dvmGetDeclaredClasses(const ClassObject* clazz)
+{
+    const DexAnnotationSetItem* pAnnoSet;
+    const DexAnnotationItem* pAnnoItem;
+    Object* obj;
+
+    pAnnoSet = findAnnotationSetForClass(clazz);
+    if (pAnnoSet == NULL)
+        return NULL;
+
+    pAnnoItem = searchAnnotationSet(clazz, pAnnoSet, kDescrMemberClasses,
+        kDexVisibilitySystem);
+    if (pAnnoItem == NULL)
+        return NULL;
+
+    /*
+     * The MemberClasses annotation has one member, "Class[] value".
+     */
+    obj = getAnnotationValue(clazz, pAnnoItem, "value",
+            kDexAnnotationArray, "MemberClasses");
+    if (obj == GAV_FAILED)
+        return NULL;
+    assert(dvmIsArray((ArrayObject*)obj));
+    obj = convertReturnType(obj, gDvm.classJavaLangClassArray);
+    return (ArrayObject*)obj;
+}
+
+
+/*
+ * ===========================================================================
+ *      Method (and Constructor)
+ * ===========================================================================
+ */
+
+/*
+ * Compare the attributes (class name, method name, method signature) of
+ * the specified method to "method".
+ */
+static int compareMethodStr(DexFile* pDexFile, u4 methodIdx,
+    const Method* method)
+{
+    const DexMethodId* pMethodId = dexGetMethodId(pDexFile, methodIdx);
+    const char* str = dexStringByTypeIdx(pDexFile, pMethodId->classIdx);
+    int result = strcmp(str, method->clazz->descriptor);
+
+    if (result == 0) {
+        str = dexStringById(pDexFile, pMethodId->nameIdx);
+        result = strcmp(str, method->name);
+        if (result == 0) {
+            DexProto proto;
+            dexProtoSetFromMethodId(&proto, pDexFile, pMethodId);
+            result = dexProtoCompare(&proto, &method->prototype);
+        }
+    }
+
+    return result;
+}
+
+/*
+ * Given a method, determine the method's index.
+ *
+ * We could simply store this in the Method*, but that would cost 4 bytes
+ * per method.  Instead we plow through the DEX data.
+ *
+ * We have two choices: look through the class method data, or look through
+ * the global method_ids table.  The former is awkward because the method
+ * could have been defined in a superclass or interface.  The latter works
+ * out reasonably well because it's in sorted order, though we're still left
+ * doing a fair number of string comparisons.
+ */
+static u4 getMethodIdx(const Method* method)
+{
+    DexFile* pDexFile = method->clazz->pDvmDex->pDexFile;
+    u4 hi = pDexFile->pHeader->methodIdsSize -1;
+    u4 lo = 0;
+    u4 cur;
+
+    while (hi >= lo) {
+        int cmp;
+        cur = (lo + hi) / 2;
+
+        cmp = compareMethodStr(pDexFile, cur, method);
+        if (cmp < 0) {
+            lo = cur + 1;
+        } else if (cmp > 0) {
+            hi = cur - 1;
+        } else {
+            break;
+        }
+    }
+
+    if (hi < lo) {
+        /* this should be impossible -- the method came out of this DEX */
+        char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+        LOGE("Unable to find method %s.%s %s in DEX file!\n",
+            method->clazz->descriptor, method->name, desc);
+        free(desc);
+        dvmAbort();
+    }
+
+    return cur;
+}
+
+/*
+ * Find the DexAnnotationSetItem for this method.
+ *
+ * Returns NULL if none found.
+ */
+static const DexAnnotationSetItem* findAnnotationSetForMethod(
+    const Method* method)
+{
+    ClassObject* clazz = method->clazz;
+    DexFile* pDexFile;
+    const DexAnnotationsDirectoryItem* pAnnoDir;
+    const DexMethodAnnotationsItem* pMethodList;
+    const DexAnnotationSetItem* pAnnoSet = NULL;
+
+    if (clazz->pDvmDex == NULL)         /* generated class (Proxy, array) */
+        return NULL;
+    pDexFile = clazz->pDvmDex->pDexFile;
+
+    pAnnoDir = getAnnoDirectory(pDexFile, clazz);
+    if (pAnnoDir != NULL) {
+        pMethodList = dexGetMethodAnnotations(pDexFile, pAnnoDir);
+        if (pMethodList != NULL) {
+            /*
+             * Run through the list and find a matching method.  We compare the
+             * method ref indices in the annotation list with the method's DEX
+             * method_idx value.
+             *
+             * TODO: use a binary search for long lists
+             *
+             * Alternate approach: for each entry in the annotations list,
+             * find the method definition in the DEX file and perform string
+             * comparisons on class name, method name, and signature.
+             */
+            u4 methodIdx = getMethodIdx(method);
+            u4 count = dexGetMethodAnnotationsSize(pDexFile, pAnnoDir);
+            u4 idx;
+
+            for (idx = 0; idx < count; idx++) {
+                if (pMethodList[idx].methodIdx == methodIdx) {
+                    /* found! */
+                    pAnnoSet = dexGetMethodAnnotationSetItem(pDexFile,
+                                    &pMethodList[idx]);
+                    break;
+                }
+            }
+        }
+    }
+
+    return pAnnoSet;
+}
+
+/*
+ * Return an array of Annotation objects for the method.  Returns an empty
+ * array if there are no annotations.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ *
+ * On allocation failure, this returns NULL with an exception raised.
+ */
+ArrayObject* dvmGetMethodAnnotations(const Method* method)
+{
+    ClassObject* clazz = method->clazz;
+    const DexAnnotationSetItem* pAnnoSet;
+    ArrayObject* annoArray = NULL;
+
+    pAnnoSet = findAnnotationSetForMethod(method);
+    if (pAnnoSet == NULL) {
+        /* no matching annotations found */
+        annoArray = emptyAnnoArray();
+    } else {
+        annoArray = processAnnotationSet(clazz, pAnnoSet,kDexVisibilityRuntime);
+    }
+
+    return annoArray;
+}
+
+/*
+ * Retrieve the Signature annotation, if any.  Returns NULL if no signature
+ * exists.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ */
+ArrayObject* dvmGetMethodSignatureAnnotation(const Method* method)
+{
+    ClassObject* clazz = method->clazz;
+    const DexAnnotationSetItem* pAnnoSet;
+    ArrayObject* signature = NULL;
+
+    pAnnoSet = findAnnotationSetForMethod(method);
+    if (pAnnoSet != NULL)
+        signature = getSignatureValue(clazz, pAnnoSet);
+
+    return signature;
+}
+
+/*
+ * Extract an array of exception classes from the "system" annotation list
+ * for this method.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ *
+ * Returns NULL if we don't find any exceptions for this method.
+ */
+ArrayObject* dvmGetMethodThrows(const Method* method)
+{
+    ClassObject* clazz = method->clazz;
+    const DexAnnotationSetItem* pAnnoSet;
+    const DexAnnotationItem* pAnnoItem;
+
+    /* find the set for this method */
+    pAnnoSet = findAnnotationSetForMethod(method);
+    if (pAnnoSet == NULL)
+        return NULL;        /* nothing for this method */
+
+    /* find the "Throws" annotation, if any */
+    pAnnoItem = searchAnnotationSet(clazz, pAnnoSet, kDescrThrows,
+        kDexVisibilitySystem);
+    if (pAnnoItem == NULL)
+        return NULL;        /* no Throws */
+
+    /*
+     * The Throws annotation has one member, "Class[] value".
+     */
+    Object* obj = getAnnotationValue(clazz, pAnnoItem, "value",
+        kDexAnnotationArray, "Throws");
+    if (obj == GAV_FAILED)
+        return NULL;
+    assert(dvmIsArray((ArrayObject*)obj));
+    obj = convertReturnType(obj, gDvm.classJavaLangClassArray);
+    return (ArrayObject*)obj;
+}
+
/*
 * Given an Annotation's method, find the default value, if any.
 *
 * If this is a CLASS annotation, and we can't find a match for the
 * default class value, we need to throw a TypeNotPresentException.
 *
 * Returns the default value as an Object, or NULL if none exists.
 *
 * Caller must call dvmReleaseTrackedAlloc().
 */
Object* dvmGetAnnotationDefaultValue(const Method* method)
{
    const ClassObject* clazz = method->clazz;
    DexFile* pDexFile = clazz->pDvmDex->pDexFile;
    const DexAnnotationsDirectoryItem* pAnnoDir;
    const DexAnnotationSetItem* pAnnoSet = NULL;

    /*
     * The method's declaring class (the annotation) will have an
     * AnnotationDefault "system" annotation associated with it if any
     * of its methods have default values.  Start by finding the
     * DexAnnotationItem associated with the class.
     */
    pAnnoDir = getAnnoDirectory(pDexFile, clazz);
    if (pAnnoDir != NULL)
        pAnnoSet = dexGetClassAnnotationSet(pDexFile, pAnnoDir);
    if (pAnnoSet == NULL) {
        /* no annotations for anything in class, or no class annotations */
        return NULL;
    }

    /* find the "AnnotationDefault" annotation, if any */
    const DexAnnotationItem* pAnnoItem;
    pAnnoItem = searchAnnotationSet(clazz, pAnnoSet, kDescrAnnotationDefault,
        kDexVisibilitySystem);
    if (pAnnoItem == NULL) {
        /* no default values for any member in this annotation */
        //printf("##### no default annotations for %s.%s\n",
        //    method->clazz->descriptor, method->name);
        return NULL;
    }

    /*
     * The AnnotationDefault annotation has one member, "Annotation value".
     * We need to pull that out.
     */
    const u1* ptr;
    ptr = searchEncodedAnnotation(clazz, pAnnoItem->annotation, "value");
    if (ptr == NULL) {
        LOGW("AnnotationDefault annotation lacks 'value'\n");
        return NULL;
    }
    /* "ptr" points at the value_type byte; it must be a nested annotation */
    if ((*ptr & kDexAnnotationValueTypeMask) != kDexAnnotationAnnotation) {
        LOGW("AnnotationDefault value has wrong type (0x%02x)\n",
            *ptr & kDexAnnotationValueTypeMask);
        return NULL;
    }

    /*
     * The value_type byte for VALUE_ANNOTATION is followed by
     * encoded_annotation data.  We want to scan through it to find an
     * entry whose name matches our method name.
     */
    ptr++;
    ptr = searchEncodedAnnotation(clazz, ptr, method->name);
    if (ptr == NULL)
        return NULL;        /* no default annotation for this method */

    /* got it, pull it out (kAllObjects boxes primitives) */
    AnnotationValue avalue;
    if (!processAnnotationValue(clazz, &ptr, &avalue, kAllObjects)) {
        LOGD("processAnnotationValue failed on default for '%s'\n",
            method->name);
        return NULL;
    }

    /* convert the return type, if necessary */
    ClassObject* methodReturn = dvmGetBoxedReturnType(method);
    Object* obj = avalue.value.l;
    obj = convertReturnType(obj, methodReturn);

    return obj;
}
+
+
+/*
+ * ===========================================================================
+ *      Field
+ * ===========================================================================
+ */
+
+/*
+ * Compare the attributes (class name, field name, field signature) of
+ * the specified field to "field".
+ */
+static int compareFieldStr(DexFile* pDexFile, u4 idx, const Field* field)
+{
+    const DexFieldId* pFieldId = dexGetFieldId(pDexFile, idx);
+    const char* str = dexStringByTypeIdx(pDexFile, pFieldId->classIdx);
+    int result = strcmp(str, field->clazz->descriptor);
+
+    if (result == 0) {
+        str = dexStringById(pDexFile, pFieldId->nameIdx);
+        result = strcmp(str, field->name);
+        if (result == 0) {
+            str = dexStringByTypeIdx(pDexFile, pFieldId->typeIdx);
+            result = strcmp(str, field->signature);
+        }
+    }
+
+    return result;
+}
+
+/*
+ * Given a field, determine the field's index.
+ *
+ * This has the same tradeoffs as getMethodIdx.
+ */
+static u4 getFieldIdx(const Field* field)
+{
+    DexFile* pDexFile = field->clazz->pDvmDex->pDexFile;
+    u4 hi = pDexFile->pHeader->fieldIdsSize -1;
+    u4 lo = 0;
+    u4 cur;
+
+    while (hi >= lo) {
+        int cmp;
+        cur = (lo + hi) / 2;
+
+        cmp = compareFieldStr(pDexFile, cur, field);
+        if (cmp < 0) {
+            lo = cur + 1;
+        } else if (cmp > 0) {
+            hi = cur - 1;
+        } else {
+            break;
+        }
+    }
+
+    if (hi < lo) {
+        /* this should be impossible -- the field came out of this DEX */
+        LOGE("Unable to find field %s.%s %s in DEX file!\n",
+            field->clazz->descriptor, field->name, field->signature);
+        dvmAbort();
+    }
+
+    return cur;
+}
+
+/*
+ * Find the DexAnnotationSetItem for this field.
+ *
+ * Returns NULL if none found.
+ */
+static const DexAnnotationSetItem* findAnnotationSetForField(const Field* field)
+{
+    ClassObject* clazz = field->clazz;
+    DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+    const DexAnnotationsDirectoryItem* pAnnoDir;
+    const DexFieldAnnotationsItem* pFieldList;
+
+    pAnnoDir = getAnnoDirectory(pDexFile, clazz);
+    if (pAnnoDir == NULL)
+        return NULL;
+
+    pFieldList = dexGetFieldAnnotations(pDexFile, pAnnoDir);
+    if (pFieldList == NULL)
+        return NULL;
+
+    /*
+     * Run through the list and find a matching field.  We compare the
+     * field ref indices in the annotation list with the field's DEX
+     * field_idx value.
+     *
+     * TODO: use a binary search for long lists
+     *
+     * Alternate approach: for each entry in the annotations list,
+     * find the field definition in the DEX file and perform string
+     * comparisons on class name, field name, and signature.
+     */
+    u4 fieldIdx = getFieldIdx(field);
+    u4 count = dexGetFieldAnnotationsSize(pDexFile, pAnnoDir);
+    u4 idx;
+
+    for (idx = 0; idx < count; idx++) {
+        if (pFieldList[idx].fieldIdx == fieldIdx) {
+            /* found! */
+            return dexGetFieldAnnotationSetItem(pDexFile, &pFieldList[idx]);
+        }
+    }
+
+    return NULL;
+}
+
+/*
+ * Return an array of Annotation objects for the field.  Returns an empty
+ * array if there are no annotations.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ *
+ * On allocation failure, this returns NULL with an exception raised.
+ */
+ArrayObject* dvmGetFieldAnnotations(const Field* field)
+{
+    ClassObject* clazz = field->clazz;
+    ArrayObject* annoArray;
+    const DexAnnotationSetItem* pAnnoSet;
+
+    pAnnoSet = findAnnotationSetForField(field);
+    if (pAnnoSet == NULL) {
+        /* no matching annotations found */
+        annoArray = emptyAnnoArray();
+    } else {
+        annoArray = processAnnotationSet(clazz, pAnnoSet,
+                        kDexVisibilityRuntime);
+    }
+
+    return annoArray;
+}
+
+/*
+ * Retrieve the Signature annotation, if any.  Returns NULL if no signature
+ * exists.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ */
+ArrayObject* dvmGetFieldSignatureAnnotation(const Field* field)
+{
+    const DexAnnotationSetItem* pAnnoSet = findAnnotationSetForField(field);
+
+    if (pAnnoSet == NULL)
+        return NULL;
+
+    return getSignatureValue(field->clazz, pAnnoSet);
+}
+
+
+/*
+ * ===========================================================================
+ *      Parameter
+ * ===========================================================================
+ */
+
+/*
+ * We have an annotation_set_ref_list, which is essentially a list of
+ * entries that we pass to processAnnotationSet().
+ *
+ * The returned object must be released with dvmReleaseTrackedAlloc.
+ */
+static ArrayObject* processAnnotationSetRefList(const ClassObject* clazz,
+    const DexAnnotationSetRefList* pAnnoSetList, u4 count)
+{
+    DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+    ArrayObject* annoArrayArray = NULL;
+    ArrayObject** pContents;
+    Thread* self = dvmThreadSelf();     /* loop-invariant; fetch once */
+    u4 idx;
+
+    /* allocate an array of Annotation arrays to hold results */
+    annoArrayArray = dvmAllocArrayByClass(
+        gDvm.classJavaLangAnnotationAnnotationArrayArray, count, ALLOC_DEFAULT);
+    if (annoArrayArray == NULL) {
+        LOGW("annotation set ref array alloc failed\n");
+        goto bail;
+    }
+
+    pContents = (ArrayObject**) annoArrayArray->contents;
+
+    for (idx = 0; idx < count; idx++) {
+        const DexAnnotationSetRefItem* pItem;
+        const DexAnnotationSetItem* pAnnoSet;
+
+        pItem = dexGetParameterAnnotationSetRef(pAnnoSetList, idx);
+        pAnnoSet = dexGetSetRefItemItem(pDexFile, pItem);
+        *pContents = processAnnotationSet(clazz, pAnnoSet,
+                        kDexVisibilityRuntime);
+        if (*pContents == NULL) {
+            LOGW("processAnnotationSet failed\n");
+            annoArrayArray = NULL;
+            goto bail;
+        }
+        /* the outer array now holds the reference; drop the tracked one */
+        dvmReleaseTrackedAlloc((Object*) *pContents, self);
+        pContents++;
+    }
+
+bail:
+    return annoArrayArray;
+}
+
+/*
+ * Find the DexParameterAnnotationsItem for this method.
+ *
+ * Returns NULL if none found.
+ */
+static const DexParameterAnnotationsItem* findAnnotationsItemForMethod(
+    const Method* method)
+{
+    ClassObject* clazz = method->clazz;
+
+    /* generated classes (Proxy, array) have no DEX backing */
+    if (clazz->pDvmDex == NULL)
+        return NULL;
+
+    DexFile* pDexFile = clazz->pDvmDex->pDexFile;
+    const DexAnnotationsDirectoryItem* pAnnoDir =
+        getAnnoDirectory(pDexFile, clazz);
+    if (pAnnoDir == NULL)
+        return NULL;
+
+    const DexParameterAnnotationsItem* pParameterList =
+        dexGetParameterAnnotations(pDexFile, pAnnoDir);
+    if (pParameterList == NULL)
+        return NULL;
+
+    /*
+     * Linear scan for an entry whose method ref index matches this
+     * method's DEX method_idx value.
+     *
+     * TODO: use a binary search for long lists
+     *
+     * Alternate approach: for each entry in the annotations list,
+     * find the method definition in the DEX file and perform string
+     * comparisons on class name, method name, and signature.
+     */
+    u4 methodIdx = getMethodIdx(method);
+    u4 count = dexGetParameterAnnotationsSize(pDexFile, pAnnoDir);
+    u4 i;
+
+    for (i = 0; i < count; i++) {
+        if (pParameterList[i].methodIdx == methodIdx) {
+            /* found! */
+            return &pParameterList[i];
+        }
+    }
+
+    return NULL;
+}
+
+/*
+ * Return an array of arrays of Annotation objects.  The outer array has
+ * one entry per method parameter, the inner array has the list of annotations
+ * associated with that parameter.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ */
+ArrayObject* dvmGetParameterAnnotations(const Method* method)
+{
+    const DexParameterAnnotationsItem* pItem =
+        findAnnotationsItemForMethod(method);
+
+    if (pItem == NULL) {
+        /* no matching annotations found */
+        return emptyAnnoArrayArray();
+    }
+
+    DexFile* pDexFile = method->clazz->pDvmDex->pDexFile;
+    u4 size = dexGetParameterAnnotationSetRefSize(pDexFile, pItem);
+    const DexAnnotationSetRefList* pAnnoSetList =
+        dexGetParameterAnnotationSetRefList(pDexFile, pItem);
+
+    return processAnnotationSetRefList(method->clazz, pAnnoSetList, size);
+}
+
+
+/*
+ * ===========================================================================
+ *      DexEncodedArray interpretation
+ * ===========================================================================
+ */
+
+/**
+ * Initializes an encoded array iterator.
+ *
+ * @param iterator iterator to initialize
+ * @param encodedArray encoded array to iterate over
+ * @param clazz class to use when resolving strings and types
+ */
+void dvmEncodedArrayIteratorInitialize(EncodedArrayIterator* iterator,
+        const DexEncodedArray* encodedArray, const ClassObject* clazz) {
+    iterator->clazz = clazz;
+    iterator->encodedArray = encodedArray;
+
+    /* the array data begins with a uleb128 element count */
+    iterator->cursor = encodedArray->array;
+    iterator->size = readUleb128(&iterator->cursor);
+    iterator->elementsLeft = iterator->size;
+}
+
+/**
+ * Returns whether there are more elements to be read.
+ */
+bool dvmEncodedArrayIteratorHasNext(const EncodedArrayIterator* iterator) {
+    return iterator->elementsLeft > 0;
+}
+
+/**
+ * Returns the next decoded value from the iterator, advancing its
+ * cursor. This returns primitive values in their corresponding union
+ * slots, and returns everything else (including nulls) as object
+ * references in the "l" union slot.
+ *
+ * The caller must call dvmReleaseTrackedAlloc() on any returned reference.
+ *
+ * @param value pointer to store decoded value into
+ * @returns true if a value was decoded and the cursor advanced; false if
+ * the last value had already been decoded or if there was a problem decoding
+ */
+bool dvmEncodedArrayIteratorGetNext(EncodedArrayIterator* iterator,
+        AnnotationValue* value) {
+    bool processed;
+
+    if (iterator->elementsLeft == 0) {
+        return false;
+    }
+
+    processed = processAnnotationValue(iterator->clazz, &iterator->cursor,
+            value, kPrimitivesOrObjects);
+
+    if (! processed) {
+        /* poison the iterator so subsequent calls fail fast */
+        LOGE("Failed to process array element %d from %p\n",
+                iterator->size - iterator->elementsLeft,
+                iterator->encodedArray);
+        iterator->elementsLeft = 0;
+        return false;
+    }
+
+    iterator->elementsLeft--;
+    return true;
+}
+
diff --git a/vm/reflect/Proxy.c b/vm/reflect/Proxy.c
new file mode 100644
index 0000000..8641ab9
--- /dev/null
+++ b/vm/reflect/Proxy.c
@@ -0,0 +1,627 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Implementation of java.lang.reflect.Proxy.
+ *
+ * Traditionally this is implemented entirely in interpreted code,
+ * generating bytecode that defines the proxy class.  Dalvik doesn't
+ * currently support this approach, so we generate the class directly.  If
+ * we add support for DefineClass with standard classfiles we can
+ * eliminate this.
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+
+// fwd
+static bool gatherMethods(ArrayObject* interfaces, Method*** pMethods,
+    int* pMethodCount);
+static bool addMethod(Method* meth, Method** methArray, int slot);
+static void createConstructor(ClassObject* clazz, Method* meth);
+static void createHandlerMethod(ClassObject* clazz, Method* dstMeth,
+    const Method* srcMeth);
+static void proxyConstructor(const u4* args, JValue* pResult,
+    const Method* method, Thread* self);
+static void proxyInvoker(const u4* args, JValue* pResult,
+    const Method* method, Thread* self);
+
+/*
+ * Perform Proxy setup.
+ */
+bool dvmReflectProxyStartup()
+{
+    /*
+     * Cache the vtable offsets of the standard java.lang.Object methods
+     * a generated proxy must provide.
+     */
+    Method* methE = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangObject,
+            "equals", "(Ljava/lang/Object;)Z");
+    Method* methH = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangObject,
+            "hashCode", "()I");
+    Method* methT = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangObject,
+            "toString", "()Ljava/lang/String;");
+    Method* methF = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangObject,
+            "finalize", "()V");
+    if (methE == NULL || methH == NULL || methT == NULL || methF == NULL) {
+        LOGE("Could not find equals/hashCode/toString/finalize in Object\n");
+        return false;
+    }
+    gDvm.voffJavaLangObject_equals = methE->methodIndex;
+    gDvm.voffJavaLangObject_hashCode = methH->methodIndex;
+    gDvm.voffJavaLangObject_toString = methT->methodIndex;
+    gDvm.voffJavaLangObject_finalize = methF->methodIndex;
+
+    /*
+     * The prototype signature needs to be cloned from a method in a
+     * "real" DEX file.  We declared this otherwise unused method just
+     * for this purpose.
+     */
+    ClassObject* proxyClass =
+        dvmFindSystemClassNoInit("Ljava/lang/reflect/Proxy;");
+    if (proxyClass == NULL) {
+        LOGE("No java.lang.reflect.Proxy\n");
+        return false;
+    }
+
+    Method* meth = dvmFindDirectMethodByDescriptor(proxyClass,
+                "constructorPrototype",
+                "(Ljava/lang/reflect/InvocationHandler;)V");
+    if (meth == NULL) {
+        LOGE("Could not find java.lang.Proxy.constructorPrototype()\n");
+        return false;
+    }
+    gDvm.methJavaLangReflectProxy_constructorPrototype = meth;
+
+    return true;
+}
+
+
+/*
+ * Generate a proxy class with the specified name, interfaces, and loader.
+ * "interfaces" is an array of class objects.
+ *
+ * The interpreted code has done all of the necessary checks, e.g. we know
+ * that "interfaces" contains only interface classes.
+ *
+ * On failure we leave a partially-created class object sitting around,
+ * but the garbage collector will take care of it.
+ */
+ClassObject* dvmGenerateProxyClass(StringObject* str, ArrayObject* interfaces,
+    Object* loader)
+{
+    int result = -1;
+    char* nameStr = NULL;
+    Method** methods = NULL;
+    ClassObject* newClass = NULL;
+    int i;
+
+    nameStr = dvmCreateCstrFromString(str);
+    if (nameStr == NULL) {
+        dvmThrowException("Ljava/lang/IllegalArgumentException;",
+            "missing name");
+        goto bail;
+    }
+
+    LOGV("+++ Generate proxy class '%s' %p from %d interface classes\n",
+        nameStr, loader, interfaces->length);
+
+
+    /*
+     * Characteristics of a Proxy class:
+     * - concrete class, public and final
+     * - superclass is java.lang.reflect.Proxy
+     * - implements all listed interfaces (req'd for instanceof)
+     * - has one method for each method in the interfaces (barring duplicates)
+     * - has one constructor (takes an InvocationHandler arg)
+     * - has overrides for hashCode, equals, and toString (these come first)
+     * - has one field, a reference to the InvocationHandler object
+     *
+     * The idea here is to create a class object and fill in the details
+     * as we would in loadClassFromDex(), and then call dvmLinkClass() to do
+     * all the heavy lifting (notably populating the virtual and interface
+     * method tables).
+     */
+
+    /*
+     * Generate a temporary list of virtual methods.
+     */
+    int methodCount;
+    if (!gatherMethods(interfaces, &methods, &methodCount))
+        goto bail;
+
+    /*
+     * Allocate storage for the class object and set some basic fields.
+     */
+    newClass = (ClassObject*) dvmMalloc(sizeof(*newClass), ALLOC_DEFAULT);
+    if (newClass == NULL) {
+        /*
+         * A bare "return NULL" here would leak "nameStr" and "methods".
+         * dvmMalloc has already raised an exception, so just release the
+         * temporaries and report failure.
+         */
+        free(nameStr);
+        free(methods);
+        return NULL;
+    }
+    DVM_OBJECT_INIT(&newClass->obj, gDvm.unlinkedJavaLangClass);
+    newClass->descriptorAlloc = dvmNameToDescriptor(nameStr);
+    newClass->descriptor = newClass->descriptorAlloc;
+    newClass->accessFlags = ACC_PUBLIC | ACC_FINAL;
+    newClass->super = gDvm.classJavaLangReflectProxy;
+    newClass->primitiveType = PRIM_NOT;
+    newClass->classLoader = loader;
+#if WITH_HPROF && WITH_HPROF_STACK
+    newClass->hprofSerialNumber = 0;
+    hprofFillInStackTrace(newClass);
+#endif
+
+    /*
+     * Add direct method definitions.  We have one (the constructor).
+     */
+    newClass->directMethodCount = 1;
+    newClass->directMethods = (Method*) dvmLinearAlloc(newClass->classLoader,
+            1 * sizeof(Method));
+    createConstructor(newClass, &newClass->directMethods[0]);
+    dvmLinearReadOnly(newClass->classLoader, newClass->directMethods);
+
+    /*
+     * Add virtual method definitions.
+     */
+    newClass->virtualMethodCount = methodCount;
+    newClass->virtualMethods = (Method*) dvmLinearAlloc(newClass->classLoader,
+            newClass->virtualMethodCount * sizeof(Method));
+    for (i = 0; i < newClass->virtualMethodCount; i++) {
+        createHandlerMethod(newClass, &newClass->virtualMethods[i],methods[i]);
+    }
+    dvmLinearReadOnly(newClass->classLoader, newClass->virtualMethods);
+
+    /*
+     * Add interface list.
+     */
+    int interfaceCount = interfaces->length;
+    ClassObject** ifArray = (ClassObject**) interfaces->contents;
+    newClass->interfaceCount = interfaceCount;
+    newClass->interfaces = (ClassObject**)dvmLinearAlloc(newClass->classLoader,
+                                sizeof(ClassObject*) * interfaceCount);
+    for (i = 0; i < interfaceCount; i++)
+        newClass->interfaces[i] = ifArray[i];
+    dvmLinearReadOnly(newClass->classLoader, newClass->interfaces);
+
+    /*
+     * The class has one instance field, "protected InvocationHandler h",
+     * which is filled in by the constructor.
+     */
+    newClass->ifieldCount = 1;
+    newClass->ifields = (InstField*) dvmLinearAlloc(newClass->classLoader,
+            1 * sizeof(InstField));
+    InstField* ifield = &newClass->ifields[0];
+    ifield->field.clazz = newClass;
+    ifield->field.name = "h";
+    ifield->field.signature = "Ljava/lang/reflect/InvocationHandler;";
+    ifield->field.accessFlags = ACC_PROTECTED;
+    ifield->byteOffset = -1;        /* set later */
+    dvmLinearReadOnly(newClass->classLoader, newClass->ifields);
+
+
+    /*
+     * Everything is ready.  See if the linker will lap it up.
+     */
+    newClass->status = CLASS_LOADED;
+    if (!dvmLinkClass(newClass, true)) {
+        LOGI("Proxy class link failed\n");
+        goto bail;
+    }
+
+    /*
+     * All good.  Add it to the hash table.  We should NOT see a collision
+     * here; if we do, it means the caller has screwed up and provided us
+     * with a duplicate name.
+     */
+    if (!dvmAddClassToHash(newClass)) {
+        LOGE("ERROR: attempted to generate %s more than once\n",
+            newClass->descriptor);
+        goto bail;
+    }
+
+    result = 0;
+
+bail:
+    free(nameStr);
+    free(methods);
+    if (result != 0) {
+        /* must free innards explicitly if we didn't finish linking */
+        dvmFreeClassInnards(newClass);
+        newClass = NULL;
+        dvmThrowException("Ljava/lang/RuntimeException;", NULL);
+    }
+
+    /* this allows the GC to free it */
+    dvmReleaseTrackedAlloc((Object*) newClass, NULL);
+
+    return newClass;
+}
+
+/*
+ * Generate a list of methods.  The Method pointers returned point to the
+ * abstract method definition from the appropriate interface, or to the
+ * virtual method definition in java.lang.Object.
+ *
+ * On success returns "true", with *pMethods set to a malloc()ed array
+ * (caller must free()) and *pMethodCount set to the number of valid
+ * entries.  Returns "false" only if the array allocation fails.
+ */
+static bool gatherMethods(ArrayObject* interfaces, Method*** pMethods,
+    int* pMethodCount)
+{
+    ClassObject** classes;
+    Method** methods;
+    int numInterfaces, maxCount, actualCount;
+    int i;
+
+    /*
+     * Get a maximum count so we can allocate storage.  We need the
+     * methods declared by each interface and all of its superinterfaces.
+     */
+    maxCount = 3;       // 3 methods in java.lang.Object
+    numInterfaces = interfaces->length;
+    classes = (ClassObject**) interfaces->contents;
+
+    /*
+     * This is an upper bound only: interfaces may share superinterfaces,
+     * so the same method can be counted several times.  Duplicates are
+     * filtered out by addMethod() during the second pass.
+     */
+    for (i = 0; i < numInterfaces; i++, classes++) {
+        ClassObject* clazz = *classes;
+
+        LOGVV("---  %s virtualMethodCount=%d\n",
+            clazz->descriptor, clazz->virtualMethodCount);
+        maxCount += clazz->virtualMethodCount;
+
+        int j;
+        for (j = 0; j < clazz->iftableCount; j++) {
+            ClassObject* iclass = clazz->iftable[j].clazz;
+
+            LOGVV("---  +%s %d\n",
+                iclass->descriptor, iclass->virtualMethodCount);
+            maxCount += iclass->virtualMethodCount;
+        }
+    }
+
+    methods = (Method**) malloc(maxCount * sizeof(*methods));
+    if (methods == NULL)
+        return false;
+
+    /*
+     * First three entries are the java.lang.Object methods.
+     */
+    ClassObject* obj = gDvm.classJavaLangObject;
+    methods[0] = obj->vtable[gDvm.voffJavaLangObject_equals];
+    methods[1] = obj->vtable[gDvm.voffJavaLangObject_hashCode];
+    methods[2] = obj->vtable[gDvm.voffJavaLangObject_toString];
+    actualCount = 3;
+
+    /*
+     * Add the methods from each interface, in order, checking for
+     * duplicates.  This is O(n^2), but that should be okay here.
+     */
+    classes = (ClassObject**) interfaces->contents;
+    for (i = 0; i < numInterfaces; i++, classes++) {
+        ClassObject* clazz = *classes;
+        int j;
+
+        /* methods declared directly by this interface */
+        for (j = 0; j < clazz->virtualMethodCount; j++) {
+            if (addMethod(&clazz->virtualMethods[j], methods, actualCount))
+                actualCount++;
+        }
+
+        /* methods pulled in from its superinterfaces (via iftable) */
+        for (j = 0; j < clazz->iftableCount; j++) {
+            ClassObject* iclass = clazz->iftable[j].clazz;
+            int k;
+
+            for (k = 0; k < iclass->virtualMethodCount; k++) {
+                if (addMethod(&iclass->virtualMethods[k], methods, actualCount))
+                    actualCount++;
+            }
+        }
+    }
+
+    //for (i = 0; i < actualCount; i++) {
+    //    LOGI(" %d: %s.%s\n",
+    //        i, methods[i]->clazz->descriptor, methods[i]->name);
+    //}
+
+    *pMethods = methods;
+    *pMethodCount = actualCount;
+    return true;
+}
+
+/*
+ * Add a method to "methArray" if a matching method does not already
+ * exist.  Two methods match if they have the same name and signature.
+ *
+ * Returns "true" if the item was added, "false" if a duplicate was
+ * found and the method was not added.
+ */
+static bool addMethod(Method* meth, Method** methArray, int slot)
+{
+    int i;
+
+    /* reject if an entry with the same name+prototype already exists */
+    for (i = 0; i < slot; i++) {
+        if (dvmCompareMethodNamesAndProtos(methArray[i], meth) == 0) {
+            return false;
+        }
+    }
+
+    methArray[slot] = meth;
+    return true;
+}
+
+/*
+ * Create a constructor for our Proxy class.  The constructor takes one
+ * argument, a java.lang.reflect.InvocationHandler.
+ */
+static void createConstructor(ClassObject* clazz, Method* meth)
+{
+    const Method* proto = gDvm.methJavaLangReflectProxy_constructorPrototype;
+
+    meth->clazz = clazz;
+    meth->accessFlags = ACC_PUBLIC | ACC_NATIVE;
+    meth->name = "<init>";
+    /* clone the prototype from the placeholder declared for this purpose */
+    meth->prototype = proto->prototype;
+    meth->shorty = proto->shorty;
+    // no pDexCode or pDexMethod
+
+    /* args plus one register for "this" */
+    int numIns = dvmComputeMethodArgsSize(meth) + 1;
+    meth->registersSize = meth->insSize = numIns;
+
+    meth->nativeFunc = proxyConstructor;
+}
+
+/*
+ * Create a method in our Proxy class with the name and signature of
+ * the interface method it implements.
+ */
+static void createHandlerMethod(ClassObject* clazz, Method* dstMeth,
+    const Method* srcMeth)
+{
+    dstMeth->clazz = clazz;
+    /* tuck the interface Method away in "insns"; proxyInvoker digs it out */
+    dstMeth->insns = (u2*) srcMeth;
+    dstMeth->accessFlags = ACC_PUBLIC | ACC_NATIVE;
+    dstMeth->name = srcMeth->name;
+    dstMeth->prototype = srcMeth->prototype;
+    dstMeth->shorty = srcMeth->shorty;
+    // no pDexCode or pDexMethod
+
+    /* args plus one register for "this" */
+    int numIns = dvmComputeMethodArgsSize(dstMeth) + 1;
+    dstMeth->registersSize = dstMeth->insSize = numIns;
+
+    dstMeth->nativeFunc = proxyInvoker;
+}
+
+/*
+ * Return a new Object[] array with the contents of "args".  We determine
+ * the number and types of values in "args" based on the method signature.
+ * Primitive types are boxed.
+ *
+ * The caller must call dvmReleaseTrackedAlloc() on the return value.
+ *
+ * On failure, returns NULL with an appropriate exception raised.
+ */
+static ArrayObject* boxMethodArgs(const Method* method, const u4* args)
+{
+    const char* shortyParam = &method->shorty[1];   /* [0] is the return type */
+    ArrayObject* argArray;
+    Object** slots;
+    int paramCount;
+    int srcIndex, dstIndex;
+
+    /* count args */
+    paramCount = dexProtoGetParameterCount(&method->prototype);
+
+    /* allocate storage */
+    argArray = dvmAllocArray(gDvm.classJavaLangObjectArray, paramCount,
+        kObjectArrayRefWidth, ALLOC_DEFAULT);
+    if (argArray == NULL)
+        return NULL;
+    slots = (Object**) argArray->contents;
+
+    /*
+     * Fill in the array, walking the shorty to decode each slot.
+     */
+    srcIndex = 0;
+    dstIndex = 0;
+    while (*shortyParam != '\0') {
+        char typeChar = *(shortyParam++);
+        JValue value;
+
+        switch (typeChar) {
+        case 'Z':
+        case 'C':
+        case 'F':
+        case 'B':
+        case 'S':
+        case 'I':
+            /* 32-bit primitive: one arg slot, box it */
+            value.i = args[srcIndex++];
+            slots[dstIndex] = (Object*) dvmWrapPrimitive(value,
+                dvmFindPrimitiveClass(typeChar));
+            /* slots[] is inside a tracked array, don't need to hold this too */
+            dvmReleaseTrackedAlloc(slots[dstIndex], NULL);
+            dstIndex++;
+            break;
+        case 'D':
+        case 'J':
+            /* 64-bit primitive: two arg slots, box it */
+            value.j = dvmGetArgLong(args, srcIndex);
+            srcIndex += 2;
+            slots[dstIndex] = (Object*) dvmWrapPrimitive(value,
+                dvmFindPrimitiveClass(typeChar));
+            dvmReleaseTrackedAlloc(slots[dstIndex], NULL);
+            dstIndex++;
+            break;
+        case '[':
+        case 'L':
+            /* reference: passes through unchanged */
+            slots[dstIndex++] = (Object*) args[srcIndex++];
+            break;
+        }
+    }
+
+    return argArray;
+}
+
+/*
+ * This is the constructor for a generated proxy object.
+ * args[0] is the new instance, args[1] the InvocationHandler.
+ */
+static void proxyConstructor(const u4* args, JValue* pResult,
+    const Method* method, Thread* self)
+{
+    Object* obj = (Object*) args[0];
+    Object* handler = (Object*) args[1];
+    ClassObject* clazz = obj->clazz;
+
+    /* locate the instance field that holds the handler reference */
+    int fieldOffset = dvmFindFieldOffset(clazz, "h",
+                    "Ljava/lang/reflect/InvocationHandler;");
+    if (fieldOffset < 0) {
+        LOGE("Unable to find 'h' in Proxy object\n");
+        //dvmDumpClass(clazz, kDumpClassFullDetail);
+        dvmAbort();     // this should never happen
+    }
+    dvmSetFieldObject(obj, fieldOffset, handler);
+}
+
+/*
+ * This is the common message body for proxy methods.
+ *
+ * The method we're calling looks like:
+ *   public Object invoke(Object proxy, Method method, Object[] args)
+ *
+ * This means we have to create a Method object, box our arguments into
+ * a new Object[] array, make the call, and unbox the return value if
+ * necessary.
+ *
+ * On entry, args[0] is the proxy instance ("this") and the remaining
+ * slots hold the caller's arguments; "method" is the generated proxy
+ * method that was invoked.  The (possibly unboxed) result is written
+ * to *pResult; on failure an exception is left raised in "self".
+ */
+static void proxyInvoker(const u4* args, JValue* pResult,
+    const Method* method, Thread* self)
+{
+    Object* thisObj = (Object*) args[0];
+    Object* methodObj = NULL;
+    ArrayObject* argArray = NULL;
+    Object* handler;
+    Method* invoke;
+    ClassObject* returnType;
+    int hOffset;
+    JValue invokeResult;
+
+    /*
+     * Retrieve handler object for this proxy instance.
+     */
+    hOffset = dvmFindFieldOffset(thisObj->clazz, "h",
+                    "Ljava/lang/reflect/InvocationHandler;");
+    if (hOffset < 0) {
+        LOGE("Unable to find 'h' in Proxy object\n");
+        dvmAbort();
+    }
+    handler = dvmGetFieldObject(thisObj, hOffset);
+
+    /*
+     * Find the invoke() method, looking in "this"s class.  (Because we
+     * start here we don't have to convert it to a vtable index and then
+     * index into this' vtable.)
+     */
+    invoke = dvmFindVirtualMethodHierByDescriptor(handler->clazz, "invoke",
+            "(Ljava/lang/Object;Ljava/lang/reflect/Method;[Ljava/lang/Object;)Ljava/lang/Object;");
+    if (invoke == NULL) {
+        LOGE("Unable to find invoke()\n");
+        dvmAbort();
+    }
+
+    LOGV("invoke: %s.%s, this=%p, handler=%s\n",
+        method->clazz->descriptor, method->name,
+        thisObj, handler->clazz->descriptor);
+
+    /*
+     * Create a java.lang.reflect.Method object for this method.
+     *
+     * We don't want to use "method", because that's the concrete
+     * implementation in the proxy class.  We want the abstract Method
+     * from the declaring interface.  We have a pointer to it tucked
+     * away in the "insns" field.
+     *
+     * TODO: this could be cached for performance.
+     */
+    methodObj = dvmCreateReflectMethodObject((Method*) method->insns);
+    if (methodObj == NULL) {
+        assert(dvmCheckException(self));
+        goto bail;
+    }
+
+    /*
+     * Determine the return type from the signature.
+     *
+     * TODO: this could be cached for performance.
+     */
+    returnType = dvmGetBoxedReturnType(method);
+    if (returnType == NULL) {
+        char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+        LOGE("Could not determine return type for '%s'\n", desc);
+        free(desc);
+        assert(dvmCheckException(self));
+        goto bail;
+    }
+    LOGV("  return type will be %s\n", returnType->descriptor);
+
+    /*
+     * Convert "args" array into Object[] array, using the method
+     * signature to determine types.  If the method takes no arguments,
+     * we must pass null.
+     */
+    argArray = boxMethodArgs(method, args+1);
+    if (dvmCheckException(self))
+        goto bail;
+
+    /*
+     * Call h.invoke(proxy, method, args).
+     *
+     * We don't need to repackage exceptions, so if one has been thrown
+     * just jump to the end.
+     */
+    dvmCallMethod(self, invoke, handler, &invokeResult,
+        thisObj, methodObj, argArray);
+    if (dvmCheckException(self))
+        goto bail;
+
+    /*
+     * Unbox the return value.  If it's the wrong type, throw a
+     * ClassCastException.  If it's a null pointer and we need a
+     * primitive type, throw a NullPointerException.
+     */
+    if (returnType->primitiveType == PRIM_VOID) {
+        LOGVV("+++ ignoring return to void\n");
+    } else if (invokeResult.l == NULL) {
+        if (dvmIsPrimitiveClass(returnType)) {
+            dvmThrowException("Ljava/lang/NullPointerException;",
+                "null result when primitive expected");
+            goto bail;
+        }
+        pResult->l = NULL;
+    } else {
+        if (!dvmUnwrapPrimitive(invokeResult.l, returnType, pResult)) {
+            dvmThrowExceptionWithClassMessage("Ljava/lang/ClassCastException;",
+                ((Object*)invokeResult.l)->clazz->descriptor);
+            goto bail;
+        }
+    }
+
+bail:
+    /* release the tracked temporaries on every path, success or failure */
+    dvmReleaseTrackedAlloc(methodObj, self);
+    dvmReleaseTrackedAlloc((Object*)argArray, self);
+}
+
diff --git a/vm/reflect/Reflect.c b/vm/reflect/Reflect.c
new file mode 100644
index 0000000..19a572c
--- /dev/null
+++ b/vm/reflect/Reflect.c
@@ -0,0 +1,1252 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Basic reflection calls and utility functions.
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+
+/*
+ * Cache some classes.
+ */
bool dvmReflectStartup(void)
{
    /* resolve the java.lang.reflect classes and their array types */
    gDvm.classJavaLangReflectAccessibleObject =
        dvmFindSystemClassNoInit("Ljava/lang/reflect/AccessibleObject;");
    gDvm.classJavaLangReflectConstructor =
        dvmFindSystemClassNoInit("Ljava/lang/reflect/Constructor;");
    gDvm.classJavaLangReflectConstructorArray =
        dvmFindArrayClass("[Ljava/lang/reflect/Constructor;", NULL);
    gDvm.classJavaLangReflectField =
        dvmFindSystemClassNoInit("Ljava/lang/reflect/Field;");
    gDvm.classJavaLangReflectFieldArray =
        dvmFindArrayClass("[Ljava/lang/reflect/Field;", NULL);
    gDvm.classJavaLangReflectMethod =
        dvmFindSystemClassNoInit("Ljava/lang/reflect/Method;");
    gDvm.classJavaLangReflectMethodArray =
        dvmFindArrayClass("[Ljava/lang/reflect/Method;", NULL);
    gDvm.classJavaLangReflectProxy =
        dvmFindSystemClassNoInit("Ljava/lang/reflect/Proxy;");
    if (gDvm.classJavaLangReflectAccessibleObject == NULL ||
        gDvm.classJavaLangReflectConstructor == NULL ||
        gDvm.classJavaLangReflectConstructorArray == NULL ||
        gDvm.classJavaLangReflectField == NULL ||
        gDvm.classJavaLangReflectFieldArray == NULL ||
        gDvm.classJavaLangReflectMethod == NULL ||
        gDvm.classJavaLangReflectMethodArray == NULL ||
        gDvm.classJavaLangReflectProxy == NULL)
    {
        LOGE("Could not find one or more reflection classes\n");
        return false;
    }

    /* cache the constructors we invoke when creating reflection objects
     * (see createFieldObject / createConstructorObject /
     * dvmCreateReflectMethodObject below) */
    gDvm.methJavaLangReflectConstructor_init =
        dvmFindDirectMethodByDescriptor(gDvm.classJavaLangReflectConstructor, "<init>",
        "(Ljava/lang/Class;[Ljava/lang/Class;[Ljava/lang/Class;I)V");
    gDvm.methJavaLangReflectField_init =
        dvmFindDirectMethodByDescriptor(gDvm.classJavaLangReflectField, "<init>",
        "(Ljava/lang/Class;Ljava/lang/Class;Ljava/lang/String;I)V");
    gDvm.methJavaLangReflectMethod_init =
        dvmFindDirectMethodByDescriptor(gDvm.classJavaLangReflectMethod, "<init>",
        "(Ljava/lang/Class;[Ljava/lang/Class;[Ljava/lang/Class;Ljava/lang/Class;Ljava/lang/String;I)V");
    if (gDvm.methJavaLangReflectConstructor_init == NULL ||
        gDvm.methJavaLangReflectField_init == NULL ||
        gDvm.methJavaLangReflectMethod_init == NULL)
    {
        LOGE("Could not find reflection constructors\n");
        return false;
    }

    /* Class[] and Object[] are used for signature/argument marshalling */
    gDvm.classJavaLangClassArray =
        dvmFindArrayClass("[Ljava/lang/Class;", NULL);
    gDvm.classJavaLangObjectArray =
        dvmFindArrayClass("[Ljava/lang/Object;", NULL);
    if (gDvm.classJavaLangClassArray == NULL ||
        gDvm.classJavaLangObjectArray == NULL)
    {
        LOGE("Could not find class-array or object-array class\n");
        return false;
    }

    /* cache instance-field offsets that native reflection code reads
     * directly instead of going through the field-access helpers */
    gDvm.offJavaLangReflectAccessibleObject_flag =
        dvmFindFieldOffset(gDvm.classJavaLangReflectAccessibleObject, "flag",
            "Z");

    gDvm.offJavaLangReflectConstructor_slot =
        dvmFindFieldOffset(gDvm.classJavaLangReflectConstructor, "slot", "I");
    gDvm.offJavaLangReflectConstructor_declClass =
        dvmFindFieldOffset(gDvm.classJavaLangReflectConstructor,
            "declaringClass", "Ljava/lang/Class;");

    gDvm.offJavaLangReflectField_slot =
        dvmFindFieldOffset(gDvm.classJavaLangReflectField, "slot", "I");
    gDvm.offJavaLangReflectField_declClass =
        dvmFindFieldOffset(gDvm.classJavaLangReflectField,
            "declaringClass", "Ljava/lang/Class;");

    gDvm.offJavaLangReflectMethod_slot =
        dvmFindFieldOffset(gDvm.classJavaLangReflectMethod, "slot", "I");
    gDvm.offJavaLangReflectMethod_declClass =
        dvmFindFieldOffset(gDvm.classJavaLangReflectMethod,
            "declaringClass", "Ljava/lang/Class;");

    /* a negative offset means the field lookup failed */
    if (gDvm.offJavaLangReflectAccessibleObject_flag < 0 ||
        gDvm.offJavaLangReflectConstructor_slot < 0 ||
        gDvm.offJavaLangReflectConstructor_declClass < 0 ||
        gDvm.offJavaLangReflectField_slot < 0 ||
        gDvm.offJavaLangReflectField_declClass < 0 ||
        gDvm.offJavaLangReflectMethod_slot < 0 ||
        gDvm.offJavaLangReflectMethod_declClass < 0)
    {
        LOGE("Could not find reflection fields\n");
        return false;
    }

    /* let the proxy and annotation sub-modules cache their own items */
    if (!dvmReflectProxyStartup())
        return false;
    if (!dvmReflectAnnotationStartup())
        return false;

    return true;
}
+
+/*
+ * Clean up.
+ */
void dvmReflectShutdown(void)
{
    // nothing to do -- dvmReflectStartup only caches references and
    // offsets in gDvm, which require no explicit release
}
+
+/*
+ * For some of the reflection stuff we need to un-box primitives, e.g.
+ * convert a java/lang/Integer to int or even a float.  We assume that
+ * the first instance field holds the value.
+ *
+ * To verify this, we either need to ensure that the class has only one
+ * instance field, or we need to look up the field by name and verify
+ * that it comes first.  The former is simpler, and should work.
+ */
+bool dvmValidateBoxClasses()
+{
+    static const char* classes[] = {
+        "Ljava/lang/Boolean;",
+        "Ljava/lang/Character;",
+        "Ljava/lang/Float;",
+        "Ljava/lang/Double;",
+        "Ljava/lang/Byte;",
+        "Ljava/lang/Short;",
+        "Ljava/lang/Integer;",
+        "Ljava/lang/Long;",
+        NULL
+    };
+    const char** ccp;
+
+    for (ccp = classes; *ccp != NULL; ccp++) {
+        ClassObject* clazz;
+
+        clazz = dvmFindClassNoInit(*ccp, NULL);
+        if (clazz == NULL) {
+            LOGE("Couldn't find '%s'\n", *ccp);
+            return false;
+        }
+
+        if (clazz->ifieldCount != 1) {
+            LOGE("Found %d instance fields in '%s'\n",
+                clazz->ifieldCount, *ccp);
+            return false;
+        }
+    }
+
+    return true;
+}
+
+
+/*
+ * Find the named class object.  We have to trim "*pSignature" down to just
+ * the first token, do the lookup, and then restore anything important
+ * that we've stomped on.
+ *
+ * "pSig" will be advanced to the start of the next token.
+ */
static ClassObject* convertSignaturePartToClass(char** pSignature,
    const ClassObject* defClass)
{
    ClassObject* clazz = NULL;
    char* signature = *pSignature;

    if (*signature == '[') {
        /* looks like "[[[Landroid/debug/Stuff;"; we want the whole thing */
        char savedChar;

        /* skip over every array dimension */
        while (*++signature == '[')
            ;
        if (*signature == 'L') {
            /* reference element type: skip forward to the closing ';' */
            while (*++signature != ';')
                ;
        }

        /* advance past ';', and stomp on whatever comes next */
        savedChar = *++signature;
        *signature = '\0';      /* temporarily NUL-terminate this token */
        clazz = dvmFindArrayClass(*pSignature, defClass->classLoader);
        *signature = savedChar; /* undo the truncation */
    } else if (*signature == 'L') {
        /* looks like 'Landroid/debug/Stuff;"; we want the whole thing */
        char savedChar;
        while (*++signature != ';')
            ;
        savedChar = *++signature;
        *signature = '\0';      /* temporarily NUL-terminate this token */
        clazz = dvmFindClass(*pSignature, defClass->classLoader);
        *signature = savedChar; /* undo the truncation */
    } else {
        /* single-character primitive descriptor, e.g. 'I' or 'F' */
        clazz = dvmFindPrimitiveClass(*signature++);
    }

    if (clazz == NULL) {
        /* lookup failed; report NoSuchMethodException instead of whatever
         * the class-lookup machinery may have thrown */
        LOGW("Unable to match class for part: '%s'\n", *pSignature);
        dvmClearException(dvmThreadSelf());
        dvmThrowException("Ljava/lang/NoSuchMethodException;", NULL);
    }
    *pSignature = signature;    /* advance caller's cursor past this token */
    return clazz;
}
+
+/*
+ * Convert the method signature to an array of classes.
+ *
+ * The tokenization process may mangle "*pSignature".  On return, it will
+ * be pointing at the closing ')'.
+ *
+ * "defClass" is the method's class, which is needed to make class loaders
+ * happy.
+ */
+static ArrayObject* convertSignatureToClassArray(char** pSignature,
+    ClassObject* defClass)
+{
+    ArrayObject* classArray;
+    ClassObject** classes;
+    char* signature = *pSignature;
+    char* cp;
+    int i, count;
+
+    assert(*signature == '(');
+    signature++;
+
+    /* count up the number of parameters */
+    count = 0;
+    cp = signature;
+    while (*cp != ')') {
+        count++;
+
+        if (*cp == '[') {
+            while (*++cp == '[')
+                ;
+        }
+        if (*cp == 'L') {
+            while (*++cp != ';')
+                ;
+        }
+        cp++;
+    }
+    LOGVV("REFLECT found %d parameters in '%s'\n", count, *pSignature);
+
+    /* create an array to hold them */
+    classArray = dvmAllocArray(gDvm.classJavaLangClassArray, count,
+                    kObjectArrayRefWidth, ALLOC_DEFAULT);
+    if (classArray == NULL)
+        return NULL;
+
+    /* fill it in */
+    classes = (ClassObject**) classArray->contents;
+    cp = signature;
+    for (i = 0; i < count; i++) {
+        ClassObject* clazz;
+
+        clazz = convertSignaturePartToClass(&cp, defClass);
+        if (clazz == NULL) {
+            assert(dvmCheckException(dvmThreadSelf()));
+            return NULL;
+        }
+        LOGVV("REFLECT  %d: '%s'\n", i, clazz->descriptor);
+
+        *classes++ = clazz;
+    }
+
+    *pSignature = cp;
+
+    /* caller must call dvmReleaseTrackedAlloc */
+    return classArray;
+}
+
+
+/*
+ * Convert a field pointer to a slot number.
+ *
+ * We use positive values starting from 0 for instance fields, negative
+ * values starting from -1 for static fields.
+ */
+static int fieldToSlot(const Field* field, const ClassObject* clazz)
+{
+    int slot;
+
+    if (dvmIsStaticField(field)) {
+        slot = (StaticField*)field - clazz->sfields;
+        assert(slot >= 0 && slot < clazz->sfieldCount);
+        slot = -(slot+1);
+    } else {
+        slot = (InstField*)field - clazz->ifields;
+        assert(slot >= 0 && slot < clazz->ifieldCount);
+    }
+
+    return slot;
+}
+
+/*
+ * Convert a slot number to a field pointer.
+ */
+Field* dvmSlotToField(ClassObject* clazz, int slot)
+{
+    if (slot < 0) {
+        slot = -(slot+1);
+        assert(slot < clazz->sfieldCount);
+        return (Field*) &clazz->sfields[slot];
+    } else {
+        assert(slot < clazz->ifieldCount);
+        return (Field*) &clazz->ifields[slot];
+    }
+}
+
+/*
+ * Create a new java.lang.reflect.Field object from "field".
+ *
+ * The Field spec doesn't specify the constructor.  We're going to use the
+ * one from our existing class libs:
+ *
+ *  private Field(Class declaringClass, Class type, String name, int slot)
+ */
+static Object* createFieldObject(Field* field, const ClassObject* clazz)
+{
+    Object* result = NULL;
+    Object* fieldObj = NULL;
+    StringObject* nameObj = NULL;
+    ClassObject* type;
+    char* mangle;
+    char* cp;
+    int slot;
+
+    assert(dvmIsClassInitialized(gDvm.classJavaLangReflectField));
+
+    fieldObj = dvmAllocObject(gDvm.classJavaLangReflectField, ALLOC_DEFAULT);
+    if (fieldObj == NULL)
+        goto bail;
+
+    cp = mangle = strdup(field->signature);
+    type = convertSignaturePartToClass(&cp, clazz);
+    free(mangle);
+    if (type == NULL)
+        goto bail;
+
+    nameObj = dvmCreateStringFromCstr(field->name, ALLOC_DEFAULT);
+    if (nameObj == NULL)
+        goto bail;
+
+    slot = fieldToSlot(field, clazz);
+
+    JValue unused;
+    dvmCallMethod(dvmThreadSelf(), gDvm.methJavaLangReflectField_init,
+        fieldObj, &unused, clazz, type, nameObj, slot);
+    if (dvmCheckException(dvmThreadSelf())) {
+        LOGD("Field class init threw exception\n");
+        goto bail;
+    }
+
+    result = fieldObj;
+
+bail:
+    dvmReleaseTrackedAlloc((Object*) nameObj, NULL);
+    if (result == NULL)
+        dvmReleaseTrackedAlloc((Object*) fieldObj, NULL);
+    /* caller must dvmReleaseTrackedAlloc(result) */
+    return result;
+}
+
+/*
+ * Get an array with all fields declared by a class.
+ *
+ * This includes both static and instance fields.
+ */
+ArrayObject* dvmGetDeclaredFields(ClassObject* clazz, bool publicOnly)
+{
+    ArrayObject* fieldArray = NULL;
+    Object** fields;
+    int i, count;
+
+    if (!dvmIsClassInitialized(gDvm.classJavaLangReflectField))
+        dvmInitClass(gDvm.classJavaLangReflectField);
+
+    /* count #of fields */
+    if (!publicOnly)
+        count = clazz->sfieldCount + clazz->ifieldCount;
+    else {
+        count = 0;
+        for (i = 0; i < clazz->sfieldCount; i++) {
+            if ((clazz->sfields[i].field.accessFlags & ACC_PUBLIC) != 0)
+                count++;
+        }
+        for (i = 0; i < clazz->ifieldCount; i++) {
+            if ((clazz->ifields[i].field.accessFlags & ACC_PUBLIC) != 0)
+                count++;
+        }
+    }
+
+    /* create the Field[] array */
+    fieldArray = dvmAllocArray(gDvm.classJavaLangReflectFieldArray, count,
+                    kObjectArrayRefWidth, ALLOC_DEFAULT);
+    if (fieldArray == NULL)
+        return NULL;
+    fields = (Object**) fieldArray->contents;
+
+    /* populate */
+    for (i = 0; i < clazz->sfieldCount; i++) {
+        if (!publicOnly ||
+            (clazz->sfields[i].field.accessFlags & ACC_PUBLIC) != 0)
+        {
+            *fields = createFieldObject(&clazz->sfields[i].field, clazz);
+            if (*fields == NULL)
+                goto fail;
+            dvmReleaseTrackedAlloc(*fields, NULL);
+            fields++;
+            count--;
+        }
+    }
+    for (i = 0; i < clazz->ifieldCount; i++) {
+        if (!publicOnly ||
+            (clazz->ifields[i].field.accessFlags & ACC_PUBLIC) != 0)
+        {
+            *fields = createFieldObject(&clazz->ifields[i].field, clazz);
+            if (*fields == NULL)
+                goto fail;
+            dvmReleaseTrackedAlloc(*fields, NULL);
+            fields++;
+            count--;
+        }
+    }
+
+    /* caller must call dvmReleaseTrackedAlloc */
+    return fieldArray;
+
+fail:
+    dvmReleaseTrackedAlloc((Object*) fieldArray, NULL);
+    return NULL;
+}
+
+
+/*
+ * Convert a method pointer to a slot number.
+ *
+ * We use positive values starting from 0 for virtual methods, negative
+ * values starting from -1 for static methods.
+ */
+static int methodToSlot(const Method* meth)
+{
+    ClassObject* clazz = meth->clazz;
+    int slot;
+
+    if (dvmIsDirectMethod(meth)) {
+        slot = meth - clazz->directMethods;
+        assert(slot >= 0 && slot < clazz->directMethodCount);
+        slot = -(slot+1);
+    } else {
+        slot = meth - clazz->virtualMethods;
+        assert(slot >= 0 && slot < clazz->virtualMethodCount);
+    }
+
+    return slot;
+}
+
+/*
+ * Convert a slot number to a method pointer.
+ */
+Method* dvmSlotToMethod(ClassObject* clazz, int slot)
+{
+    if (slot < 0) {
+        slot = -(slot+1);
+        assert(slot < clazz->directMethodCount);
+        return &clazz->directMethods[slot];
+    } else {
+        assert(slot < clazz->virtualMethodCount);
+        return &clazz->virtualMethods[slot];
+    }
+}
+
+/*
+ * Create a new java/lang/reflect/Constructor object, using the contents of
+ * "meth" to construct it.
+ *
+ * The spec doesn't specify the constructor.  We're going to use the
+ * one from our existing class libs:
+ *
+ *  private Constructor (Class declaringClass, Class[] ptypes, Class[] extypes,
+ *      int slot)
+ */
+static Object* createConstructorObject(Method* meth)
+{
+    Object* result = NULL;
+    ArrayObject* params = NULL;
+    ArrayObject* exceptions = NULL;
+    Object* consObj;
+    DexStringCache mangle;
+    char* cp;
+    int slot;
+
+    dexStringCacheInit(&mangle);
+
+    /* parent should guarantee init so we don't have to check on every call */
+    assert(dvmIsClassInitialized(gDvm.classJavaLangReflectConstructor));
+
+    consObj = dvmAllocObject(gDvm.classJavaLangReflectConstructor,
+                ALLOC_DEFAULT);
+    if (consObj == NULL)
+        goto bail;
+
+    /*
+     * Convert the signature string into an array of classes representing
+     * the arguments.
+     */
+    cp = dvmCopyDescriptorStringFromMethod(meth, &mangle);
+    params = convertSignatureToClassArray(&cp, meth->clazz);
+    if (params == NULL)
+        goto bail;
+    assert(*cp == ')');
+    assert(*(cp+1) == 'V');
+
+    /*
+     * Create an array with one entry for every exception that the class
+     * is declared to throw.
+     */
+    exceptions = dvmGetMethodThrows(meth);
+    if (dvmCheckException(dvmThreadSelf()))
+        goto bail;
+
+    slot = methodToSlot(meth);
+
+    JValue unused;
+    dvmCallMethod(dvmThreadSelf(), gDvm.methJavaLangReflectConstructor_init,
+        consObj, &unused, meth->clazz, params, exceptions, slot);
+    if (dvmCheckException(dvmThreadSelf())) {
+        LOGD("Constructor class init threw exception\n");
+        goto bail;
+    }
+
+    result = consObj;
+
+bail:
+    dexStringCacheRelease(&mangle);
+    dvmReleaseTrackedAlloc((Object*) params, NULL);
+    dvmReleaseTrackedAlloc((Object*) exceptions, NULL);
+    if (result == NULL) {
+        assert(dvmCheckException(dvmThreadSelf()));
+        dvmReleaseTrackedAlloc(consObj, NULL);
+    }
+    /* caller must dvmReleaseTrackedAlloc(result) */
+    return result;
+}
+
+/*
+ * Get an array with all constructors declared by a class.
+ */
+ArrayObject* dvmGetDeclaredConstructors(ClassObject* clazz, bool publicOnly)
+{
+    ArrayObject* consArray;
+    Object** consObjPtr;
+    Method* meth;
+    int i, count;
+
+    if (!dvmIsClassInitialized(gDvm.classJavaLangReflectConstructor))
+        dvmInitClass(gDvm.classJavaLangReflectConstructor);
+
+    /*
+     * Ordinarily we init the class the first time we resolve a method.
+     * We're bypassing the normal resolution mechanism, so we init it here.
+     */
+    if (!dvmIsClassInitialized(clazz))
+        dvmInitClass(clazz);
+
+    /*
+     * Count up the #of relevant methods.
+     */
+    count = 0;
+    meth = clazz->directMethods;
+    for (i = 0; i < clazz->directMethodCount; i++, meth++) {
+        if ((!publicOnly || dvmIsPublicMethod(meth)) &&
+            dvmIsConstructorMethod(meth) && !dvmIsStaticMethod(meth))
+        {
+            count++;
+        }
+    }
+
+    /*
+     * Create an array of Constructor objects.
+     */
+    consArray = dvmAllocArray(gDvm.classJavaLangReflectConstructorArray, count,
+                kObjectArrayRefWidth, ALLOC_DEFAULT);
+    if (consArray == NULL)
+        return NULL;
+
+    consObjPtr = (Object**) consArray->contents;
+
+    /*
+     * Fill out the array.
+     */
+    meth = clazz->directMethods;
+    for (i = 0; i < clazz->directMethodCount; i++, meth++) {
+        if ((!publicOnly || dvmIsPublicMethod(meth)) &&
+            dvmIsConstructorMethod(meth) && !dvmIsStaticMethod(meth))
+        {
+            Object* consObj = createConstructorObject(meth);
+            if (consObj == NULL)
+                goto fail;
+            *consObjPtr++ = consObj;
+            dvmReleaseTrackedAlloc(consObj, NULL);
+        }
+    }
+
+    assert(consObjPtr - (Object**) consArray->contents == count);
+
+    /* caller must call dvmReleaseTrackedAlloc */
+    return consArray;
+
+fail:
+    dvmReleaseTrackedAlloc((Object*) consArray, NULL);
+    return NULL;
+}
+
+/*
+ * Create a new java/lang/reflect/Method object, using the contents of
+ * "meth" to construct it.
+ *
+ * The spec doesn't specify the constructor.  We're going to use the
+ * one from our existing class libs:
+ *
+ *  private Method(Class declaring, Class[] paramTypes, Class[] exceptTypes,
+ *      Class returnType, String name, int slot)
+ *
+ * The caller must call dvmReleaseTrackedAlloc() on the result.
+ */
+Object* dvmCreateReflectMethodObject(const Method* meth)
+{
+    Object* result = NULL;
+    ArrayObject* params = NULL;
+    ArrayObject* exceptions = NULL;
+    StringObject* nameObj = NULL;
+    Object* methObj;
+    ClassObject* returnType;
+    DexStringCache mangle;
+    char* cp;
+    int slot;
+
+    dexStringCacheInit(&mangle);
+
+    /* parent should guarantee init so we don't have to check on every call */
+    assert(dvmIsClassInitialized(gDvm.classJavaLangReflectMethod));
+
+    methObj = dvmAllocObject(gDvm.classJavaLangReflectMethod, ALLOC_DEFAULT);
+    if (methObj == NULL)
+        goto bail;
+
+    /*
+     * Convert the signature string into an array of classes representing
+     * the arguments, and a class for the return type.
+     */
+    cp = dvmCopyDescriptorStringFromMethod(meth, &mangle);
+    params = convertSignatureToClassArray(&cp, meth->clazz);
+    if (params == NULL)
+        goto bail;
+    assert(*cp == ')');
+    cp++;
+    returnType = convertSignaturePartToClass(&cp, meth->clazz);
+    if (returnType == NULL)
+        goto bail;
+
+    /*
+     * Create an array with one entry for every exception that the class
+     * is declared to throw.
+     */
+    exceptions = dvmGetMethodThrows(meth);
+    if (dvmCheckException(dvmThreadSelf()))
+        goto bail;
+
+    /* method name */
+    nameObj = dvmCreateStringFromCstr(meth->name, ALLOC_DEFAULT);
+    if (nameObj == NULL)
+        goto bail;
+
+    slot = methodToSlot(meth);
+
+    JValue unused;
+    dvmCallMethod(dvmThreadSelf(), gDvm.methJavaLangReflectMethod_init,
+        methObj, &unused, meth->clazz, params, exceptions, returnType,
+        nameObj, slot);
+    if (dvmCheckException(dvmThreadSelf())) {
+        LOGD("Method class init threw exception\n");
+        goto bail;
+    }
+
+    result = methObj;
+
+bail:
+    dexStringCacheRelease(&mangle);
+    if (result == NULL) {
+        assert(dvmCheckException(dvmThreadSelf()));
+    }
+    dvmReleaseTrackedAlloc((Object*) nameObj, NULL);
+    dvmReleaseTrackedAlloc((Object*) params, NULL);
+    dvmReleaseTrackedAlloc((Object*) exceptions, NULL);
+    if (result == NULL)
+        dvmReleaseTrackedAlloc(methObj, NULL);
+    return result;
+}
+
+/*
+ * Get an array with all methods declared by a class.
+ *
+ * This includes both static and virtual methods, and can include private
+ * members if "publicOnly" is false.  It does not include Miranda methods,
+ * since those weren't declared in the class, or constructors.
+ */
+ArrayObject* dvmGetDeclaredMethods(ClassObject* clazz, bool publicOnly)
+{
+    ArrayObject* methodArray;
+    Object** methObjPtr;
+    Method* meth;
+    int i, count;
+
+    if (!dvmIsClassInitialized(gDvm.classJavaLangReflectMethod))
+        dvmInitClass(gDvm.classJavaLangReflectMethod);
+
+    /*
+     * Count up the #of relevant methods.
+     *
+     * Ignore virtual Miranda methods and direct class/object constructors.
+     */
+    count = 0;
+    meth = clazz->virtualMethods;
+    for (i = 0; i < clazz->virtualMethodCount; i++, meth++) {
+        if ((!publicOnly || dvmIsPublicMethod(meth)) &&
+            !dvmIsMirandaMethod(meth))
+        {
+            count++;
+        }
+    }
+    meth = clazz->directMethods;
+    for (i = 0; i < clazz->directMethodCount; i++, meth++) {
+        if ((!publicOnly || dvmIsPublicMethod(meth)) &&
+            meth->name[0] != '<')
+        {
+            count++;
+        }
+    }
+
+    /*
+     * Create an array of Method objects.
+     */
+    methodArray = dvmAllocArray(gDvm.classJavaLangReflectMethodArray, count,
+                kObjectArrayRefWidth, ALLOC_DEFAULT);
+    if (methodArray == NULL)
+        return NULL;
+
+    methObjPtr = (Object**) methodArray->contents;
+
+    /*
+     * Fill out the array.
+     */
+    meth = clazz->virtualMethods;
+    for (i = 0; i < clazz->virtualMethodCount; i++, meth++) {
+        if ((!publicOnly || dvmIsPublicMethod(meth)) &&
+            !dvmIsMirandaMethod(meth))
+        {
+            Object* methObj = dvmCreateReflectMethodObject(meth);
+            if (methObj == NULL)
+                goto fail;
+            *methObjPtr++ = methObj;
+            dvmReleaseTrackedAlloc(methObj, NULL);
+        }
+    }
+    meth = clazz->directMethods;
+    for (i = 0; i < clazz->directMethodCount; i++, meth++) {
+        if ((!publicOnly || dvmIsPublicMethod(meth)) &&
+            meth->name[0] != '<')
+        {
+            Object* methObj = dvmCreateReflectMethodObject(meth);
+            if (methObj == NULL)
+                goto fail;
+            *methObjPtr++ = methObj;
+            dvmReleaseTrackedAlloc(methObj, NULL);
+        }
+    }
+
+    assert(methObjPtr - (Object**) methodArray->contents == count);
+
+    /* caller must call dvmReleaseTrackedAlloc */
+    return methodArray;
+
+fail:
+    dvmReleaseTrackedAlloc((Object*) methodArray, NULL);
+    return NULL;
+}
+
+/*
+ * Get all interfaces a class implements. If this is unable to allocate
+ * the result array, this raises an OutOfMemoryError and returns NULL.
+ */
+ArrayObject* dvmGetInterfaces(ClassObject* clazz)
+{
+    ArrayObject* interfaceArray;
+
+    if (!dvmIsClassInitialized(gDvm.classJavaLangReflectMethod))
+        dvmInitClass(gDvm.classJavaLangReflectMethod);
+
+    /*
+     * Create an array of Class objects.
+     */
+    int count = clazz->interfaceCount;
+    interfaceArray = dvmAllocArray(gDvm.classJavaLangClassArray, count,
+                kObjectArrayRefWidth, ALLOC_DEFAULT);
+    if (interfaceArray == NULL)
+        return NULL;
+
+    /*
+     * Fill out the array.
+     */
+    Object** interfaceObjPtr = (Object**) interfaceArray->contents;
+    int i;
+    for (i = 0; i < count; i++) {
+        *interfaceObjPtr++ = (Object*) clazz->interfaces[i];
+    }
+
+    /* caller must call dvmReleaseTrackedAlloc */
+    return interfaceArray;
+}
+
+/*
+ * Given a boxed primitive type, such as java/lang/Integer, return the
+ * primitive type index.
+ *
+ * Returns PRIM_NOT for void, since we never "box" that.
+ */
+static PrimitiveType getBoxedType(DataObject* arg)
+{
+    static const int kJavaLangLen = 11;     // strlen("Ljava/lang/")
+    const char* name;
+
+    if (arg == NULL)
+        return PRIM_NOT;
+
+    name = arg->obj.clazz->descriptor;
+
+    if (strncmp(name, "Ljava/lang/", kJavaLangLen) != 0)
+        return PRIM_NOT;
+
+    if (strcmp(name + kJavaLangLen, "Boolean;") == 0)
+        return PRIM_BOOLEAN;
+    if (strcmp(name + kJavaLangLen, "Character;") == 0)
+        return PRIM_CHAR;
+    if (strcmp(name + kJavaLangLen, "Float;") == 0)
+        return PRIM_FLOAT;
+    if (strcmp(name + kJavaLangLen, "Double;") == 0)
+        return PRIM_DOUBLE;
+    if (strcmp(name + kJavaLangLen, "Byte;") == 0)
+        return PRIM_BYTE;
+    if (strcmp(name + kJavaLangLen, "Short;") == 0)
+        return PRIM_SHORT;
+    if (strcmp(name + kJavaLangLen, "Integer;") == 0)
+        return PRIM_INT;
+    if (strcmp(name + kJavaLangLen, "Long;") == 0)
+        return PRIM_LONG;
+    return PRIM_NOT;
+}
+
+/*
+ * Convert primitive, boxed data from "srcPtr" to "dstPtr".
+ *
+ * Section v2 2.6 lists the various conversions and promotions.  We
+ * allow the "widening" and "identity" conversions, but don't allow the
+ * "narrowing" conversions.
+ *
+ * Allowed:
+ *  byte to short, int, long, float, double
+ *  short to int, long, float double
+ *  char to int, long, float, double
+ *  int to long, float, double
+ *  long to float, double
+ *  float to double
+ * Values of types byte, char, and short are "internally" widened to int.
+ *
+ * Returns the width of the destination primitive in 32-bit words
+ * (1 or 2), or -1 if the conversion is not allowed.
+ *
+ * TODO? use JValue rather than u4 pointers
+ */
int dvmConvertPrimitiveValue(PrimitiveType srcType,
    PrimitiveType dstType, const s4* srcPtr, s4* dstPtr)
{
    /* conversion ops: I/J/F/D name int/long/float/double-sized sources
     * and destinations; OK4/OK8 are identity copies of 4 or 8 bytes */
    enum {
        OK4, OK8, ItoJ,
        ItoD, JtoD, FtoD,
        ItoF, JtoF,
        bad, kMax
    };
    /* [src][dst] */
    static const int kConvMode[kMax][kMax] = {
    /*FROM *TO: bool    char    float   double  byte    short   int     long */
    /*bool */ { OK4,    bad,    bad,    bad,    bad,    bad,    bad,    bad  },
    /*char */ { bad,    OK4,    ItoF,   ItoD,   bad,    bad,    OK4,    ItoJ },
    /*float*/ { bad,    bad,    OK4,    FtoD,   bad,    bad,    bad,    bad  },
    /*doubl*/ { bad,    bad,    bad,    OK8,    bad,    bad,    bad,    bad  },
    /*byte */ { bad,    bad,    ItoF,   ItoD,   OK4,    OK4,    OK4,    ItoJ },
    /*short*/ { bad,    bad,    ItoF,   ItoD,   bad,    OK4,    OK4,    ItoJ },
    /*int  */ { bad,    bad,    ItoF,   ItoD,   bad,    bad,    OK4,    ItoJ },
    /*long */ { bad,    bad,    JtoF,   JtoD,   bad,    bad,    bad,    OK8  },
    };
    int result;

    assert(srcType != PRIM_NOT && dstType != PRIM_NOT &&
           srcType != PRIM_VOID && dstType != PRIM_VOID);
    /* table indexing assumes enum PrimitiveType follows the
     * bool,char,float,double,byte,short,int,long order noted above
     * (same order as the boxTypes table in dvmWrapPrimitive) */
    result = kConvMode[srcType][dstType];

    //LOGV("+++ convprim: src=%d dst=%d result=%d\n", srcType, dstType, result);

    /* perform the copy/widen; return value is the destination width in
     * 32-bit words (1 or 2), not bytes */
    switch (result) {
    case OK4:
        *dstPtr = *srcPtr;
        return 1;
    case OK8:
        *(s8*)dstPtr = *(s8*)srcPtr;
        return 2;
    case ItoJ:
        *(s8*)dstPtr = (s8) (*(s4*) srcPtr);
        return 2;
    case ItoD:
        *(double*)dstPtr = (double) (*(s4*) srcPtr);
        return 2;
    case JtoD:
        *(double*)dstPtr = (double) (*(long long*) srcPtr);
        return 2;
    case FtoD:
        *(double*)dstPtr = (double) (*(float*) srcPtr);
        return 2;
    case ItoF:
        *(float*)dstPtr = (float) (*(int*) srcPtr);
        return 1;
    case JtoF:
        *(float*)dstPtr = (float) (*(long long*) srcPtr);
        return 1;
    case bad:
        /* narrowing or otherwise disallowed conversion */
        LOGV("convert primitive: prim %d to %d not allowed\n",
            srcType, dstType);
        return -1;
    default:
        assert(false);
        return -1;
    }
}
+
+/*
+ * Convert types and widen primitives.  Puts the value of "arg" into
+ * "destPtr".
+ *
+ * Returns the width of the argument in 32-bit words (1 or 2), or -1 on error.
+ */
+int dvmConvertArgument(DataObject* arg, ClassObject* type, s4* destPtr)
+{
+    int retVal;
+
+    if (dvmIsPrimitiveClass(type)) {
+        /* e.g.: "arg" is java/lang/Float instance, "type" is VM float class */
+        PrimitiveType srcType;
+        s4* valuePtr;
+
+        srcType = getBoxedType(arg);
+        if (srcType < 0) {     // didn't pass a boxed primitive in
+            LOGVV("conv arg: type '%s' not boxed primitive\n",
+                arg->obj.clazz->descriptor);
+            return -1;
+        }
+
+        /* assumes value is stored in first instance field */
+        valuePtr = (s4*) arg->instanceData;
+
+        retVal = dvmConvertPrimitiveValue(srcType, type->primitiveType,
+                    valuePtr, destPtr);
+    } else {
+        /* verify object is compatible */
+        if ((arg == NULL) || dvmInstanceof(arg->obj.clazz, type)) {
+            *destPtr = (s4) arg;
+            retVal = 1;
+        } else {
+            LOGVV("Arg %p (%s) not compatible with %s\n",
+                arg, arg->obj.clazz->descriptor, type->descriptor);
+            retVal = -1;
+        }
+    }
+
+    return retVal;
+}
+
+/*
+ * Create a wrapper object for a primitive data type.  If "returnType" is
+ * not primitive, this just casts "value" to an object and returns it.
+ *
+ * We could invoke the "toValue" method on the box types to take
+ * advantage of pre-created values, but running that through the
+ * interpreter is probably less efficient than just allocating storage here.
+ *
+ * The caller must call dvmReleaseTrackedAlloc on the result.
+ */
+DataObject* dvmWrapPrimitive(JValue value, ClassObject* returnType)
+{
+    /* NOTE: entries must stay in the same order as enum PrimitiveType,
+     * because we index this array with returnType->primitiveType below */
+    static const char* boxTypes[] = {       // order from enum PrimitiveType
+        "Ljava/lang/Boolean;", 
+        "Ljava/lang/Character;",
+        "Ljava/lang/Float;",
+        "Ljava/lang/Double;",
+        "Ljava/lang/Byte;",
+        "Ljava/lang/Short;",
+        "Ljava/lang/Integer;",
+        "Ljava/lang/Long;"
+    };
+    ClassObject* wrapperClass;
+    DataObject* wrapperObj;
+    s4* dataPtr;
+    PrimitiveType typeIndex = returnType->primitiveType;
+    const char* classDescriptor;
+
+    if (typeIndex == PRIM_NOT) {
+        /* not a primitive -- no boxing needed, just cast */
+        /* add to tracking table so return value is always in table */
+        if (value.l != NULL)
+            dvmAddTrackedAlloc(value.l, NULL);
+        return (DataObject*) value.l;
+    }
+
+    assert(typeIndex >= 0 && typeIndex < PRIM_MAX);
+    if (typeIndex == PRIM_VOID)
+        return NULL;    /* nothing to wrap for a void return */
+
+    /* safe: the PRIM_VOID case returned before this index is used */
+    classDescriptor = boxTypes[typeIndex];
+
+    wrapperClass = dvmFindSystemClass(classDescriptor);
+    if (wrapperClass == NULL) {
+        /* lookup failure leaves an exception raised for the caller */
+        LOGW("Unable to find '%s'\n", classDescriptor);
+        assert(dvmCheckException(dvmThreadSelf()));
+        return NULL;
+    }
+
+    /* ALLOC_DEFAULT puts the result in the tracked-alloc table; the
+     * caller must call dvmReleaseTrackedAlloc (see function comment) */
+    wrapperObj = (DataObject*) dvmAllocObject(wrapperClass, ALLOC_DEFAULT);
+    if (wrapperObj == NULL)
+        return NULL;
+    dataPtr = (s4*) wrapperObj->instanceData;
+
+    /* assumes value is stored in first instance field */
+    /* (see dvmValidateBoxClasses) */
+    if (typeIndex == PRIM_LONG || typeIndex == PRIM_DOUBLE)
+        *(s8*)dataPtr = value.j;
+    else
+        *dataPtr = value.i;
+
+    return wrapperObj;
+}
+
+/*
+ * Unwrap a primitive data type, if necessary.
+ *
+ * If "returnType" is not primitive, we just tuck "value" into JValue and
+ * return it after verifying that it's the right type of object.
+ *
+ * Fails if the field is primitive and "value" is either not a boxed
+ * primitive or is of a type that cannot be converted.
+ *
+ * Returns "true" on success, "false" on failure.
+ */
+bool dvmUnwrapPrimitive(Object* value, ClassObject* returnType,
+    JValue* pResult)
+{
+    PrimitiveType typeIndex = returnType->primitiveType;
+    PrimitiveType valueIndex;
+
+    if (typeIndex == PRIM_NOT) {
+        /* reference type: verify assignability and pass it through */
+        if (value != NULL && !dvmInstanceof(value->clazz, returnType)) {
+            LOGD("wrong object type: %s %s\n",
+                value->clazz->descriptor, returnType->descriptor);
+            return false;
+        }
+        pResult->l = value;
+        return true;
+    } else if (typeIndex == PRIM_VOID) {
+        /* can't put anything into a void */
+        return false;
+    }
+
+    /* must be a boxed primitive class we recognize */
+    valueIndex = getBoxedType((DataObject*)value);
+    if (valueIndex == PRIM_NOT)
+        return false;
+
+    /* assumes value is stored in first instance field of "value" */
+    /* (see dvmValidateBoxClasses) */
+    if (dvmConvertPrimitiveValue(valueIndex, typeIndex,
+            (s4*) ((DataObject*)value)->instanceData, (s4*)pResult) < 0)
+    {
+        LOGV("Prim conversion failed\n");
+        return false;
+    }
+
+    return true;
+}
+
+
+/*
+ * Find the return type in the signature, and convert it to a class
+ * object.  For primitive types we use a boxed class, for reference types
+ * we do a name lookup.
+ *
+ * On failure, we return NULL with an exception raised.
+ */
+ClassObject* dvmGetBoxedReturnType(const Method* meth)
+{
+    const char* sig = dexProtoGetReturnType(&meth->prototype);
+
+    switch (*sig) {
+    case 'L':
+    case '[':
+        /* reference or array type: resolve the descriptor by name */
+        return dvmFindClass(sig, meth->clazz->classLoader);
+    case 'Z':
+    case 'B':
+    case 'C':
+    case 'S':
+    case 'I':
+    case 'J':
+    case 'F':
+    case 'D':
+    case 'V':
+        /* primitive (or void): use the VM's primitive class object */
+        return dvmFindPrimitiveClass(*sig);
+    default: {
+        /* should not have passed verification */
+        char* desc = dexProtoCopyMethodDescriptor(&meth->prototype);
+        LOGE("Bad return type in signature '%s'\n", desc);
+        free(desc);
+        dvmThrowException("Ljava/lang/InternalError;", NULL);
+        return NULL;
+    }
+    }
+}
+
+
+/*
+ * JNI reflection support: convert reflection object to Field ptr.
+ */
+Field* dvmGetFieldFromReflectObj(Object* obj)
+{
+    ClassObject* declaringClass;
+    int fieldSlot;
+
+    assert(obj->clazz == gDvm.classJavaLangReflectField);
+
+    /* pull the declaring class and slot index out of the Field object */
+    declaringClass = (ClassObject*) dvmGetFieldObject(obj,
+            gDvm.offJavaLangReflectField_declClass);
+    fieldSlot = dvmGetFieldInt(obj, gDvm.offJavaLangReflectField_slot);
+
+    /* class must be initialized before we hand out a field ID */
+    if (!dvmInitClass(declaringClass))
+        return NULL;
+
+    return dvmSlotToField(declaringClass, fieldSlot);
+}
+
+/*
+ * JNI reflection support: convert reflection object to Method ptr.
+ */
+Method* dvmGetMethodFromReflectObj(Object* obj)
+{
+    ClassObject* declaringClass;
+    int methSlot;
+
+    /* accept either a Constructor or a Method reflection object */
+    if (obj->clazz == gDvm.classJavaLangReflectConstructor) {
+        declaringClass = (ClassObject*) dvmGetFieldObject(obj,
+                gDvm.offJavaLangReflectConstructor_declClass);
+        methSlot = dvmGetFieldInt(obj,
+                gDvm.offJavaLangReflectConstructor_slot);
+    } else if (obj->clazz == gDvm.classJavaLangReflectMethod) {
+        declaringClass = (ClassObject*) dvmGetFieldObject(obj,
+                gDvm.offJavaLangReflectMethod_declClass);
+        methSlot = dvmGetFieldInt(obj, gDvm.offJavaLangReflectMethod_slot);
+    } else {
+        /* caller passed us something that isn't a reflection object */
+        assert(false);
+        return NULL;
+    }
+
+    /* class must be initialized before we hand out a method ID */
+    if (!dvmInitClass(declaringClass))
+        return NULL;
+
+    return dvmSlotToMethod(declaringClass, methSlot);
+}
+
+/*
+ * JNI reflection support: convert Field to reflection object.
+ *
+ * The return value is a java.lang.reflect.Field.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ */
+Object* dvmCreateReflectObjForField(const ClassObject* clazz, Field* field)
+{
+    ClassObject* reflectClass;
+
+    /* lazily initialize java.lang.reflect.Field before instantiating it */
+    reflectClass = gDvm.classJavaLangReflectField;
+    if (!dvmIsClassInitialized(reflectClass))
+        dvmInitClass(reflectClass);
+
+    /* result is tracked; caller must call dvmReleaseTrackedAlloc */
+    return createFieldObject(field, clazz);
+}
+
+/*
+ * JNI reflection support: convert Method to reflection object.
+ *
+ * The returned object will be either a java.lang.reflect.Method or
+ * .Constructor, depending on whether "method" is a constructor.
+ *
+ * This is also used for certain "system" annotations.
+ *
+ * Caller must call dvmReleaseTrackedAlloc().
+ */
+Object* dvmCreateReflectObjForMethod(const ClassObject* clazz, Method* method)
+{
+    bool isConstructor = (strcmp(method->name, "<init>") == 0);
+
+    UNUSED_PARAMETER(clazz);
+
+    if (isConstructor) {
+        /* lazily initialize java.lang.reflect.Constructor */
+        if (!dvmIsClassInitialized(gDvm.classJavaLangReflectConstructor))
+            dvmInitClass(gDvm.classJavaLangReflectConstructor);
+        return createConstructorObject(method);
+    }
+
+    /* lazily initialize java.lang.reflect.Method */
+    if (!dvmIsClassInitialized(gDvm.classJavaLangReflectMethod))
+        dvmInitClass(gDvm.classJavaLangReflectMethod);
+    return dvmCreateReflectMethodObject(method);
+}
+
diff --git a/vm/reflect/Reflect.h b/vm/reflect/Reflect.h
new file mode 100644
index 0000000..cd8e76c
--- /dev/null
+++ b/vm/reflect/Reflect.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Basic reflection calls and utility functions.
+ */
+#ifndef _DALVIK_REFLECT_REFLECT
+#define _DALVIK_REFLECT_REFLECT
+
+bool dvmReflectStartup(void);
+bool dvmReflectProxyStartup(void);
+bool dvmReflectAnnotationStartup(void);
+void dvmReflectShutdown(void);
+
+/*
+ * During startup, validate the "box" classes, e.g. java/lang/Integer.
+ */
+bool dvmValidateBoxClasses();
+
+/*
+ * Get all fields declared by a class.
+ *
+ * Includes both class and instance fields.
+ */
+ArrayObject* dvmGetDeclaredFields(ClassObject* clazz, bool publicOnly);
+
+/*
+ * Get all constructors declared by a class.
+ */
+ArrayObject* dvmGetDeclaredConstructors(ClassObject* clazz, bool publicOnly);
+
+/*
+ * Get all methods declared by a class.
+ *
+ * This includes both static and virtual methods, and can include private
+ * members if "publicOnly" is false.  It does not include Miranda methods,
+ * since those weren't declared in the class, or constructors.
+ */
+ArrayObject* dvmGetDeclaredMethods(ClassObject* clazz, bool publicOnly);
+
+/*
+ * Get all interfaces a class implements. If this is unable to allocate
+ * the result array, this raises an OutOfMemoryError and returns NULL.
+ */
+ArrayObject* dvmGetInterfaces(ClassObject* clazz);
+
+/*
+ * Convert slot numbers back to objects.
+ */
+Field* dvmSlotToField(ClassObject* clazz, int slot);
+Method* dvmSlotToMethod(ClassObject* clazz, int slot);
+
+/*
+ * Convert a primitive value, performing a widening conversion if necessary.
+ */
+int dvmConvertPrimitiveValue(PrimitiveType srcType,
+    PrimitiveType dstType, const s4* srcPtr, s4* dstPtr);
+
+/*
+ * Convert the argument to the specified type.
+ *
+ * Returns the width of the argument (1 for most types, 2 for J/D, -1 on
+ * error).
+ */
+int dvmConvertArgument(DataObject* arg, ClassObject* type, s4* ins);
+
+/*
+ * Create a wrapper object for a primitive data type.  If "returnType" is
+ * not primitive, this just returns "value" cast to an object.
+ */
+DataObject* dvmWrapPrimitive(JValue value, ClassObject* returnType);
+
+/*
+ * Unwrap a boxed primitive.  If "returnType" is not primitive, this just
+ * returns "value" cast into a JValue.
+ */
+bool dvmUnwrapPrimitive(Object* value, ClassObject* returnType,
+    JValue* pResult);
+
+/*
+ * Return the class object that matches the method's signature.  For
+ * primitive types, returns the box class.
+ */
+ClassObject* dvmGetBoxedReturnType(const Method* meth);
+
+/*
+ * JNI reflection support.
+ */
+Field* dvmGetFieldFromReflectObj(Object* obj);
+Method* dvmGetMethodFromReflectObj(Object* obj);
+Object* dvmCreateReflectObjForField(const ClassObject* clazz, Field* field);
+Object* dvmCreateReflectObjForMethod(const ClassObject* clazz, Method* method);
+
+/*
+ * Quick test to determine if the method in question is a reflection call.
+ * Used for some stack parsing.  Currently defined as "the method's declaring
+ * class is java.lang.reflect.Method".
+ */
+INLINE bool dvmIsReflectionMethod(const Method* method)
+{
+    /* a "reflection method" is one declared by java.lang.reflect.Method */
+    return method->clazz == gDvm.classJavaLangReflectMethod;
+}
+
+/*
+ * Proxy class generation.
+ */
+ClassObject* dvmGenerateProxyClass(StringObject* str, ArrayObject* interfaces,
+    Object* loader);
+
+/*
+ * Create a new java.lang.reflect.Method object based on "meth".
+ */
+Object* dvmCreateReflectMethodObject(const Method* meth);
+
+/*
+ * Return an array of Annotation objects for the specified piece.  For method
+ * parameters this is an array of arrays of Annotation objects.
+ *
+ * Method also applies to Constructor.
+ */
+ArrayObject* dvmGetClassAnnotations(const ClassObject* clazz);
+ArrayObject* dvmGetMethodAnnotations(const Method* method);
+ArrayObject* dvmGetFieldAnnotations(const Field* field);
+ArrayObject* dvmGetParameterAnnotations(const Method* method);
+
+/*
+ * Find the default value for an annotation member.
+ */
+Object* dvmGetAnnotationDefaultValue(const Method* method);
+
+/*
+ * Get the list of thrown exceptions for a method.  Returns NULL if there
+ * are no exceptions listed.
+ */
+ArrayObject* dvmGetMethodThrows(const Method* method);
+
+/*
+ * Get the Signature annotation.
+ */
+ArrayObject* dvmGetClassSignatureAnnotation(const ClassObject* clazz);
+ArrayObject* dvmGetMethodSignatureAnnotation(const Method* method);
+ArrayObject* dvmGetFieldSignatureAnnotation(const Field* field);
+
+/*
+ * Get the EnclosingMethod attribute from an annotation.  Returns a Method
+ * object, or NULL.
+ */
+Object* dvmGetEnclosingMethod(const ClassObject* clazz);
+
+/*
+ * Return clazz's declaring class, or NULL if there isn't one.
+ */
+ClassObject* dvmGetDeclaringClass(const ClassObject* clazz);
+
+/*
+ * Return clazz's enclosing class, or NULL if there isn't one.
+ */
+ClassObject* dvmGetEnclosingClass(const ClassObject* clazz);
+
+/*
+ * Get the EnclosingClass attribute from an annotation.  If found, returns
+ * "true".  A String with the original name of the class and the original
+ * access flags are returned through the arguments.  (The name will be NULL
+ * for an anonymous inner class.)
+ */
+bool dvmGetInnerClass(const ClassObject* clazz, StringObject** pName,
+    int* pAccessFlags);
+
+/*
+ * Get an array of class objects from the MemberClasses annotation.  Returns
+ * NULL if none found.
+ */
+ArrayObject* dvmGetDeclaredClasses(const ClassObject* clazz);
+
+/*
+ * Used to pass values out of annotation (and encoded array) processing
+ * functions.
+ */
+typedef struct AnnotationValue {
+    JValue  value;  /* decoded value; primitives use matching slot, refs use "l" */
+    u1      type;   /* encoded-value type tag -- presumably a kDexAnnotation* constant; confirm */
+} AnnotationValue;
+
+
+/**
+ * Iterator structure for iterating over DexEncodedArray instances. The
+ * structure should be treated as opaque.
+ */
+typedef struct {
+    const u1* cursor;                    /* current cursor */
+    u4 elementsLeft;                     /* number of elements left to read */
+    const DexEncodedArray* encodedArray; /* instance being iterated over */
+    u4 size;                             /* number of elements in instance */
+    const ClassObject* clazz;            /* class to resolve with respect to */
+} EncodedArrayIterator;
+
+/**
+ * Initializes an encoded array iterator.
+ * 
+ * @param iterator iterator to initialize
+ * @param encodedArray encoded array to iterate over
+ * @param clazz class to use when resolving strings and types
+ */
+void dvmEncodedArrayIteratorInitialize(EncodedArrayIterator* iterator,
+        const DexEncodedArray* encodedArray, const ClassObject* clazz);
+
+/**
+ * Returns whether there are more elements to be read.
+ */
+bool dvmEncodedArrayIteratorHasNext(const EncodedArrayIterator* iterator);
+
+/**
+ * Returns the next decoded value from the iterator, advancing its
+ * cursor. This returns primitive values in their corresponding union
+ * slots, and returns everything else (including nulls) as object
+ * references in the "l" union slot.
+ * 
+ * The caller must call dvmReleaseTrackedAlloc() on any returned reference.
+ * 
+ * @param value pointer to store decoded value into
+ * @returns true if a value was decoded and the cursor advanced; false if
+ * the last value had already been decoded or if there was a problem decoding
+ */
+bool dvmEncodedArrayIteratorGetNext(EncodedArrayIterator* iterator,
+        AnnotationValue* value);
+
+#endif /*_DALVIK_REFLECT_REFLECT*/
diff --git a/vm/test/Test.h b/vm/test/Test.h
new file mode 100644
index 0000000..a6b54a5
--- /dev/null
+++ b/vm/test/Test.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Internal unit tests.
+ */
+#ifndef _DALVIK_TEST_TEST
+#define _DALVIK_TEST_TEST
+
+bool dvmTestHash(void);
+
+#endif /*_DALVIK_TEST_TEST*/
diff --git a/vm/test/TestHash.c b/vm/test/TestHash.c
new file mode 100644
index 0000000..42fe014
--- /dev/null
+++ b/vm/test/TestHash.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Test the hash table functions.
+ */
+#include "Dalvik.h"
+
+#include <stdlib.h>
+
+#define kNumTestEntries 14
+
+/*
+ * Test foreach.
+ */
+static int printFunc(void* data, void* arg)
+{
+    int* pCount = (int*) arg;
+
+    /* "data" is the stored string; we just tally entries here */
+    //printf("  '%s'\n", (const char*) data);
+    // (should verify strings)
+
+    (*pCount)++;
+    return 0;       /* presumably 0 means "keep iterating" -- see dvmHashForeach */
+}
+static void dumpForeach(HashTable* pTab)
+{
+    int entryCount = 0;
+
+    /* walk the table via the callback interface, counting entries */
+    dvmHashForeach(pTab, printFunc, &entryCount);
+
+    if (entryCount != kNumTestEntries) {
+        LOGE("TestHash foreach test failed\n");
+        assert(false);
+    }
+}
+
+/*
+ * Test iterator.
+ */
+/*
+ * Walk the table with the iterator interface and verify the entry count.
+ */
+static void dumpIterator(HashTable* pTab)
+{
+    int count = 0;
+
+    //printf("Print from iterator:\n");
+    HashIter iter;
+    for (dvmHashIterBegin(pTab, &iter); !dvmHashIterDone(&iter);
+        dvmHashIterNext(&iter))
+    {
+        const char* str = (const char*) dvmHashIterData(&iter);
+        (void) str;     /* silence unused-variable warning */
+        //printf("  '%s'\n", str);
+        // (should verify strings)
+        count++;
+    }
+    if (count != kNumTestEntries) {
+        LOGE("TestHash iterator test failed\n");
+        assert(false);
+    }
+}
+
+/*
+ * Some quick hash table tests.
+ */
+bool dvmTestHash(void)
+{
+    HashTable* pTab;
+    char tmpStr[64];
+    const char* str;
+    u4 hash;
+    int i;
+
+    LOGV("TestHash BEGIN\n");
+
+    /* table owns its entries; "free" is the free func for removals */
+    pTab = dvmHashTableCreate(dvmHashSize(12), free);
+    if (pTab == NULL)
+        return false;
+
+    dvmHashTableLock(pTab);
+
+    /* add some entries */
+    for (i = 0; i < kNumTestEntries; i++) {
+        sprintf(tmpStr, "entry %d", i);
+        hash = dvmComputeUtf8Hash(tmpStr);
+        dvmHashTableLookup(pTab, hash, strdup(tmpStr),
+            (HashCompareFunc) strcmp, true);
+    }
+
+    dvmHashTableUnlock(pTab);
+
+    /* make sure we can find all entries */
+    for (i = 0; i < kNumTestEntries; i++) {
+        sprintf(tmpStr, "entry %d", i);
+        hash = dvmComputeUtf8Hash(tmpStr);
+        str = (const char*) dvmHashTableLookup(pTab, hash, tmpStr,
+                (HashCompareFunc) strcmp, false);
+        if (str == NULL) {
+            LOGE("TestHash: failure: could not find '%s'\n", tmpStr);
+            /* return false */
+        }
+    }
+
+    /* make sure it behaves correctly when entry not found and !doAdd */
+    sprintf(tmpStr, "entry %d", 17);
+    hash = dvmComputeUtf8Hash(tmpStr);
+    str = (const char*) dvmHashTableLookup(pTab, hash, tmpStr,
+            (HashCompareFunc) strcmp, false);
+    if (str == NULL) {
+        /* good */
+    } else {
+        LOGE("TestHash found nonexistent string (improper add?)\n");
+    }
+
+    dumpForeach(pTab);
+    dumpIterator(pTab);
+
+    /* make sure they all get freed */
+    dvmHashTableFree(pTab);
+
+
+    /*
+     * Round 2: verify probing & tombstones.  Tiny table and a single
+     * hash value so all entries collide.
+     */
+    pTab = dvmHashTableCreate(dvmHashSize(2), free);
+    if (pTab == NULL)
+        return false;
+
+    hash = 0;
+
+    /* two entries, same hash, different values */
+    char* str1;
+    str1 = (char*) dvmHashTableLookup(pTab, hash, strdup("one"),
+            (HashCompareFunc) strcmp, true);
+    assert(str1 != NULL);
+    str = (const char*) dvmHashTableLookup(pTab, hash, strdup("two"),
+            (HashCompareFunc) strcmp, true);
+
+    /* remove the first one */
+    if (!dvmHashTableRemove(pTab, hash, str1))
+        LOGE("TestHash failed to delete item\n");
+    else
+        free(str1);     // "Remove" doesn't call the free func
+
+    /* make sure iterator doesn't include deleted entries */
+    int count = 0;
+    HashIter iter;
+    for (dvmHashIterBegin(pTab, &iter); !dvmHashIterDone(&iter);
+        dvmHashIterNext(&iter))
+    {
+        count++;
+    }
+    if (count != 1) {
+        LOGE("TestHash wrong number of entries (%d)\n", count);
+    }
+
+    /* see if we can find them */
+    str = (const char*) dvmHashTableLookup(pTab, hash, "one",
+            (HashCompareFunc) strcmp, false);
+    if (str != NULL)
+        LOGE("TestHash deleted entry has returned!\n");
+    str = (const char*) dvmHashTableLookup(pTab, hash, "two",
+            (HashCompareFunc) strcmp, false);
+    if (str == NULL)
+        LOGE("TestHash entry vanished\n");
+
+    /* force a table realloc to exercise tombstone removal */
+    for (i = 0; i < 20; i++) {
+        sprintf(tmpStr, "entry %d", i);
+        str = (const char*) dvmHashTableLookup(pTab, hash, strdup(tmpStr),
+                (HashCompareFunc) strcmp, true);
+        assert(str != NULL);
+    }
+
+    dvmHashTableFree(pTab);
+    LOGV("TestHash END\n");
+
+    return true;
+}
+